Dataset schema (three columns per row): text — string, 1 to 1.02k characters, holding a flattened source-code snippet; class_index — int64, values ranging from 0 to 10.8k; source — string, 85 to 188 characters, the path of the file the snippet was extracted from.
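Each row that follows uses this three-column layout. As a minimal, hypothetical sketch (assuming the dump were exported as JSON Lines, which this excerpt does not state), a row could be modeled and parsed as below; the field names come from the schema above, everything else is illustrative:

import json
from dataclasses import dataclass

@dataclass
class Row:
    text: str         # flattened source-code snippet (1 to ~1.02k characters)
    class_index: int  # int64 label, observed values 0 to 10.8k
    source: str       # path of the file the snippet was taken from (85 to 188 characters)

def parse_row(line: str) -> Row:
    # Parse one JSON Lines record into the three columns shown above.
    record = json.loads(line)
    return Row(
        text=record["text"],
        class_index=int(record["class_index"]),
        source=record["source"],
    )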
bsz, q_len, _ = hidden_states.size() query_states = self.q_proj(hidden_states) key_states = self.k_proj(hidden_states) value_states = self.v_proj(hidden_states) query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2) key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) cos, sin = position_embeddings query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin) if past_key_value is not None: # sin and cos are specific to RoPE models; cache_position needed for the static cache cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position} key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
3,538
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/diffllama/modular_diffllama.py
key_states = repeat_kv(key_states, self.num_key_value_groups) value_states = repeat_kv(value_states, self.num_key_value_groups) value_states = torch.cat(torch.chunk(value_states, 2, dim=1), dim=-1) value_states = value_states.repeat(1, 2, 1, 1) causal_mask = attention_mask if attention_mask is not None: causal_mask = causal_mask[:, :, :, : key_states.shape[-2]] # SDPA with memory-efficient backend is currently (torch==2.1.2) bugged with non-contiguous inputs with custom attn_mask, # Reference: https://github.com/pytorch/pytorch/issues/112577. if query_states.device.type == "cuda" and causal_mask is not None: query_states = query_states.contiguous() key_states = key_states.contiguous() value_states = value_states.contiguous()
3,538
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/diffllama/modular_diffllama.py
# We dispatch to SDPA's Flash Attention or Efficient kernels via this `is_causal` if statement instead of an inline conditional assignment # in SDPA to support both torch.compile's dynamic shapes and full graph options. An inline conditional prevents dynamic shapes from compiling. is_causal = True if causal_mask is None and q_len > 1 else False attn_output = torch.nn.functional.scaled_dot_product_attention( query_states, key_states, value_states, attn_mask=causal_mask, dropout_p=self.attention_dropout if self.training else 0.0, is_causal=is_causal, ) attn_output1, attn_output2 = torch.chunk(attn_output, 2, dim=1)
3,538
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/diffllama/modular_diffllama.py
lambda_1 = torch.exp(torch.sum(self.lambda_q1 * self.lambda_k1, dim=-1, dtype=torch.float32)).to( query_states.dtype ) lambda_2 = torch.exp(torch.sum(self.lambda_q2 * self.lambda_k2, dim=-1, dtype=torch.float32)).to( query_states.dtype ) lambda_full = lambda_1 - lambda_2 + self.lambda_init attn_output = attn_output1 - lambda_full * attn_output2 attn_output = (1 - self.lambda_init) * self.groupnorm(attn_output) attn_output = attn_output.transpose(1, 2).contiguous() attn_output = attn_output.view(bsz, q_len, -1) attn_output = self.o_proj(attn_output) return attn_output, None
3,538
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/diffllama/modular_diffllama.py
class DiffLlamaDecoderLayer(LlamaDecoderLayer): def __init__(self, config: DiffLlamaConfig, layer_idx: int): super().__init__(config, layer_idx) self.self_attn = DIFFLLAMA_ATTENTION_CLASSES[config._attn_implementation](config=config, layer_idx=layer_idx)
3,539
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/diffllama/modular_diffllama.py
class DiffLlamaPreTrainedModel(LlamaPreTrainedModel): _supports_flex_attn = False
3,540
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/diffllama/modular_diffllama.py
class DiffLlamaModel(LlamaModel): pass
3,541
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/diffllama/modular_diffllama.py
class DiffLlamaForCausalLM(GemmaForCausalLM): pass
3,542
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/diffllama/modular_diffllama.py
class DiffLlamaForSequenceClassification(LlamaForSequenceClassification): pass
3,543
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/diffllama/modular_diffllama.py
class DiffLlamaForQuestionAnswering(LlamaForQuestionAnswering): pass
3,544
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/diffllama/modular_diffllama.py
class DiffLlamaForTokenClassification(LlamaForTokenClassification): pass
3,545
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/diffllama/modular_diffllama.py
class SqueezeBertTokenizer(PreTrainedTokenizer): r""" Construct a SqueezeBERT tokenizer. Based on WordPiece. This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods.
3,546
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/squeezebert/tokenization_squeezebert.py
Args: vocab_file (`str`): File containing the vocabulary. do_lower_case (`bool`, *optional*, defaults to `True`): Whether or not to lowercase the input when tokenizing. do_basic_tokenize (`bool`, *optional*, defaults to `True`): Whether or not to do basic tokenization before WordPiece. never_split (`Iterable`, *optional*): Collection of tokens which will never be split during tokenization. Only has an effect when `do_basic_tokenize=True` unk_token (`str`, *optional*, defaults to `"[UNK]"`): The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead. sep_token (`str`, *optional*, defaults to `"[SEP]"`): The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
3,546
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/squeezebert/tokenization_squeezebert.py
sequence classification or for a text and a question for question answering. It is also used as the last token of a sequence built with special tokens. pad_token (`str`, *optional*, defaults to `"[PAD]"`): The token used for padding, for example when batching sequences of different lengths. cls_token (`str`, *optional*, defaults to `"[CLS]"`): The classifier token which is used when doing sequence classification (classification of the whole sequence instead of per-token classification). It is the first token of the sequence when built with special tokens. mask_token (`str`, *optional*, defaults to `"[MASK]"`): The token used for masking values. This is the token used when training this model with masked language modeling. This is the token which the model will try to predict. tokenize_chinese_chars (`bool`, *optional*, defaults to `True`):
3,546
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/squeezebert/tokenization_squeezebert.py
Whether or not to tokenize Chinese characters.
3,546
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/squeezebert/tokenization_squeezebert.py
This should likely be deactivated for Japanese (see this [issue](https://github.com/huggingface/transformers/issues/328)). strip_accents (`bool`, *optional*): Whether or not to strip all accents. If this option is not specified, then it will be determined by the value for `lowercase` (as in the original SqueezeBERT). clean_up_tokenization_spaces (`bool`, *optional*, defaults to `True`): Whether or not to cleanup spaces after decoding, cleanup consists in removing potential artifacts like extra spaces. """ vocab_files_names = VOCAB_FILES_NAMES
3,546
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/squeezebert/tokenization_squeezebert.py
def __init__( self, vocab_file, do_lower_case=True, do_basic_tokenize=True, never_split=None, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, clean_up_tokenization_spaces=True, **kwargs, ): if not os.path.isfile(vocab_file): raise ValueError( f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained" " model use `tokenizer = SqueezeBertTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`" ) self.vocab = load_vocab(vocab_file) self.ids_to_tokens = collections.OrderedDict([(ids, tok) for tok, ids in self.vocab.items()]) self.do_basic_tokenize = do_basic_tokenize if do_basic_tokenize: self.basic_tokenizer = BasicTokenizer(
3,546
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/squeezebert/tokenization_squeezebert.py
do_lower_case=do_lower_case, never_split=never_split, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, )
3,546
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/squeezebert/tokenization_squeezebert.py
self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab, unk_token=str(unk_token)) super().__init__( do_lower_case=do_lower_case, do_basic_tokenize=do_basic_tokenize, never_split=never_split, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, clean_up_tokenization_spaces=clean_up_tokenization_spaces, **kwargs, ) @property def do_lower_case(self): return self.basic_tokenizer.do_lower_case @property def vocab_size(self): return len(self.vocab) def get_vocab(self): return dict(self.vocab, **self.added_tokens_encoder)
3,546
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/squeezebert/tokenization_squeezebert.py
def _tokenize(self, text, split_special_tokens=False): split_tokens = [] if self.do_basic_tokenize: for token in self.basic_tokenizer.tokenize( text, never_split=self.all_special_tokens if not split_special_tokens else None ): # If the token is part of the never_split set if token in self.basic_tokenizer.never_split: split_tokens.append(token) else: split_tokens += self.wordpiece_tokenizer.tokenize(token) else: split_tokens = self.wordpiece_tokenizer.tokenize(text) return split_tokens def _convert_token_to_id(self, token): """Converts a token (str) in an id using the vocab.""" return self.vocab.get(token, self.vocab.get(self.unk_token)) def _convert_id_to_token(self, index): """Converts an index (integer) in a token (str) using the vocab.""" return self.ids_to_tokens.get(index, self.unk_token)
3,546
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/squeezebert/tokenization_squeezebert.py
def convert_tokens_to_string(self, tokens): """Converts a sequence of tokens (string) in a single string.""" out_string = " ".join(tokens).replace(" ##", "").strip() return out_string def build_inputs_with_special_tokens( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: """ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. A SqueezeBERT sequence has the following format: - single sequence: `[CLS] X [SEP]` - pair of sequences: `[CLS] A [SEP] B [SEP]` Args: token_ids_0 (`List[int]`): List of IDs to which the special tokens will be added. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs.
3,546
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/squeezebert/tokenization_squeezebert.py
Returns: `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens. """ if token_ids_1 is None: return [self.cls_token_id] + token_ids_0 + [self.sep_token_id] cls = [self.cls_token_id] sep = [self.sep_token_id] return cls + token_ids_0 + sep + token_ids_1 + sep def get_special_tokens_mask( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False ) -> List[int]: """ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer `prepare_for_model` method.
3,546
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/squeezebert/tokenization_squeezebert.py
Args: token_ids_0 (`List[int]`): List of IDs. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. already_has_special_tokens (`bool`, *optional*, defaults to `False`): Whether or not the token list is already formatted with special tokens for the model. Returns: `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token. """ if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True ) if token_ids_1 is not None: return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1] return [1] + ([0] * len(token_ids_0)) + [1]
3,546
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/squeezebert/tokenization_squeezebert.py
def create_token_type_ids_from_sequences( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: """ Create a mask from the two sequences passed to be used in a sequence-pair classification task. A SqueezeBERT sequence pair mask has the following format: ``` 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 | first sequence | second sequence | ``` If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s). Args: token_ids_0 (`List[int]`): List of IDs. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs.
3,546
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/squeezebert/tokenization_squeezebert.py
Returns: `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s). """ sep = [self.sep_token_id] cls = [self.cls_token_id] if token_ids_1 is None: return len(cls + token_ids_0 + sep) * [0] return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
3,546
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/squeezebert/tokenization_squeezebert.py
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: index = 0 if os.path.isdir(save_directory): vocab_file = os.path.join( save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) else: vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory with open(vocab_file, "w", encoding="utf-8") as writer: for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]): if index != token_index: logger.warning( f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive." " Please check that the vocabulary is not corrupted!" ) index = token_index writer.write(token + "\n") index += 1 return (vocab_file,)
3,546
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/squeezebert/tokenization_squeezebert.py
class BasicTokenizer: """ Constructs a BasicTokenizer that will run basic tokenization (punctuation splitting, lower casing, etc.). Args: do_lower_case (`bool`, *optional*, defaults to `True`): Whether or not to lowercase the input when tokenizing. never_split (`Iterable`, *optional*): Collection of tokens which will never be split during tokenization. Only has an effect when `do_basic_tokenize=True` tokenize_chinese_chars (`bool`, *optional*, defaults to `True`): Whether or not to tokenize Chinese characters.
3,547
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/squeezebert/tokenization_squeezebert.py
This should likely be deactivated for Japanese (see this [issue](https://github.com/huggingface/transformers/issues/328)). strip_accents (`bool`, *optional*): Whether or not to strip all accents. If this option is not specified, then it will be determined by the value for `lowercase` (as in the original BERT). do_split_on_punc (`bool`, *optional*, defaults to `True`): In some instances we want to skip the basic punctuation splitting so that later tokenization can capture the full context of the words, such as contractions. """
3,547
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/squeezebert/tokenization_squeezebert.py
def __init__( self, do_lower_case=True, never_split=None, tokenize_chinese_chars=True, strip_accents=None, do_split_on_punc=True, ): if never_split is None: never_split = [] self.do_lower_case = do_lower_case self.never_split = set(never_split) self.tokenize_chinese_chars = tokenize_chinese_chars self.strip_accents = strip_accents self.do_split_on_punc = do_split_on_punc def tokenize(self, text, never_split=None): """ Basic Tokenization of a piece of text. For sub-word tokenization, see WordPieceTokenizer.
3,547
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/squeezebert/tokenization_squeezebert.py
Args: never_split (`List[str]`, *optional*) Kept for backward compatibility purposes. Now implemented directly at the base class level (see [`PreTrainedTokenizer.tokenize`]) List of token not to split. """ # union() returns a new set by concatenating the two sets. never_split = self.never_split.union(set(never_split)) if never_split else self.never_split text = self._clean_text(text)
3,547
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/squeezebert/tokenization_squeezebert.py
# This was added on November 1st, 2018 for the multilingual and Chinese # models. This is also applied to the English models now, but it doesn't # matter since the English models were not trained on any Chinese data # and generally don't have any Chinese data in them (there are Chinese # characters in the vocabulary because Wikipedia does have some Chinese # words in the English Wikipedia.). if self.tokenize_chinese_chars: text = self._tokenize_chinese_chars(text) # prevents treating the same character with different unicode codepoints as different characters unicode_normalized_text = unicodedata.normalize("NFC", text) orig_tokens = whitespace_tokenize(unicode_normalized_text) split_tokens = [] for token in orig_tokens: if token not in never_split: if self.do_lower_case: token = token.lower() if self.strip_accents is not False:
3,547
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/squeezebert/tokenization_squeezebert.py
token = self._run_strip_accents(token) elif self.strip_accents: token = self._run_strip_accents(token) split_tokens.extend(self._run_split_on_punc(token, never_split))
3,547
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/squeezebert/tokenization_squeezebert.py
output_tokens = whitespace_tokenize(" ".join(split_tokens)) return output_tokens def _run_strip_accents(self, text): """Strips accents from a piece of text.""" text = unicodedata.normalize("NFD", text) output = [] for char in text: cat = unicodedata.category(char) if cat == "Mn": continue output.append(char) return "".join(output)
3,547
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/squeezebert/tokenization_squeezebert.py
def _run_split_on_punc(self, text, never_split=None): """Splits punctuation on a piece of text.""" if not self.do_split_on_punc or (never_split is not None and text in never_split): return [text] chars = list(text) i = 0 start_new_word = True output = [] while i < len(chars): char = chars[i] if _is_punctuation(char): output.append([char]) start_new_word = True else: if start_new_word: output.append([]) start_new_word = False output[-1].append(char) i += 1 return ["".join(x) for x in output]
3,547
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/squeezebert/tokenization_squeezebert.py
def _tokenize_chinese_chars(self, text): """Adds whitespace around any CJK character.""" output = [] for char in text: cp = ord(char) if self._is_chinese_char(cp): output.append(" ") output.append(char) output.append(" ") else: output.append(char) return "".join(output)
3,547
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/squeezebert/tokenization_squeezebert.py
def _is_chinese_char(self, cp): """Checks whether CP is the codepoint of a CJK character.""" # This defines a "chinese character" as anything in the CJK Unicode block: # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block) # # Note that the CJK Unicode block is NOT all Japanese and Korean characters, # despite its name. The modern Korean Hangul alphabet is a different block, # as is Japanese Hiragana and Katakana. Those alphabets are used to write # space-separated words, so they are not treated specially and handled # like the all of the other languages. if ( (cp >= 0x4E00 and cp <= 0x9FFF) or (cp >= 0x3400 and cp <= 0x4DBF) # or (cp >= 0x20000 and cp <= 0x2A6DF) # or (cp >= 0x2A700 and cp <= 0x2B73F) # or (cp >= 0x2B740 and cp <= 0x2B81F) # or (cp >= 0x2B820 and cp <= 0x2CEAF) # or (cp >= 0xF900 and cp <= 0xFAFF)
3,547
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/squeezebert/tokenization_squeezebert.py
or (cp >= 0x2F800 and cp <= 0x2FA1F) # ): # return True
3,547
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/squeezebert/tokenization_squeezebert.py
return False def _clean_text(self, text): """Performs invalid character removal and whitespace cleanup on text.""" output = [] for char in text: cp = ord(char) if cp == 0 or cp == 0xFFFD or _is_control(char): continue if _is_whitespace(char): output.append(" ") else: output.append(char) return "".join(output)
3,547
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/squeezebert/tokenization_squeezebert.py
class WordpieceTokenizer: """Runs WordPiece tokenization.""" def __init__(self, vocab, unk_token, max_input_chars_per_word=100): self.vocab = vocab self.unk_token = unk_token self.max_input_chars_per_word = max_input_chars_per_word def tokenize(self, text): """ Tokenizes a piece of text into its word pieces. This uses a greedy longest-match-first algorithm to perform tokenization using the given vocabulary. For example, `input = "unaffable"` wil return as output `["un", "##aff", "##able"]`. Args: text: A single token or whitespace separated tokens. This should have already been passed through *BasicTokenizer*. Returns: A list of wordpiece tokens. """
3,548
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/squeezebert/tokenization_squeezebert.py
output_tokens = [] for token in whitespace_tokenize(text): chars = list(token) if len(chars) > self.max_input_chars_per_word: output_tokens.append(self.unk_token) continue is_bad = False start = 0 sub_tokens = [] while start < len(chars): end = len(chars) cur_substr = None while start < end: substr = "".join(chars[start:end]) if start > 0: substr = "##" + substr if substr in self.vocab: cur_substr = substr break end -= 1 if cur_substr is None: is_bad = True break sub_tokens.append(cur_substr) start = end
3,548
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/squeezebert/tokenization_squeezebert.py
if is_bad: output_tokens.append(self.unk_token) else: output_tokens.extend(sub_tokens) return output_tokens
3,548
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/squeezebert/tokenization_squeezebert.py
class SqueezeBertEmbeddings(nn.Module): """Construct the embeddings from word, position and token_type embeddings.""" def __init__(self, config): super().__init__() self.word_embeddings = nn.Embedding(config.vocab_size, config.embedding_size, padding_idx=config.pad_token_id) self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.embedding_size) self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.embedding_size) # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load # any TensorFlow checkpoint file self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob)
3,549
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/squeezebert/modeling_squeezebert.py
# position_ids (1, len position emb) is contiguous in memory and exported when serialized self.register_buffer( "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False ) def forward(self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None): if input_ids is not None: input_shape = input_ids.size() else: input_shape = inputs_embeds.size()[:-1] seq_length = input_shape[1] if position_ids is None: position_ids = self.position_ids[:, :seq_length] if token_type_ids is None: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device) if inputs_embeds is None: inputs_embeds = self.word_embeddings(input_ids) position_embeddings = self.position_embeddings(position_ids) token_type_embeddings = self.token_type_embeddings(token_type_ids)
3,549
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/squeezebert/modeling_squeezebert.py
embeddings = inputs_embeds + position_embeddings + token_type_embeddings embeddings = self.LayerNorm(embeddings) embeddings = self.dropout(embeddings) return embeddings
3,549
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/squeezebert/modeling_squeezebert.py
class MatMulWrapper(nn.Module): """ Wrapper for torch.matmul(). This makes flop-counting easier to implement. Note that if you directly call torch.matmul() in your code, the flop counter will typically ignore the flops of the matmul. """ def __init__(self): super().__init__() def forward(self, mat1, mat2): """ :param inputs: two torch tensors :return: matmul of these tensors Here are the typical dimensions found in BERT (the B is optional) mat1.shape: [B, <optional extra dims>, M, K] mat2.shape: [B, <optional extra dims>, K, N] output shape: [B, <optional extra dims>, M, N] """ return torch.matmul(mat1, mat2)
3,550
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/squeezebert/modeling_squeezebert.py
class SqueezeBertLayerNorm(nn.LayerNorm): """ This is a nn.LayerNorm subclass that accepts NCW data layout and performs normalization in the C dimension. N = batch C = channels W = sequence length """ def __init__(self, hidden_size, eps=1e-12): nn.LayerNorm.__init__(self, normalized_shape=hidden_size, eps=eps) # instantiates self.{weight, bias, eps} def forward(self, x): x = x.permute(0, 2, 1) x = nn.LayerNorm.forward(self, x) return x.permute(0, 2, 1)
3,551
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/squeezebert/modeling_squeezebert.py
class ConvDropoutLayerNorm(nn.Module): """ ConvDropoutLayerNorm: Conv, Dropout, LayerNorm """ def __init__(self, cin, cout, groups, dropout_prob): super().__init__() self.conv1d = nn.Conv1d(in_channels=cin, out_channels=cout, kernel_size=1, groups=groups) self.layernorm = SqueezeBertLayerNorm(cout) self.dropout = nn.Dropout(dropout_prob) def forward(self, hidden_states, input_tensor): x = self.conv1d(hidden_states) x = self.dropout(x) x = x + input_tensor x = self.layernorm(x) return x
3,552
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/squeezebert/modeling_squeezebert.py
class ConvActivation(nn.Module): """ ConvActivation: Conv, Activation """ def __init__(self, cin, cout, groups, act): super().__init__() self.conv1d = nn.Conv1d(in_channels=cin, out_channels=cout, kernel_size=1, groups=groups) self.act = ACT2FN[act] def forward(self, x): output = self.conv1d(x) return self.act(output)
3,553
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/squeezebert/modeling_squeezebert.py
class SqueezeBertSelfAttention(nn.Module): def __init__(self, config, cin, q_groups=1, k_groups=1, v_groups=1): """ config = used for some things; ignored for others (work in progress...) cin = input channels = output channels groups = number of groups to use in conv1d layers """ super().__init__() if cin % config.num_attention_heads != 0: raise ValueError( f"cin ({cin}) is not a multiple of the number of attention heads ({config.num_attention_heads})" ) self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(cin / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size
3,554
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/squeezebert/modeling_squeezebert.py
self.query = nn.Conv1d(in_channels=cin, out_channels=cin, kernel_size=1, groups=q_groups) self.key = nn.Conv1d(in_channels=cin, out_channels=cin, kernel_size=1, groups=k_groups) self.value = nn.Conv1d(in_channels=cin, out_channels=cin, kernel_size=1, groups=v_groups) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) self.softmax = nn.Softmax(dim=-1) self.matmul_qk = MatMulWrapper() self.matmul_qkv = MatMulWrapper() def transpose_for_scores(self, x): """ - input: [N, C, W] - output: [N, C1, W, C2] where C1 is the head index, and C2 is one head's contents """ new_x_shape = (x.size()[0], self.num_attention_heads, self.attention_head_size, x.size()[-1]) # [N, C1, C2, W] x = x.view(*new_x_shape) return x.permute(0, 1, 3, 2) # [N, C1, C2, W] --> [N, C1, W, C2]
3,554
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/squeezebert/modeling_squeezebert.py
def transpose_key_for_scores(self, x): """ - input: [N, C, W] - output: [N, C1, C2, W] where C1 is the head index, and C2 is one head's contents """ new_x_shape = (x.size()[0], self.num_attention_heads, self.attention_head_size, x.size()[-1]) # [N, C1, C2, W] x = x.view(*new_x_shape) # no `permute` needed return x def transpose_output(self, x): """ - input: [N, C1, W, C2] - output: [N, C, W] """ x = x.permute(0, 1, 3, 2).contiguous() # [N, C1, C2, W] new_x_shape = (x.size()[0], self.all_head_size, x.size()[3]) # [N, C, W] x = x.view(*new_x_shape) return x def forward(self, hidden_states, attention_mask, output_attentions): """ expects hidden_states in [N, C, W] data layout.
3,554
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/squeezebert/modeling_squeezebert.py
The attention_mask data layout is [N, W], and it does not need to be transposed. """ mixed_query_layer = self.query(hidden_states) mixed_key_layer = self.key(hidden_states) mixed_value_layer = self.value(hidden_states) query_layer = self.transpose_for_scores(mixed_query_layer) key_layer = self.transpose_key_for_scores(mixed_key_layer) value_layer = self.transpose_for_scores(mixed_value_layer) # Take the dot product between "query" and "key" to get the raw attention scores. attention_score = self.matmul_qk(query_layer, key_layer) attention_score = attention_score / math.sqrt(self.attention_head_size) # Apply the attention mask is (precomputed for all layers in BertModel forward() function) attention_score = attention_score + attention_mask # Normalize the attention scores to probabilities. attention_probs = self.softmax(attention_score)
3,554
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/squeezebert/modeling_squeezebert.py
# This is actually dropping out entire tokens to attend to, which might # seem a bit unusual, but is taken from the original Transformer paper. attention_probs = self.dropout(attention_probs) context_layer = self.matmul_qkv(attention_probs, value_layer) context_layer = self.transpose_output(context_layer) result = {"context_layer": context_layer} if output_attentions: result["attention_score"] = attention_score return result
3,554
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/squeezebert/modeling_squeezebert.py
class SqueezeBertModule(nn.Module): def __init__(self, config): """ - hidden_size = input chans = output chans for Q, K, V (they are all the same ... for now) = output chans for the module - intermediate_size = output chans for intermediate layer - groups = number of groups for all layers in the BertModule. (eventually we could change the interface to allow different groups for different layers) """ super().__init__() c0 = config.hidden_size c1 = config.hidden_size c2 = config.intermediate_size c3 = config.hidden_size
3,555
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/squeezebert/modeling_squeezebert.py
self.attention = SqueezeBertSelfAttention( config=config, cin=c0, q_groups=config.q_groups, k_groups=config.k_groups, v_groups=config.v_groups ) self.post_attention = ConvDropoutLayerNorm( cin=c0, cout=c1, groups=config.post_attention_groups, dropout_prob=config.hidden_dropout_prob ) self.intermediate = ConvActivation(cin=c1, cout=c2, groups=config.intermediate_groups, act=config.hidden_act) self.output = ConvDropoutLayerNorm( cin=c2, cout=c3, groups=config.output_groups, dropout_prob=config.hidden_dropout_prob ) def forward(self, hidden_states, attention_mask, output_attentions): att = self.attention(hidden_states, attention_mask, output_attentions) attention_output = att["context_layer"]
3,555
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/squeezebert/modeling_squeezebert.py
post_attention_output = self.post_attention(attention_output, hidden_states) intermediate_output = self.intermediate(post_attention_output) layer_output = self.output(intermediate_output, post_attention_output) output_dict = {"feature_map": layer_output} if output_attentions: output_dict["attention_score"] = att["attention_score"] return output_dict
3,555
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/squeezebert/modeling_squeezebert.py
class SqueezeBertEncoder(nn.Module): def __init__(self, config): super().__init__() assert config.embedding_size == config.hidden_size, ( "If you want embedding_size != intermediate hidden_size, " "please insert a Conv1d layer to adjust the number of channels " "before the first SqueezeBertModule." ) self.layers = nn.ModuleList(SqueezeBertModule(config) for _ in range(config.num_hidden_layers)) def forward( self, hidden_states, attention_mask=None, head_mask=None, output_attentions=False, output_hidden_states=False, return_dict=True, ): if head_mask is None: head_mask_is_all_none = True elif head_mask.count(None) == len(head_mask): head_mask_is_all_none = True else: head_mask_is_all_none = False assert head_mask_is_all_none is True, "head_mask is not yet supported in the SqueezeBert implementation."
3,556
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/squeezebert/modeling_squeezebert.py
# [batch_size, sequence_length, hidden_size] --> [batch_size, hidden_size, sequence_length] hidden_states = hidden_states.permute(0, 2, 1) all_hidden_states = () if output_hidden_states else None all_attentions = () if output_attentions else None for layer in self.layers: if output_hidden_states: hidden_states = hidden_states.permute(0, 2, 1) all_hidden_states += (hidden_states,) hidden_states = hidden_states.permute(0, 2, 1) layer_output = layer.forward(hidden_states, attention_mask, output_attentions) hidden_states = layer_output["feature_map"] if output_attentions: all_attentions += (layer_output["attention_score"],) # [batch_size, hidden_size, sequence_length] --> [batch_size, sequence_length, hidden_size] hidden_states = hidden_states.permute(0, 2, 1)
3,556
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/squeezebert/modeling_squeezebert.py
if output_hidden_states: all_hidden_states += (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, all_hidden_states, all_attentions] if v is not None) return BaseModelOutput( last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions )
3,556
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/squeezebert/modeling_squeezebert.py
class SqueezeBertPooler(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.activation = nn.Tanh() def forward(self, hidden_states): # We "pool" the model by simply taking the hidden state corresponding # to the first token. first_token_tensor = hidden_states[:, 0] pooled_output = self.dense(first_token_tensor) pooled_output = self.activation(pooled_output) return pooled_output
3,557
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/squeezebert/modeling_squeezebert.py
class SqueezeBertPredictionHeadTransform(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) if isinstance(config.hidden_act, str): self.transform_act_fn = ACT2FN[config.hidden_act] else: self.transform_act_fn = config.hidden_act self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) def forward(self, hidden_states): hidden_states = self.dense(hidden_states) hidden_states = self.transform_act_fn(hidden_states) hidden_states = self.LayerNorm(hidden_states) return hidden_states
3,558
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/squeezebert/modeling_squeezebert.py
class SqueezeBertLMPredictionHead(nn.Module): def __init__(self, config): super().__init__() self.transform = SqueezeBertPredictionHeadTransform(config) # The output weights are the same as the input embeddings, but there is # an output-only bias for each token. self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False) self.bias = nn.Parameter(torch.zeros(config.vocab_size)) # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings` self.decoder.bias = self.bias def _tie_weights(self) -> None: self.decoder.bias = self.bias def forward(self, hidden_states): hidden_states = self.transform(hidden_states) hidden_states = self.decoder(hidden_states) return hidden_states
3,559
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/squeezebert/modeling_squeezebert.py
class SqueezeBertOnlyMLMHead(nn.Module): def __init__(self, config): super().__init__() self.predictions = SqueezeBertLMPredictionHead(config) def forward(self, sequence_output): prediction_scores = self.predictions(sequence_output) return prediction_scores
3,560
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/squeezebert/modeling_squeezebert.py
class SqueezeBertPreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = SqueezeBertConfig base_model_prefix = "transformer"
3,561
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/squeezebert/modeling_squeezebert.py
def _init_weights(self, module): """Initialize the weights""" if isinstance(module, (nn.Linear, nn.Conv1d)): # Slightly different from the TF version which uses truncated_normal for initialization # cf https://github.com/pytorch/pytorch/pull/5617 module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() elif isinstance(module, SqueezeBertLayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0)
3,561
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/squeezebert/modeling_squeezebert.py
class SqueezeBertModel(SqueezeBertPreTrainedModel): def __init__(self, config): super().__init__(config) self.embeddings = SqueezeBertEmbeddings(config) self.encoder = SqueezeBertEncoder(config) self.pooler = SqueezeBertPooler(config) # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.embeddings.word_embeddings def set_input_embeddings(self, new_embeddings): self.embeddings.word_embeddings = new_embeddings def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads)
3,562
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/squeezebert/modeling_squeezebert.py
@add_start_docstrings_to_model_forward(SQUEEZEBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=BaseModelOutputWithPooling, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, token_type_ids: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, BaseModelOutputWithPooling]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = (
3,562
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/squeezebert/modeling_squeezebert.py
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict
3,562
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/squeezebert/modeling_squeezebert.py
if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask) input_shape = input_ids.size() elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: raise ValueError("You have to specify either input_ids or inputs_embeds") device = input_ids.device if input_ids is not None else inputs_embeds.device if attention_mask is None: attention_mask = torch.ones(input_shape, device=device) if token_type_ids is None: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
3,562
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/squeezebert/modeling_squeezebert.py
extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape) # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x n_heads x N x N # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
3,562
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/squeezebert/modeling_squeezebert.py
embedding_output = self.embeddings( input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds ) encoder_outputs = self.encoder( hidden_states=embedding_output, attention_mask=extended_attention_mask, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = encoder_outputs[0] pooled_output = self.pooler(sequence_output) if not return_dict: return (sequence_output, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPooling( last_hidden_state=sequence_output, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, )
3,562
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/squeezebert/modeling_squeezebert.py
class SqueezeBertForMaskedLM(SqueezeBertPreTrainedModel): _tied_weights_keys = ["cls.predictions.decoder.weight", "cls.predictions.decoder.bias"] def __init__(self, config): super().__init__(config) self.transformer = SqueezeBertModel(config) self.cls = SqueezeBertOnlyMLMHead(config) # Initialize weights and apply final processing self.post_init() def get_output_embeddings(self): return self.cls.predictions.decoder def set_output_embeddings(self, new_embeddings): self.cls.predictions.decoder = new_embeddings self.cls.predictions.bias = new_embeddings.bias
3,563
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/squeezebert/modeling_squeezebert.py
@add_start_docstrings_to_model_forward(SQUEEZEBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=MaskedLMOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, token_type_ids: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, labels: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, MaskedLMOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
3,563
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/squeezebert/modeling_squeezebert.py
Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]` """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
3,563
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/squeezebert/modeling_squeezebert.py
outputs = self.transformer( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] prediction_scores = self.cls(sequence_output) masked_lm_loss = None if labels is not None: loss_fct = CrossEntropyLoss() # -100 index = padding token masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1)) if not return_dict: output = (prediction_scores,) + outputs[2:] return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
3,563
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/squeezebert/modeling_squeezebert.py
return MaskedLMOutput( loss=masked_lm_loss, logits=prediction_scores, hidden_states=outputs.hidden_states, attentions=outputs.attentions, )
3,563
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/squeezebert/modeling_squeezebert.py
class SqueezeBertForSequenceClassification(SqueezeBertPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.config = config self.transformer = SqueezeBertModel(config) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, self.config.num_labels) # Initialize weights and apply final processing self.post_init()
3,564
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/squeezebert/modeling_squeezebert.py
@add_start_docstrings_to_model_forward(SQUEEZEBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=SequenceClassifierOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, token_type_ids: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, labels: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, SequenceClassifierOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
3,564
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/squeezebert/modeling_squeezebert.py
Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
3,564
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/squeezebert/modeling_squeezebert.py
outputs = self.transformer( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) pooled_output = outputs[1] pooled_output = self.dropout(pooled_output) logits = self.classifier(pooled_output)
3,564
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/squeezebert/modeling_squeezebert.py
loss = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = "regression" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = "single_label_classification" else: self.config.problem_type = "multi_label_classification"
3,564
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/squeezebert/modeling_squeezebert.py
if self.config.problem_type == "regression": loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(logits.squeeze(), labels.squeeze()) else: loss = loss_fct(logits, labels) elif self.config.problem_type == "single_label_classification": loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) elif self.config.problem_type == "multi_label_classification": loss_fct = BCEWithLogitsLoss() loss = loss_fct(logits, labels) if not return_dict: output = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return SequenceClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, )
3,564
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/squeezebert/modeling_squeezebert.py
class SqueezeBertForMultipleChoice(SqueezeBertPreTrainedModel): def __init__(self, config): super().__init__(config) self.transformer = SqueezeBertModel(config) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, 1) # Initialize weights and apply final processing self.post_init()
3,565
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/squeezebert/modeling_squeezebert.py
@add_start_docstrings_to_model_forward( SQUEEZEBERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length") ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=MultipleChoiceModelOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, token_type_ids: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, labels: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, MultipleChoiceModelOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
3,565
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/squeezebert/modeling_squeezebert.py
Labels for computing the multiple choice classification loss. Indices should be in `[0, ..., num_choices-1]` where *num_choices* is the size of the second dimension of the input tensors. (see *input_ids* above) """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]
3,565
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/squeezebert/modeling_squeezebert.py
input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None inputs_embeds = ( inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1)) if inputs_embeds is not None else None ) outputs = self.transformer( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, )
3,565
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/squeezebert/modeling_squeezebert.py
pooled_output = outputs[1] pooled_output = self.dropout(pooled_output) logits = self.classifier(pooled_output) reshaped_logits = logits.view(-1, num_choices) loss = None if labels is not None: loss_fct = CrossEntropyLoss() loss = loss_fct(reshaped_logits, labels) if not return_dict: output = (reshaped_logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return MultipleChoiceModelOutput( loss=loss, logits=reshaped_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, )
3,565
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/squeezebert/modeling_squeezebert.py
class SqueezeBertForTokenClassification(SqueezeBertPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.transformer = SqueezeBertModel(config) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, config.num_labels) # Initialize weights and apply final processing self.post_init()
3,566
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/squeezebert/modeling_squeezebert.py
@add_start_docstrings_to_model_forward(SQUEEZEBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=TokenClassifierOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, token_type_ids: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, labels: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, TokenClassifierOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
3,566
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/squeezebert/modeling_squeezebert.py
Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`. """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
3,566
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/squeezebert/modeling_squeezebert.py
outputs = self.transformer( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] sequence_output = self.dropout(sequence_output) logits = self.classifier(sequence_output) loss = None if labels is not None: loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) if not return_dict: output = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output
3,566
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/squeezebert/modeling_squeezebert.py
return TokenClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, )
3,566
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/squeezebert/modeling_squeezebert.py
class SqueezeBertForQuestionAnswering(SqueezeBertPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.transformer = SqueezeBertModel(config) self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels) # Initialize weights and apply final processing self.post_init()
3,567
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/squeezebert/modeling_squeezebert.py
@add_start_docstrings_to_model_forward(SQUEEZEBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=QuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, token_type_ids: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, start_positions: Optional[torch.Tensor] = None, end_positions: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, QuestionAnsweringModelOutput]: r"""
3,567
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/squeezebert/modeling_squeezebert.py
start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for position (index) of the start of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (*sequence_length*). Position outside of the sequence are not taken into account for computing the loss. end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for position (index) of the end of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (*sequence_length*). Position outside of the sequence are not taken into account for computing the loss. """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
3,567
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/squeezebert/modeling_squeezebert.py
outputs = self.transformer( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] logits = self.qa_outputs(sequence_output) start_logits, end_logits = logits.split(1, dim=-1) start_logits = start_logits.squeeze(-1).contiguous() end_logits = end_logits.squeeze(-1).contiguous()
3,567
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/squeezebert/modeling_squeezebert.py
total_loss = None if start_positions is not None and end_positions is not None: # If we are on multi-GPU, split add a dimension if len(start_positions.size()) > 1: start_positions = start_positions.squeeze(-1) if len(end_positions.size()) > 1: end_positions = end_positions.squeeze(-1) # sometimes the start/end positions are outside our model inputs, we ignore these terms ignored_index = start_logits.size(1) start_positions = start_positions.clamp(0, ignored_index) end_positions = end_positions.clamp(0, ignored_index) loss_fct = CrossEntropyLoss(ignore_index=ignored_index) start_loss = loss_fct(start_logits, start_positions) end_loss = loss_fct(end_logits, end_positions) total_loss = (start_loss + end_loss) / 2
3,567
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/squeezebert/modeling_squeezebert.py
if not return_dict: output = (start_logits, end_logits) + outputs[2:] return ((total_loss,) + output) if total_loss is not None else output return QuestionAnsweringModelOutput( loss=total_loss, start_logits=start_logits, end_logits=end_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, )
3,567
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/squeezebert/modeling_squeezebert.py
class SqueezeBertConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`SqueezeBertModel`]. It is used to instantiate a SqueezeBERT model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the SqueezeBERT [squeezebert/squeezebert-uncased](https://huggingface.co/squeezebert/squeezebert-uncased) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.
3,568
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/squeezebert/configuration_squeezebert.py
Args: vocab_size (`int`, *optional*, defaults to 30522): Vocabulary size of the SqueezeBERT model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`SqueezeBertModel`]. hidden_size (`int`, *optional*, defaults to 768): Dimensionality of the encoder layers and the pooler layer. num_hidden_layers (`int`, *optional*, defaults to 12): Number of hidden layers in the Transformer encoder. num_attention_heads (`int`, *optional*, defaults to 12): Number of attention heads for each attention layer in the Transformer encoder. intermediate_size (`int`, *optional*, defaults to 3072): Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder. hidden_act (`str` or `Callable`, *optional*, defaults to `"gelu"`):
3,568
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/squeezebert/configuration_squeezebert.py