Columns:
text: string, 1 to 1.02k characters
class_index: int64, 0 to 10.8k
source: string, 85 to 188 characters
def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "efficientformer", None) is not None: with tf.name_scope(self.efficientformer.name): self.efficientformer.build(None)
10,482
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deprecated/efficientformer/modeling_tf_efficientformer.py
class TFEfficientFormerForImageClassification(TFEfficientFormerPreTrainedModel, TFSequenceClassificationLoss): def __init__(self, config: EfficientFormerConfig): super().__init__(config) self.num_labels = config.num_labels self.efficientformer = TFEfficientFormerMainLayer(config, name="efficientformer") # Classifier head self.classifier = ( keras.layers.Dense(config.num_labels, name="classifier") if config.num_labels > 0 else keras.layers.Activation("linear", name="classifier") ) self.config = config
10,483
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deprecated/efficientformer/modeling_tf_efficientformer.py
@unpack_inputs @add_start_docstrings_to_model_forward(EFFICIENTFORMER_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT, output_type=TFImageClassifierOutput, config_class=_CONFIG_FOR_DOC, expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT, ) def call( self, pixel_values: Optional[tf.Tensor] = None, labels: Optional[tf.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, training: bool = False, ) -> Union[tf.Tensor, TFImageClassifierOutput]: r""" labels (`tf.Tensor` of shape `(batch_size,)`, *optional*): Labels for computing the image classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
10,483
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deprecated/efficientformer/modeling_tf_efficientformer.py
`config.num_labels > 1` a classification loss is computed (Cross-Entropy). """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
10,483
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deprecated/efficientformer/modeling_tf_efficientformer.py
outputs = self.efficientformer( pixel_values=pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) sequence_output = outputs[0] logits = self.classifier(tf.reduce_mean(sequence_output, axis=-2)) loss = None if labels is None else self.hf_compute_loss(labels, logits) if not return_dict: output = (logits,) + outputs[1:] return ((loss,) + output) if loss is not None else output return TFImageClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions )
10,483
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deprecated/efficientformer/modeling_tf_efficientformer.py
def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "efficientformer", None) is not None: with tf.name_scope(self.efficientformer.name): self.efficientformer.build(None) if getattr(self, "classifier", None) is not None: if hasattr(self.classifier, "name"): with tf.name_scope(self.classifier.name): self.classifier.build([None, None, self.config.hidden_sizes[-1]])
10,483
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deprecated/efficientformer/modeling_tf_efficientformer.py
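A minimal inference sketch for the classification head defined above: the last hidden state is mean-pooled over the token axis and passed through the `classifier` Dense layer. The checkpoint name and image path are assumptions, and the TF classes sit under transformers' deprecated namespace, so imports may differ across versions.

```python
# Hedged usage sketch for TFEfficientFormerForImageClassification (defined above).
import tensorflow as tf
from PIL import Image
from transformers import AutoImageProcessor, TFEfficientFormerForImageClassification

image = Image.open("example.jpg")  # any RGB image (path is an assumption)

processor = AutoImageProcessor.from_pretrained("snap-research/efficientformer-l1-300")
model = TFEfficientFormerForImageClassification.from_pretrained("snap-research/efficientformer-l1-300")

inputs = processor(images=image, return_tensors="tf")
outputs = model(**inputs)

# `call` above mean-pools the last hidden state before the classifier, so
# `outputs.logits` has shape (batch_size, config.num_labels)
predicted_class = int(tf.math.argmax(outputs.logits, axis=-1)[0])
print(model.config.id2label[predicted_class])
```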
class TFEfficientFormerForImageClassificationWithTeacherOutput(ModelOutput): """ Output type of [`TFEfficientFormerForImageClassificationWithTeacher`]. Args: logits (`tf.Tensor` of shape `(batch_size, config.num_labels)`): Prediction scores as the average of the cls_logits and distillation logits. cls_logits (`tf.Tensor` of shape `(batch_size, config.num_labels)`): Prediction scores of the classification head (i.e. the linear layer on top of the final hidden state of the class token). distillation_logits (`tf.Tensor` of shape `(batch_size, config.num_labels)`): Prediction scores of the distillation head (i.e. the linear layer on top of the final hidden state of the distillation token). hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
10,484
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deprecated/efficientformer/modeling_tf_efficientformer.py
Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """
10,484
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deprecated/efficientformer/modeling_tf_efficientformer.py
logits: tf.Tensor = None cls_logits: tf.Tensor = None distillation_logits: tf.Tensor = None hidden_states: Optional[Tuple[tf.Tensor]] = None attentions: Optional[Tuple[tf.Tensor]] = None
10,484
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deprecated/efficientformer/modeling_tf_efficientformer.py
class TFEfficientFormerForImageClassificationWithTeacher(TFEfficientFormerPreTrainedModel): def __init__(self, config: EfficientFormerConfig) -> None: super().__init__(config) self.num_labels = config.num_labels self.efficientformer = TFEfficientFormerMainLayer(config, name="efficientformer") # Classifier heads self.classifier = ( keras.layers.Dense(config.num_labels, name="classifier") if config.num_labels > 0 else keras.layers.Activation("linear", name="classifier") ) self.distillation_classifier = ( keras.layers.Dense(config.num_labels, name="distillation_classifier") if config.num_labels > 0 else keras.layers.Activation("linear", name="distillation_classifier") )
10,485
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deprecated/efficientformer/modeling_tf_efficientformer.py
@unpack_inputs @add_start_docstrings_to_model_forward(EFFICIENTFORMER_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT, output_type=TFEfficientFormerForImageClassificationWithTeacherOutput, config_class=_CONFIG_FOR_DOC, expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT, ) def call( self, pixel_values: Optional[tf.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, training: bool = False, ) -> Union[tuple, TFEfficientFormerForImageClassificationWithTeacherOutput]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict if training: raise Exception( "This model supports inference-only. Fine-tuning with distillation (i.e. with a teacher) is not yet supported." )
10,485
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deprecated/efficientformer/modeling_tf_efficientformer.py
outputs = self.efficientformer( pixel_values=pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) sequence_output = outputs[0] cls_logits = self.classifier(tf.reduce_mean(sequence_output, axis=-2)) distillation_logits = self.distillation_classifier(tf.reduce_mean(sequence_output, axis=-2)) logits = (cls_logits + distillation_logits) / 2 if not return_dict: output = (logits, cls_logits, distillation_logits) + outputs[1:] return output return TFEfficientFormerForImageClassificationWithTeacherOutput( logits=logits, cls_logits=cls_logits, distillation_logits=distillation_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, )
10,485
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deprecated/efficientformer/modeling_tf_efficientformer.py
def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "efficientformer", None) is not None: with tf.name_scope(self.efficientformer.name): self.efficientformer.build(None) if getattr(self, "classifier", None) is not None: if hasattr(self.classifier, "name"): with tf.name_scope(self.classifier.name): self.classifier.build([None, None, self.config.hidden_sizes[-1]]) if getattr(self, "distillation_classifier", None) is not None: if hasattr(self.distillation_classifier, "name"): with tf.name_scope(self.distillation_classifier.name): self.distillation_classifier.build([None, None, self.config.hidden_sizes[-1]])
10,485
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deprecated/efficientformer/modeling_tf_efficientformer.py
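The teacher variant above is inference-only (calling it with `training=True` raises), and its final logits are simply the element-wise mean of the classification and distillation heads. A tiny numeric check of that averaging step:

```python
import tensorflow as tf

# Toy check of the logit averaging performed in `call` above.
cls_logits = tf.constant([[2.0, 0.0, 1.0]])
distillation_logits = tf.constant([[0.0, 2.0, 1.0]])

logits = (cls_logits + distillation_logits) / 2
print(logits.numpy())  # [[1. 1. 1.]]
```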
class ScaNNSearcher: """Note that ScaNNSearcher cannot currently be used within the model. In future versions, it might however be included.""" def __init__( self, db, num_neighbors, dimensions_per_block=2, num_leaves=1000, num_leaves_to_search=100, training_sample_size=100000, ): """Build scann searcher.""" from scann.scann_ops.py.scann_ops_pybind import builder as Builder builder = Builder(db=db, num_neighbors=num_neighbors, distance_measure="dot_product") builder = builder.tree( num_leaves=num_leaves, num_leaves_to_search=num_leaves_to_search, training_sample_size=training_sample_size ) builder = builder.score_ah(dimensions_per_block=dimensions_per_block) self.searcher = builder.build()
10,486
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deprecated/realm/retrieval_realm.py
def search_batched(self, question_projection): retrieved_block_ids, _ = self.searcher.search_batched(question_projection.detach().cpu()) return retrieved_block_ids.astype("int64")
10,486
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deprecated/realm/retrieval_realm.py
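A hedged sketch of how the ScaNNSearcher wrapper could be driven, assuming the optional `scann` package is installed. The array shapes and neighbor count are illustrative; `search_batched` expects a torch tensor because it calls `.detach().cpu()` on its argument.

```python
import numpy as np
import torch

# Illustrative shapes: 100k evidence-block embeddings of dimension 128.
db = np.random.randn(100_000, 128).astype(np.float32)
searcher = ScaNNSearcher(db, num_neighbors=8)

# A batch of 4 question projections; search_batched returns int64 block indices.
question_projection = torch.randn(4, 128)
block_ids = searcher.search_batched(question_projection)  # shape (4, 8)
```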
class RealmRetriever: """The REALM retriever, which outputs the retrieved evidence block along with whether the block contains answers and the answer positions. Parameters: block_records (`np.ndarray`): A numpy array which contains evidence texts. tokenizer ([`RealmTokenizer`]): The tokenizer to encode retrieved texts. """ def __init__(self, block_records, tokenizer): super().__init__() self.block_records = block_records self.tokenizer = tokenizer def __call__(self, retrieved_block_ids, question_input_ids, answer_ids, max_length=None, return_tensors="pt"): retrieved_blocks = np.take(self.block_records, indices=retrieved_block_ids, axis=0) question = self.tokenizer.decode(question_input_ids[0], skip_special_tokens=True)
10,487
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deprecated/realm/retrieval_realm.py
text = [] text_pair = [] for retrieved_block in retrieved_blocks: text.append(question) text_pair.append(retrieved_block.decode()) concat_inputs = self.tokenizer( text, text_pair, padding=True, truncation=True, return_special_tokens_mask=True, max_length=max_length ) concat_inputs_tensors = concat_inputs.convert_to_tensors(return_tensors) if answer_ids is not None: return self.block_has_answer(concat_inputs, answer_ids) + (concat_inputs_tensors,) else: return (None, None, None, concat_inputs_tensors)
10,487
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deprecated/realm/retrieval_realm.py
@classmethod def from_pretrained(cls, pretrained_model_name_or_path: Optional[Union[str, os.PathLike]], *init_inputs, **kwargs): if os.path.isdir(pretrained_model_name_or_path): block_records_path = os.path.join(pretrained_model_name_or_path, _REALM_BLOCK_RECORDS_FILENAME) else: block_records_path = hf_hub_download( repo_id=pretrained_model_name_or_path, filename=_REALM_BLOCK_RECORDS_FILENAME, **kwargs ) block_records = np.load(block_records_path, allow_pickle=True) tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name_or_path, *init_inputs, **kwargs) return cls(block_records, tokenizer) def save_pretrained(self, save_directory): # save block records np.save(os.path.join(save_directory, _REALM_BLOCK_RECORDS_FILENAME), self.block_records) # save tokenizer self.tokenizer.save_pretrained(save_directory)
10,487
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deprecated/realm/retrieval_realm.py
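A hedged end-to-end sketch of the retriever: `from_pretrained` fetches the block-records file plus a tokenizer, and `__call__` concatenates the question with each retrieved block and reports answer spans. The checkpoint name, question, and toy block ids are assumptions.

```python
import numpy as np

# Assumed checkpoint; any repo that ships the block-records file and a
# compatible tokenizer would be loaded the same way.
retriever = RealmRetriever.from_pretrained("google/realm-orqa-nq-openqa")

question_input_ids = retriever.tokenizer(
    "What is the capital of France?", return_tensors="np"
).input_ids
retrieved_block_ids = np.array([0, 1, 2])  # e.g. output of ScaNNSearcher.search_batched
answer_ids = [retriever.tokenizer("Paris", add_special_tokens=False).input_ids]

has_answers, start_pos, end_pos, concat_inputs = retriever(
    retrieved_block_ids, question_input_ids, answer_ids, max_length=320
)
```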
def block_has_answer(self, concat_inputs, answer_ids): """check if retrieved_blocks has answers.""" has_answers = [] start_pos = [] end_pos = [] max_answers = 0 for input_id in concat_inputs.input_ids: input_id_list = input_id.tolist() # Check answers between two [SEP] tokens first_sep_idx = input_id_list.index(self.tokenizer.sep_token_id) second_sep_idx = first_sep_idx + 1 + input_id_list[first_sep_idx + 1 :].index(self.tokenizer.sep_token_id) start_pos.append([]) end_pos.append([]) for answer in answer_ids: for idx in range(first_sep_idx + 1, second_sep_idx): if answer[0] == input_id_list[idx]: if input_id_list[idx : idx + len(answer)] == answer: start_pos[-1].append(idx) end_pos[-1].append(idx + len(answer) - 1)
10,487
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deprecated/realm/retrieval_realm.py
if len(start_pos[-1]) == 0: has_answers.append(False) else: has_answers.append(True) if len(start_pos[-1]) > max_answers: max_answers = len(start_pos[-1]) # Pad -1 to max_answers for start_pos_, end_pos_ in zip(start_pos, end_pos): if len(start_pos_) < max_answers: padded = [-1] * (max_answers - len(start_pos_)) start_pos_ += padded end_pos_ += padded return has_answers, start_pos, end_pos
10,487
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deprecated/realm/retrieval_realm.py
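A small standalone illustration of the span search in `block_has_answer` above: answer matches are only looked for between the first and second `[SEP]` of each concatenated input, and their start/end indices are collected. The token ids below are made up.

```python
# Made-up ids: [CLS] q q [SEP] 5 6 7 8 [SEP]; a BERT-style [SEP] id of 102 is assumed.
sep_id = 102
input_id_list = [101, 7, 8, 102, 5, 6, 7, 8, 102]
answer = [6, 7]

first_sep_idx = input_id_list.index(sep_id)
second_sep_idx = first_sep_idx + 1 + input_id_list[first_sep_idx + 1:].index(sep_id)

spans = [
    (idx, idx + len(answer) - 1)
    for idx in range(first_sep_idx + 1, second_sep_idx)
    if input_id_list[idx: idx + len(answer)] == answer
]
print(spans)  # [(5, 6)] -> this block contains the answer
```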
class RealmTokenizer(PreTrainedTokenizer): r""" Construct a REALM tokenizer. [`RealmTokenizer`] is identical to [`BertTokenizer`] and runs end-to-end tokenization: punctuation splitting and wordpiece. This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods.
10,488
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deprecated/realm/tokenization_realm.py
Args: vocab_file (`str`): File containing the vocabulary. do_lower_case (`bool`, *optional*, defaults to `True`): Whether or not to lowercase the input when tokenizing. do_basic_tokenize (`bool`, *optional*, defaults to `True`): Whether or not to do basic tokenization before WordPiece. never_split (`Iterable`, *optional*): Collection of tokens which will never be split during tokenization. Only has an effect when `do_basic_tokenize=True` unk_token (`str`, *optional*, defaults to `"[UNK]"`): The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead. sep_token (`str`, *optional*, defaults to `"[SEP]"`): The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
10,488
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deprecated/realm/tokenization_realm.py
sequence classification or for a text and a question for question answering. It is also used as the last token of a sequence built with special tokens. pad_token (`str`, *optional*, defaults to `"[PAD]"`): The token used for padding, for example when batching sequences of different lengths. cls_token (`str`, *optional*, defaults to `"[CLS]"`): The classifier token which is used when doing sequence classification (classification of the whole sequence instead of per-token classification). It is the first token of the sequence when built with special tokens. mask_token (`str`, *optional*, defaults to `"[MASK]"`): The token used for masking values. This is the token used when training this model with masked language modeling. This is the token which the model will try to predict. tokenize_chinese_chars (`bool`, *optional*, defaults to `True`):
10,488
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deprecated/realm/tokenization_realm.py
Whether or not to tokenize Chinese characters.
10,488
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deprecated/realm/tokenization_realm.py
This should likely be deactivated for Japanese (see this [issue](https://github.com/huggingface/transformers/issues/328)). strip_accents (`bool`, *optional*): Whether or not to strip all accents. If this option is not specified, then it will be determined by the value for `lowercase` (as in the original BERT). """ vocab_files_names = VOCAB_FILES_NAMES
10,488
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deprecated/realm/tokenization_realm.py
def __init__( self, vocab_file, do_lower_case=True, do_basic_tokenize=True, never_split=None, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs, ): if not os.path.isfile(vocab_file): raise ValueError( f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained" " model use `tokenizer = RealmTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`" ) self.vocab = load_vocab(vocab_file) self.ids_to_tokens = collections.OrderedDict([(ids, tok) for tok, ids in self.vocab.items()]) self.do_basic_tokenize = do_basic_tokenize if do_basic_tokenize: self.basic_tokenizer = BasicTokenizer( do_lower_case=do_lower_case,
10,488
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deprecated/realm/tokenization_realm.py
never_split=never_split, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, ) self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab, unk_token=str(unk_token)) super().__init__( do_lower_case=do_lower_case, do_basic_tokenize=do_basic_tokenize, never_split=never_split, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs, )
10,488
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deprecated/realm/tokenization_realm.py
@property def do_lower_case(self): return self.basic_tokenizer.do_lower_case @property def vocab_size(self): return len(self.vocab) def get_vocab(self): return dict(self.vocab, **self.added_tokens_encoder) def _tokenize(self, text): split_tokens = [] if self.do_basic_tokenize: for token in self.basic_tokenizer.tokenize(text, never_split=self.all_special_tokens): # If the token is part of the never_split set if token in self.basic_tokenizer.never_split: split_tokens.append(token) else: split_tokens += self.wordpiece_tokenizer.tokenize(token) else: split_tokens = self.wordpiece_tokenizer.tokenize(text) return split_tokens def _convert_token_to_id(self, token): """Converts a token (str) in an id using the vocab.""" return self.vocab.get(token, self.vocab.get(self.unk_token))
10,488
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deprecated/realm/tokenization_realm.py
def _convert_id_to_token(self, index): """Converts an index (integer) in a token (str) using the vocab.""" return self.ids_to_tokens.get(index, self.unk_token) def convert_tokens_to_string(self, tokens): """Converts a sequence of tokens (string) in a single string.""" out_string = " ".join(tokens).replace(" ##", "").strip() return out_string def batch_encode_candidates(self, text, **kwargs): r""" Encode a batch of text or text pair. This method is similar to regular __call__ method but has the following differences: 1. Handle additional num_candidate axis. (batch_size, num_candidates, text) 2. Always pad the sequences to *max_length*. 3. Must specify *max_length* in order to stack packs of candidates into a batch. - single sequence: `[CLS] X [SEP]` - pair of sequences: `[CLS] A [SEP] B [SEP]`
10,488
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deprecated/realm/tokenization_realm.py
Args: text (`List[List[str]]`): The batch of sequences to be encoded. Each sequence must be in this format: (batch_size, num_candidates, text). text_pair (`List[List[str]]`, *optional*): The batch of sequences to be encoded. Each sequence must be in this format: (batch_size, num_candidates, text). **kwargs: Keyword arguments of the __call__ method. Returns: [`BatchEncoding`]: Encoded text or text pair. Example: ```python >>> from transformers import RealmTokenizer >>> # batch_size = 2, num_candidates = 2 >>> text = [["Hello world!", "Nice to meet you!"], ["The cute cat.", "The adorable dog."]] >>> tokenizer = RealmTokenizer.from_pretrained("google/realm-cc-news-pretrained-encoder") >>> tokenized_text = tokenizer.batch_encode_candidates(text, max_length=10, return_tensors="pt") ```"""
10,488
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deprecated/realm/tokenization_realm.py
# Always using a fixed sequence length to encode in order to stack candidates into a batch. kwargs["padding"] = PaddingStrategy.MAX_LENGTH batch_text = text batch_text_pair = kwargs.pop("text_pair", None) return_tensors = kwargs.pop("return_tensors", None) output_data = { "input_ids": [], "attention_mask": [], "token_type_ids": [], } for idx, candidate_text in enumerate(batch_text): if batch_text_pair is not None: candidate_text_pair = batch_text_pair[idx] else: candidate_text_pair = None encoded_candidates = super().__call__(candidate_text, candidate_text_pair, return_tensors=None, **kwargs) encoded_input_ids = encoded_candidates.get("input_ids") encoded_attention_mask = encoded_candidates.get("attention_mask") encoded_token_type_ids = encoded_candidates.get("token_type_ids")
10,488
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deprecated/realm/tokenization_realm.py
if encoded_input_ids is not None: output_data["input_ids"].append(encoded_input_ids) if encoded_attention_mask is not None: output_data["attention_mask"].append(encoded_attention_mask) if encoded_token_type_ids is not None: output_data["token_type_ids"].append(encoded_token_type_ids) output_data = {key: item for key, item in output_data.items() if len(item) != 0} return BatchEncoding(output_data, tensor_type=return_tensors) def build_inputs_with_special_tokens( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: """ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. A REALM sequence has the following format: - single sequence: `[CLS] X [SEP]` - pair of sequences: `[CLS] A [SEP] B [SEP]`
10,488
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deprecated/realm/tokenization_realm.py
Args: token_ids_0 (`List[int]`): List of IDs to which the special tokens will be added. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens. """ if token_ids_1 is None: return [self.cls_token_id] + token_ids_0 + [self.sep_token_id] cls = [self.cls_token_id] sep = [self.sep_token_id] return cls + token_ids_0 + sep + token_ids_1 + sep def get_special_tokens_mask( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False ) -> List[int]: """ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer `prepare_for_model` method.
10,488
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deprecated/realm/tokenization_realm.py
Args: token_ids_0 (`List[int]`): List of IDs. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. already_has_special_tokens (`bool`, *optional*, defaults to `False`): Whether or not the token list is already formatted with special tokens for the model. Returns: `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token. """ if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True ) if token_ids_1 is not None: return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1] return [1] + ([0] * len(token_ids_0)) + [1]
10,488
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deprecated/realm/tokenization_realm.py
def create_token_type_ids_from_sequences( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: """ Create a mask from the two sequences passed to be used in a sequence-pair classification task. A REALM sequence pair mask has the following format: ``` 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 | first sequence | second sequence | ``` If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s). Args: token_ids_0 (`List[int]`): List of IDs. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs.
10,488
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deprecated/realm/tokenization_realm.py
Returns: `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s). """ sep = [self.sep_token_id] cls = [self.cls_token_id] if token_ids_1 is None: return len(cls + token_ids_0 + sep) * [0] return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
10,488
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deprecated/realm/tokenization_realm.py
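A quick check of the token-type layout documented above: zeros cover `[CLS] A [SEP]` and ones cover `B [SEP]`. The token ids are arbitrary placeholders.

```python
cls_id, sep_id = 101, 102  # placeholder special-token ids
token_ids_0 = [7, 8, 9]
token_ids_1 = [4, 5]

token_type_ids = len([cls_id] + token_ids_0 + [sep_id]) * [0] + len(token_ids_1 + [sep_id]) * [1]
print(token_type_ids)  # [0, 0, 0, 0, 0, 1, 1, 1]
```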
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: index = 0 if os.path.isdir(save_directory): vocab_file = os.path.join( save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) else: vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory with open(vocab_file, "w", encoding="utf-8") as writer: for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]): if index != token_index: logger.warning( f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive." " Please check that the vocabulary is not corrupted!" ) index = token_index writer.write(token + "\n") index += 1 return (vocab_file,)
10,488
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deprecated/realm/tokenization_realm.py
class BasicTokenizer: """ Constructs a BasicTokenizer that will run basic tokenization (punctuation splitting, lower casing, etc.). Args: do_lower_case (`bool`, *optional*, defaults to `True`): Whether or not to lowercase the input when tokenizing. never_split (`Iterable`, *optional*): Collection of tokens which will never be split during tokenization. Only has an effect when `do_basic_tokenize=True` tokenize_chinese_chars (`bool`, *optional*, defaults to `True`): Whether or not to tokenize Chinese characters. This should likely be deactivated for Japanese (see this [issue](https://github.com/huggingface/transformers/issues/328)). strip_accents (`bool`, *optional*): Whether or not to strip all accents. If this option is not specified, then it will be determined by the value for `lowercase` (as in the original BERT). """
10,489
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deprecated/realm/tokenization_realm.py
def __init__(self, do_lower_case=True, never_split=None, tokenize_chinese_chars=True, strip_accents=None): if never_split is None: never_split = [] self.do_lower_case = do_lower_case self.never_split = set(never_split) self.tokenize_chinese_chars = tokenize_chinese_chars self.strip_accents = strip_accents def tokenize(self, text, never_split=None): """ Basic Tokenization of a piece of text. Split on "white spaces" only, for sub-word tokenization, see WordPieceTokenizer.
10,489
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deprecated/realm/tokenization_realm.py
Args: never_split (`List[str]`, *optional*): List of tokens not to split. Kept for backward compatibility purposes; now implemented directly at the base class level (see [`PreTrainedTokenizer.tokenize`]). """ # union() returns a new set by concatenating the two sets. never_split = self.never_split.union(set(never_split)) if never_split else self.never_split text = self._clean_text(text)
10,489
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deprecated/realm/tokenization_realm.py
# This was added on November 1st, 2018 for the multilingual and Chinese # models. This is also applied to the English models now, but it doesn't # matter since the English models were not trained on any Chinese data # and generally don't have any Chinese data in them (there are Chinese # characters in the vocabulary because Wikipedia does have some Chinese # words in the English Wikipedia.). if self.tokenize_chinese_chars: text = self._tokenize_chinese_chars(text) orig_tokens = whitespace_tokenize(text) split_tokens = [] for token in orig_tokens: if token not in never_split: if self.do_lower_case: token = token.lower() if self.strip_accents is not False: token = self._run_strip_accents(token) elif self.strip_accents: token = self._run_strip_accents(token)
10,489
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deprecated/realm/tokenization_realm.py
split_tokens.extend(self._run_split_on_punc(token, never_split))
10,489
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deprecated/realm/tokenization_realm.py
output_tokens = whitespace_tokenize(" ".join(split_tokens)) return output_tokens def _run_strip_accents(self, text): """Strips accents from a piece of text.""" text = unicodedata.normalize("NFD", text) output = [] for char in text: cat = unicodedata.category(char) if cat == "Mn": continue output.append(char) return "".join(output)
10,489
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deprecated/realm/tokenization_realm.py
def _run_split_on_punc(self, text, never_split=None): """Splits punctuation on a piece of text.""" if never_split is not None and text in never_split: return [text] chars = list(text) i = 0 start_new_word = True output = [] while i < len(chars): char = chars[i] if _is_punctuation(char): output.append([char]) start_new_word = True else: if start_new_word: output.append([]) start_new_word = False output[-1].append(char) i += 1 return ["".join(x) for x in output]
10,489
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deprecated/realm/tokenization_realm.py
def _tokenize_chinese_chars(self, text): """Adds whitespace around any CJK character.""" output = [] for char in text: cp = ord(char) if self._is_chinese_char(cp): output.append(" ") output.append(char) output.append(" ") else: output.append(char) return "".join(output)
10,489
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deprecated/realm/tokenization_realm.py
def _is_chinese_char(self, cp): """Checks whether CP is the codepoint of a CJK character.""" # This defines a "chinese character" as anything in the CJK Unicode block: # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block) # # Note that the CJK Unicode block is NOT all Japanese and Korean characters, # despite its name. The modern Korean Hangul alphabet is a different block, # as are Japanese Hiragana and Katakana. Those alphabets are used to write # space-separated words, so they are not treated specially and are handled # like all of the other languages. if ( (cp >= 0x4E00 and cp <= 0x9FFF) or (cp >= 0x3400 and cp <= 0x4DBF) # or (cp >= 0x20000 and cp <= 0x2A6DF) # or (cp >= 0x2A700 and cp <= 0x2B73F) # or (cp >= 0x2B740 and cp <= 0x2B81F) # or (cp >= 0x2B820 and cp <= 0x2CEAF) # or (cp >= 0xF900 and cp <= 0xFAFF)
10,489
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deprecated/realm/tokenization_realm.py
or (cp >= 0x2F800 and cp <= 0x2FA1F) # ): # return True
10,489
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deprecated/realm/tokenization_realm.py
return False def _clean_text(self, text): """Performs invalid character removal and whitespace cleanup on text.""" output = [] for char in text: cp = ord(char) if cp == 0 or cp == 0xFFFD or _is_control(char): continue if _is_whitespace(char): output.append(" ") else: output.append(char) return "".join(output)
10,489
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deprecated/realm/tokenization_realm.py
class WordpieceTokenizer: """Runs WordPiece tokenization.""" def __init__(self, vocab, unk_token, max_input_chars_per_word=100): self.vocab = vocab self.unk_token = unk_token self.max_input_chars_per_word = max_input_chars_per_word def tokenize(self, text): """ Tokenizes a piece of text into its word pieces. This uses a greedy longest-match-first algorithm to perform tokenization using the given vocabulary. For example, `input = "unaffable"` will return as output `["un", "##aff", "##able"]`. Args: text: A single token or whitespace-separated tokens. This should have already been passed through *BasicTokenizer*. Returns: A list of wordpiece tokens. """
10,490
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deprecated/realm/tokenization_realm.py
output_tokens = [] for token in whitespace_tokenize(text): chars = list(token) if len(chars) > self.max_input_chars_per_word: output_tokens.append(self.unk_token) continue is_bad = False start = 0 sub_tokens = [] while start < len(chars): end = len(chars) cur_substr = None while start < end: substr = "".join(chars[start:end]) if start > 0: substr = "##" + substr if substr in self.vocab: cur_substr = substr break end -= 1 if cur_substr is None: is_bad = True break sub_tokens.append(cur_substr) start = end
10,490
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deprecated/realm/tokenization_realm.py
if is_bad: output_tokens.append(self.unk_token) else: output_tokens.extend(sub_tokens) return output_tokens
10,490
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deprecated/realm/tokenization_realm.py
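A toy run of the greedy longest-match-first behaviour described in the `tokenize` docstring, using the `WordpieceTokenizer` class defined above with a hand-written vocabulary instead of a real checkpoint.

```python
vocab = {"un": 0, "##aff": 1, "##able": 2, "[UNK]": 3}
wordpiece = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")

print(wordpiece.tokenize("unaffable"))  # ['un', '##aff', '##able']
print(wordpiece.tokenize("running"))    # ['[UNK]'] -- no matching pieces in this toy vocab
```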
class RealmConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of 1. [`RealmEmbedder`] 2. [`RealmScorer`] 3. [`RealmKnowledgeAugEncoder`] 4. [`RealmRetriever`] 5. [`RealmReader`] 6. [`RealmForOpenQA`] It is used to instantiate a REALM model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the REALM [google/realm-cc-news-pretrained-embedder](https://huggingface.co/google/realm-cc-news-pretrained-embedder) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.
10,491
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deprecated/realm/configuration_realm.py
Args: vocab_size (`int`, *optional*, defaults to 30522): Vocabulary size of the REALM model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`RealmEmbedder`], [`RealmScorer`], [`RealmKnowledgeAugEncoder`], or [`RealmReader`]. hidden_size (`int`, *optional*, defaults to 768): Dimension of the encoder layers and the pooler layer. retriever_proj_size (`int`, *optional*, defaults to 128): Dimension of the retriever(embedder) projection. num_hidden_layers (`int`, *optional*, defaults to 12): Number of hidden layers in the Transformer encoder. num_attention_heads (`int`, *optional*, defaults to 12): Number of attention heads for each attention layer in the Transformer encoder. num_candidates (`int`, *optional*, defaults to 8): Number of candidates inputted to the RealmScorer or RealmKnowledgeAugEncoder.
10,491
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deprecated/realm/configuration_realm.py
intermediate_size (`int`, *optional*, defaults to 3072): Dimension of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder. hidden_act (`str` or `function`, *optional*, defaults to `"gelu_new"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"` are supported. hidden_dropout_prob (`float`, *optional*, defaults to 0.1): The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1): The dropout ratio for the attention probabilities. max_position_embeddings (`int`, *optional*, defaults to 512): The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048).
10,491
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deprecated/realm/configuration_realm.py
type_vocab_size (`int`, *optional*, defaults to 2): The vocabulary size of the `token_type_ids` passed when calling [`RealmEmbedder`], [`RealmScorer`], [`RealmKnowledgeAugEncoder`], or [`RealmReader`]. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. layer_norm_eps (`float`, *optional*, defaults to 1e-12): The epsilon used by the layer normalization layers. span_hidden_size (`int`, *optional*, defaults to 256): Dimension of the reader's spans. max_span_width (`int`, *optional*, defaults to 10): Max span width of the reader. reader_layer_norm_eps (`float`, *optional*, defaults to 1e-3): The epsilon used by the reader's layer normalization layers. reader_beam_size (`int`, *optional*, defaults to 5): Beam size of the reader.
10,491
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deprecated/realm/configuration_realm.py
reader_seq_len (`int`, *optional*, defaults to 288+32): Maximum sequence length of the reader. num_block_records (`int`, *optional*, defaults to 13353718): Number of block records. searcher_beam_size (`int`, *optional*, defaults to 5000): Beam size of the searcher. Note that when eval mode is enabled, *searcher_beam_size* will be the same as *reader_beam_size*.
10,491
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deprecated/realm/configuration_realm.py
Example: ```python >>> from transformers import RealmConfig, RealmEmbedder >>> # Initializing a REALM realm-cc-news-pretrained-* style configuration >>> configuration = RealmConfig() >>> # Initializing a model (with random weights) from the google/realm-cc-news-pretrained-embedder style configuration >>> model = RealmEmbedder(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "realm"
10,491
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deprecated/realm/configuration_realm.py
def __init__( self, vocab_size=30522, hidden_size=768, retriever_proj_size=128, num_hidden_layers=12, num_attention_heads=12, num_candidates=8, intermediate_size=3072, hidden_act="gelu_new", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, span_hidden_size=256, max_span_width=10, reader_layer_norm_eps=1e-3, reader_beam_size=5, reader_seq_len=320, # 288 + 32 num_block_records=13353718, searcher_beam_size=5000, pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs, ): super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
10,491
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deprecated/realm/configuration_realm.py
# Common config self.vocab_size = vocab_size self.max_position_embeddings = max_position_embeddings self.hidden_size = hidden_size self.retriever_proj_size = retriever_proj_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.num_candidates = num_candidates self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.initializer_range = initializer_range self.type_vocab_size = type_vocab_size self.layer_norm_eps = layer_norm_eps # Reader config self.span_hidden_size = span_hidden_size self.max_span_width = max_span_width self.reader_layer_norm_eps = reader_layer_norm_eps self.reader_beam_size = reader_beam_size self.reader_seq_len = reader_seq_len
10,491
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deprecated/realm/configuration_realm.py
# Retrieval config self.num_block_records = num_block_records self.searcher_beam_size = searcher_beam_size
10,491
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deprecated/realm/configuration_realm.py
class RealmTokenizerFast(PreTrainedTokenizerFast): r""" Construct a "fast" REALM tokenizer (backed by HuggingFace's *tokenizers* library). Based on WordPiece. [`RealmTokenizerFast`] is identical to [`BertTokenizerFast`] and runs end-to-end tokenization: punctuation splitting and wordpiece. This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods.
10,492
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deprecated/realm/tokenization_realm_fast.py
Args: vocab_file (`str`): File containing the vocabulary. do_lower_case (`bool`, *optional*, defaults to `True`): Whether or not to lowercase the input when tokenizing. unk_token (`str`, *optional*, defaults to `"[UNK]"`): The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead. sep_token (`str`, *optional*, defaults to `"[SEP]"`): The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for sequence classification or for a text and a question for question answering. It is also used as the last token of a sequence built with special tokens. pad_token (`str`, *optional*, defaults to `"[PAD]"`): The token used for padding, for example when batching sequences of different lengths. cls_token (`str`, *optional*, defaults to `"[CLS]"`):
10,492
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deprecated/realm/tokenization_realm_fast.py
The classifier token which is used when doing sequence classification (classification of the whole sequence instead of per-token classification). It is the first token of the sequence when built with special tokens. mask_token (`str`, *optional*, defaults to `"[MASK]"`): The token used for masking values. This is the token used when training this model with masked language modeling. This is the token which the model will try to predict. clean_text (`bool`, *optional*, defaults to `True`): Whether or not to clean the text before tokenization by removing any control characters and replacing all whitespaces by the classic one. tokenize_chinese_chars (`bool`, *optional*, defaults to `True`): Whether or not to tokenize Chinese characters. This should likely be deactivated for Japanese (see [this issue](https://github.com/huggingface/transformers/issues/328)).
10,492
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deprecated/realm/tokenization_realm_fast.py
strip_accents (`bool`, *optional*): Whether or not to strip all accents. If this option is not specified, then it will be determined by the value for `lowercase` (as in the original BERT). wordpieces_prefix (`str`, *optional*, defaults to `"##"`): The prefix for subwords. """
10,492
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deprecated/realm/tokenization_realm_fast.py
vocab_files_names = VOCAB_FILES_NAMES slow_tokenizer_class = RealmTokenizer def __init__( self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs, ): super().__init__( vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs, )
10,492
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deprecated/realm/tokenization_realm_fast.py
normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__()) if ( normalizer_state.get("lowercase", do_lower_case) != do_lower_case or normalizer_state.get("strip_accents", strip_accents) != strip_accents or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars ): normalizer_class = getattr(normalizers, normalizer_state.pop("type")) normalizer_state["lowercase"] = do_lower_case normalizer_state["strip_accents"] = strip_accents normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state) self.do_lower_case = do_lower_case def batch_encode_candidates(self, text, **kwargs): r""" Encode a batch of text or text pair. This method is similar to regular __call__ method but has the following differences:
10,492
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deprecated/realm/tokenization_realm_fast.py
1. Handle additional num_candidate axis. (batch_size, num_candidates, text) 2. Always pad the sequences to *max_length*. 3. Must specify *max_length* in order to stack packs of candidates into a batch. - single sequence: `[CLS] X [SEP]` - pair of sequences: `[CLS] A [SEP] B [SEP]` Args: text (`List[List[str]]`): The batch of sequences to be encoded. Each sequence must be in this format: (batch_size, num_candidates, text). text_pair (`List[List[str]]`, *optional*): The batch of sequences to be encoded. Each sequence must be in this format: (batch_size, num_candidates, text). **kwargs: Keyword arguments of the __call__ method. Returns: [`BatchEncoding`]: Encoded text or text pair. Example: ```python >>> from transformers import RealmTokenizerFast
10,492
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deprecated/realm/tokenization_realm_fast.py
>>> # batch_size = 2, num_candidates = 2 >>> text = [["Hello world!", "Nice to meet you!"], ["The cute cat.", "The adorable dog."]] >>> tokenizer = RealmTokenizerFast.from_pretrained("google/realm-cc-news-pretrained-encoder") >>> tokenized_text = tokenizer.batch_encode_candidates(text, max_length=10, return_tensors="pt") ```""" # Always using a fixed sequence length to encode in order to stack candidates into a batch. kwargs["padding"] = PaddingStrategy.MAX_LENGTH batch_text = text batch_text_pair = kwargs.pop("text_pair", None) return_tensors = kwargs.pop("return_tensors", None) output_data = { "input_ids": [], "attention_mask": [], "token_type_ids": [], }
10,492
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deprecated/realm/tokenization_realm_fast.py
for idx, candidate_text in enumerate(batch_text): if batch_text_pair is not None: candidate_text_pair = batch_text_pair[idx] else: candidate_text_pair = None encoded_candidates = super().__call__(candidate_text, candidate_text_pair, return_tensors=None, **kwargs) encoded_input_ids = encoded_candidates.get("input_ids") encoded_attention_mask = encoded_candidates.get("attention_mask") encoded_token_type_ids = encoded_candidates.get("token_type_ids") if encoded_input_ids is not None: output_data["input_ids"].append(encoded_input_ids) if encoded_attention_mask is not None: output_data["attention_mask"].append(encoded_attention_mask) if encoded_token_type_ids is not None: output_data["token_type_ids"].append(encoded_token_type_ids)
10,492
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deprecated/realm/tokenization_realm_fast.py
output_data = {key: item for key, item in output_data.items() if len(item) != 0} return BatchEncoding(output_data, tensor_type=return_tensors) def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None): """ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. A REALM sequence has the following format: - single sequence: `[CLS] X [SEP]` - pair of sequences: `[CLS] A [SEP] B [SEP]` Args: token_ids_0 (`List[int]`): List of IDs to which the special tokens will be added. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens. """ output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
10,492
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deprecated/realm/tokenization_realm_fast.py
if token_ids_1 is not None: output += token_ids_1 + [self.sep_token_id] return output def create_token_type_ids_from_sequences( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: """ Create a mask from the two sequences passed to be used in a sequence-pair classification task. A REALM sequence pair mask has the following format: ``` 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 | first sequence | second sequence | ``` If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s). Args: token_ids_0 (`List[int]`): List of IDs. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs.
10,492
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deprecated/realm/tokenization_realm_fast.py
Returns: `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s). """ sep = [self.sep_token_id] cls = [self.cls_token_id] if token_ids_1 is None: return len(cls + token_ids_0 + sep) * [0] return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: files = self._tokenizer.model.save(save_directory, name=filename_prefix) return tuple(files)
10,492
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deprecated/realm/tokenization_realm_fast.py
class RealmEmbeddings(nn.Module): """Construct the embeddings from word, position and token_type embeddings.""" def __init__(self, config): super().__init__() self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id) self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size) self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
10,493
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deprecated/realm/modeling_realm.py
# self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load # any TensorFlow checkpoint file self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) # position_ids (1, len position emb) is contiguous in memory and exported when serialized self.position_embedding_type = getattr(config, "position_embedding_type", "absolute") self.register_buffer( "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False ) self.register_buffer( "token_type_ids", torch.zeros(self.position_ids.size(), dtype=torch.long), persistent=False )
10,493
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deprecated/realm/modeling_realm.py
def forward( self, input_ids: Optional[torch.LongTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, past_key_values_length: int = 0, ) -> torch.Tensor: if input_ids is not None: input_shape = input_ids.size() else: input_shape = inputs_embeds.size()[:-1] seq_length = input_shape[1] if position_ids is None: position_ids = self.position_ids[:, past_key_values_length : seq_length + past_key_values_length]
10,493
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deprecated/realm/modeling_realm.py
# Setting the token_type_ids to the registered buffer in constructor where it is all zeros, which usually occurs # when its auto-generated, registered buffer helps users when tracing the model without passing token_type_ids, solves # issue #5664 if token_type_ids is None: if hasattr(self, "token_type_ids"): buffered_token_type_ids = self.token_type_ids[:, :seq_length] buffered_token_type_ids_expanded = buffered_token_type_ids.expand(input_shape[0], seq_length) token_type_ids = buffered_token_type_ids_expanded else: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device) if inputs_embeds is None: inputs_embeds = self.word_embeddings(input_ids) token_type_embeddings = self.token_type_embeddings(token_type_ids)
10,493
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deprecated/realm/modeling_realm.py
embeddings = inputs_embeds + token_type_embeddings if self.position_embedding_type == "absolute": position_embeddings = self.position_embeddings(position_ids) embeddings += position_embeddings embeddings = self.LayerNorm(embeddings) embeddings = self.dropout(embeddings) return embeddings
10,493
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deprecated/realm/modeling_realm.py
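A minimal sketch of the embedding layer above: with absolute position embeddings, the output is the sum of word, token-type, and position embeddings, followed by LayerNorm and dropout. Default `RealmConfig` values and arbitrary input ids are assumed.

```python
import torch

config = RealmConfig()                 # hidden_size defaults to 768
embeddings = RealmEmbeddings(config)

input_ids = torch.tensor([[101, 2054, 2003, 102]])  # (batch_size=1, seq_len=4), arbitrary ids
hidden = embeddings(input_ids=input_ids)
print(hidden.shape)                    # torch.Size([1, 4, 768])
```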
class RealmSelfAttention(nn.Module): def __init__(self, config, position_embedding_type=None): super().__init__() if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"): raise ValueError( f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention " f"heads ({config.num_attention_heads})" ) self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.query = nn.Linear(config.hidden_size, self.all_head_size) self.key = nn.Linear(config.hidden_size, self.all_head_size) self.value = nn.Linear(config.hidden_size, self.all_head_size)
10,494
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deprecated/realm/modeling_realm.py
self.dropout = nn.Dropout(config.attention_probs_dropout_prob) self.position_embedding_type = position_embedding_type or getattr( config, "position_embedding_type", "absolute" ) if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query": self.max_position_embeddings = config.max_position_embeddings self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size) self.is_decoder = config.is_decoder def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor: new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) x = x.view(new_x_shape) return x.permute(0, 2, 1, 3)
10,494
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deprecated/realm/modeling_realm.py
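A shape walk-through of `transpose_for_scores` above: the flat `(batch, seq, hidden)` projection is split into heads and permuted to `(batch, heads, seq, head_size)` so attention scores can be computed per head with a single matmul. The dimensions below are the REALM defaults (12 heads of size 64).

```python
import torch

batch, seq, heads, head_size = 2, 5, 12, 64
x = torch.randn(batch, seq, heads * head_size)        # e.g. output of self.query(hidden_states)

x = x.view(batch, seq, heads, head_size).permute(0, 2, 1, 3)
print(x.shape)                                         # torch.Size([2, 12, 5, 64])
```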
def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.FloatTensor] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, output_attentions: Optional[bool] = False, ) -> Tuple[torch.Tensor]: mixed_query_layer = self.query(hidden_states) # If this is instantiated as a cross-attention module, the keys # and values come from an encoder; the attention mask needs to be # such that the encoder's padding tokens are not attended to. is_cross_attention = encoder_hidden_states is not None
10,494
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deprecated/realm/modeling_realm.py
if is_cross_attention and past_key_value is not None:
    # reuse k,v, cross_attentions
    key_layer = past_key_value[0]
    value_layer = past_key_value[1]
    attention_mask = encoder_attention_mask
elif is_cross_attention:
    key_layer = self.transpose_for_scores(self.key(encoder_hidden_states))
    value_layer = self.transpose_for_scores(self.value(encoder_hidden_states))
    attention_mask = encoder_attention_mask
elif past_key_value is not None:
    key_layer = self.transpose_for_scores(self.key(hidden_states))
    value_layer = self.transpose_for_scores(self.value(hidden_states))
    key_layer = torch.cat([past_key_value[0], key_layer], dim=2)
    value_layer = torch.cat([past_key_value[1], value_layer], dim=2)
else:
    key_layer = self.transpose_for_scores(self.key(hidden_states))
    value_layer = self.transpose_for_scores(self.value(hidden_states))
10,494
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deprecated/realm/modeling_realm.py
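A hedged illustration of the third branch above (uni-directional self-attention with a cache): previously computed keys/values are concatenated with the newly projected ones along the sequence axis (dim=2). All shapes below are hypothetical.

import torch

# Cached keys/values from 4 previously generated tokens: (batch, heads, seq, head_dim)
past_key = torch.randn(1, 12, 4, 64)
past_value = torch.randn(1, 12, 4, 64)

# Newly projected key/value for the single current token
new_key = torch.randn(1, 12, 1, 64)
new_value = torch.randn(1, 12, 1, 64)

key_layer = torch.cat([past_key, new_key], dim=2)        # (1, 12, 5, 64)
value_layer = torch.cat([past_value, new_value], dim=2)  # (1, 12, 5, 64)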
query_layer = self.transpose_for_scores(mixed_query_layer)

use_cache = past_key_value is not None
if self.is_decoder:
    # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
    # Further calls to cross_attention layer can then reuse all cross-attention
    # key/value_states (first "if" case)
    # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
    # all previous decoder key/value_states. Further calls to uni-directional self-attention
    # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
    # if encoder bi-directional self-attention `past_key_value` is always `None`
    past_key_value = (key_layer, value_layer)
10,494
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deprecated/realm/modeling_realm.py
# Take the dot product between "query" and "key" to get the raw attention scores.
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))

if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
    query_length, key_length = query_layer.shape[2], key_layer.shape[2]
    if use_cache:
        position_ids_l = torch.tensor(key_length - 1, dtype=torch.long, device=hidden_states.device).view(
            -1, 1
        )
    else:
        position_ids_l = torch.arange(query_length, dtype=torch.long, device=hidden_states.device).view(-1, 1)
    position_ids_r = torch.arange(key_length, dtype=torch.long, device=hidden_states.device).view(1, -1)
    distance = position_ids_l - position_ids_r
10,494
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deprecated/realm/modeling_realm.py
positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1)
positional_embedding = positional_embedding.to(dtype=query_layer.dtype)  # fp16 compatibility

if self.position_embedding_type == "relative_key":
    relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
    attention_scores = attention_scores + relative_position_scores
elif self.position_embedding_type == "relative_key_query":
    relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
    relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding)
    attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key
10,494
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deprecated/realm/modeling_realm.py
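A small sketch of how the relative-position term in the two chunks above is built, under hypothetical sizes (query and key length 3, max_position_embeddings 512): the pairwise distance matrix is shifted into a valid index range, looked up in a learned embedding table, and folded into the attention scores via einsum.

import torch
from torch import nn

max_position_embeddings, head_size = 512, 64
distance_embedding = nn.Embedding(2 * max_position_embeddings - 1, head_size)

query_length = key_length = 3
position_ids_l = torch.arange(query_length).view(-1, 1)  # (3, 1)
position_ids_r = torch.arange(key_length).view(1, -1)    # (1, 3)
distance = position_ids_l - position_ids_r                # values in [-(L-1), L-1]

# Shift distances into [0, 2*max - 2] so they are valid embedding indices
positional_embedding = distance_embedding(distance + max_position_embeddings - 1)  # (3, 3, 64)

query_layer = torch.randn(1, 12, query_length, head_size)  # (batch, heads, seq, head_dim)
relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
print(relative_position_scores.shape)  # torch.Size([1, 12, 3, 3])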
attention_scores = attention_scores / math.sqrt(self.attention_head_size)
if attention_mask is not None:
    # Apply the attention mask (precomputed for all layers in RealmModel forward() function)
    attention_scores = attention_scores + attention_mask

# Normalize the attention scores to probabilities.
attention_probs = nn.functional.softmax(attention_scores, dim=-1)

# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = self.dropout(attention_probs)

# Mask heads if we want to
if head_mask is not None:
    attention_probs = attention_probs * head_mask

context_layer = torch.matmul(attention_probs, value_layer)
10,494
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deprecated/realm/modeling_realm.py
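The chunk above is the standard scaled dot-product step. A compact, self-contained version of the same arithmetic (additive mask, softmax over the key axis, weighted sum of values) might look like this, with hypothetical shapes:

import math
import torch

batch, heads, seq, head_dim = 1, 12, 5, 64
query = torch.randn(batch, heads, seq, head_dim)
key = torch.randn(batch, heads, seq, head_dim)
value = torch.randn(batch, heads, seq, head_dim)

# Additive mask: 0 for visible positions, a very negative value for positions to hide
attention_mask = torch.zeros(batch, 1, 1, seq)
attention_mask[..., -1] = torch.finfo(torch.float32).min  # hide the last key position

scores = torch.matmul(query, key.transpose(-1, -2)) / math.sqrt(head_dim)
scores = scores + attention_mask
probs = torch.softmax(scores, dim=-1)   # each row sums to 1 over the key axis
context = torch.matmul(probs, value)    # (1, 12, 5, 64)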
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
context_layer = context_layer.view(new_context_layer_shape)

outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)

if self.is_decoder:
    outputs = outputs + (past_key_value,)
return outputs
10,494
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deprecated/realm/modeling_realm.py
class RealmSelfOutput(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
        hidden_states = self.dense(hidden_states)
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.LayerNorm(hidden_states + input_tensor)
        return hidden_states
10,495
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deprecated/realm/modeling_realm.py
class RealmAttention(nn.Module):
    def __init__(self, config, position_embedding_type=None):
        super().__init__()
        self.self = REALM_SELF_ATTENTION_CLASSES[config._attn_implementation](
            config, position_embedding_type=position_embedding_type
        )
        self.output = RealmSelfOutput(config)
        self.pruned_heads = set()

    def prune_heads(self, heads):
        if len(heads) == 0:
            return
        heads, index = find_pruneable_heads_and_indices(
            heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
        )

        # Prune linear layers
        self.self.query = prune_linear_layer(self.self.query, index)
        self.self.key = prune_linear_layer(self.self.key, index)
        self.self.value = prune_linear_layer(self.self.value, index)
        self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
10,496
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deprecated/realm/modeling_realm.py
# Update hyper params and store pruned heads
self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
self.pruned_heads = self.pruned_heads.union(heads)
10,496
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deprecated/realm/modeling_realm.py
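A worked example of the bookkeeping above, with hypothetical numbers: pruning 2 of 12 heads with a head size of 64 shrinks `all_head_size` from 768 to 640, and the query/key/value projections are narrowed to match by keeping only the surviving rows/columns.

num_attention_heads, attention_head_size = 12, 64
pruned = {0, 5}  # hypothetical heads to drop

num_attention_heads -= len(pruned)                          # 10
all_head_size = num_attention_heads * attention_head_size   # 640
# The q/k/v projections end up with out_features == 640 (and output.dense with
# in_features == 640) after the prune_linear_layer calls in the previous chunk.
print(num_attention_heads, all_head_size)  # 10 640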
def forward(
    self,
    hidden_states: torch.Tensor,
    attention_mask: Optional[torch.FloatTensor] = None,
    head_mask: Optional[torch.FloatTensor] = None,
    encoder_hidden_states: Optional[torch.FloatTensor] = None,
    encoder_attention_mask: Optional[torch.FloatTensor] = None,
    past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
    output_attentions: Optional[bool] = False,
) -> Tuple[torch.Tensor]:
    self_outputs = self.self(
        hidden_states,
        attention_mask,
        head_mask,
        encoder_hidden_states,
        encoder_attention_mask,
        past_key_value,
        output_attentions,
    )
    attention_output = self.output(self_outputs[0], hidden_states)
    outputs = (attention_output,) + self_outputs[1:]  # add attentions if we output them
    return outputs
10,496
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deprecated/realm/modeling_realm.py
class RealmIntermediate(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
        if isinstance(config.hidden_act, str):
            self.intermediate_act_fn = ACT2FN[config.hidden_act]
        else:
            self.intermediate_act_fn = config.hidden_act

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        hidden_states = self.dense(hidden_states)
        hidden_states = self.intermediate_act_fn(hidden_states)
        return hidden_states
10,497
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deprecated/realm/modeling_realm.py
class RealmOutput(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
        hidden_states = self.dense(hidden_states)
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.LayerNorm(hidden_states + input_tensor)
        return hidden_states
10,498
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deprecated/realm/modeling_realm.py
class RealmLayer(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.chunk_size_feed_forward = config.chunk_size_feed_forward
        self.seq_len_dim = 1
        self.attention = RealmAttention(config)
        self.is_decoder = config.is_decoder
        self.add_cross_attention = config.add_cross_attention
        if self.add_cross_attention:
            if not self.is_decoder:
                raise ValueError(f"{self} should be used as a decoder model if cross attention is added")
            self.crossattention = RealmAttention(config, position_embedding_type="absolute")
        self.intermediate = RealmIntermediate(config)
        self.output = RealmOutput(config)
10,499
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deprecated/realm/modeling_realm.py
def forward(
    self,
    hidden_states: torch.Tensor,
    attention_mask: Optional[torch.FloatTensor] = None,
    head_mask: Optional[torch.FloatTensor] = None,
    encoder_hidden_states: Optional[torch.FloatTensor] = None,
    encoder_attention_mask: Optional[torch.FloatTensor] = None,
    past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
    output_attentions: Optional[bool] = False,
) -> Tuple[torch.Tensor]:
    # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
    self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
    self_attention_outputs = self.attention(
        hidden_states,
        attention_mask,
        head_mask,
        output_attentions=output_attentions,
        past_key_value=self_attn_past_key_value,
    )
    attention_output = self_attention_outputs[0]
10,499
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deprecated/realm/modeling_realm.py
# if decoder, the last output is tuple of self-attn cache
if self.is_decoder:
    outputs = self_attention_outputs[1:-1]
    present_key_value = self_attention_outputs[-1]
else:
    outputs = self_attention_outputs[1:]  # add self attentions if we output attention weights

cross_attn_present_key_value = None
if self.is_decoder and encoder_hidden_states is not None:
    if not hasattr(self, "crossattention"):
        raise ValueError(
            f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers"
            " by setting `config.add_cross_attention=True`"
        )
10,499
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deprecated/realm/modeling_realm.py
# cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple
cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
cross_attention_outputs = self.crossattention(
    attention_output,
    attention_mask,
    head_mask,
    encoder_hidden_states,
    encoder_attention_mask,
    cross_attn_past_key_value,
    output_attentions,
)
attention_output = cross_attention_outputs[0]
outputs = outputs + cross_attention_outputs[1:-1]  # add cross attentions if we output attention weights

# add cross-attn cache to positions 3,4 of present_key_value tuple
cross_attn_present_key_value = cross_attention_outputs[-1]
present_key_value = present_key_value + cross_attn_present_key_value
10,499
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deprecated/realm/modeling_realm.py
    layer_output = apply_chunking_to_forward(
        self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output
    )
    outputs = (layer_output,) + outputs

    # if decoder, return the attn key/values as the last output
    if self.is_decoder:
        outputs = outputs + (present_key_value,)

    return outputs

def feed_forward_chunk(self, attention_output):
    intermediate_output = self.intermediate(attention_output)
    layer_output = self.output(intermediate_output, attention_output)
    return layer_output
10,499
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deprecated/realm/modeling_realm.py
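`apply_chunking_to_forward` trades memory for compute by running the feed-forward block on slices of the sequence and concatenating the results. A rough, hedged equivalent of what it does when `chunk_size_feed_forward > 0`, simplified to a single tensor argument with hypothetical shapes:

import torch

def feed_forward_chunk(x):
    # stand-in for the intermediate + output sub-layers of RealmLayer
    return x * 2.0

hidden_states = torch.randn(2, 128, 768)  # (batch, seq, hidden)
chunk_size, seq_len_dim = 32, 1

if chunk_size > 0:
    chunks = hidden_states.split(chunk_size, dim=seq_len_dim)  # 4 slices of 32 tokens each
    layer_output = torch.cat([feed_forward_chunk(c) for c in chunks], dim=seq_len_dim)
else:
    layer_output = feed_forward_chunk(hidden_states)
print(layer_output.shape)  # torch.Size([2, 128, 768])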
class RealmEncoder(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config
        self.layer = nn.ModuleList([RealmLayer(config) for _ in range(config.num_hidden_layers)])
        self.gradient_checkpointing = False
10,500
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deprecated/realm/modeling_realm.py
def forward(
    self,
    hidden_states: torch.Tensor,
    attention_mask: Optional[torch.FloatTensor] = None,
    head_mask: Optional[torch.FloatTensor] = None,
    encoder_hidden_states: Optional[torch.FloatTensor] = None,
    encoder_attention_mask: Optional[torch.FloatTensor] = None,
    past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
    use_cache: Optional[bool] = None,
    output_attentions: Optional[bool] = False,
    output_hidden_states: Optional[bool] = False,
    return_dict: Optional[bool] = True,
) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPastAndCrossAttentions]:
    all_hidden_states = () if output_hidden_states else None
    all_self_attentions = () if output_attentions else None
    all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None
10,500
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deprecated/realm/modeling_realm.py
if self.gradient_checkpointing and self.training:
    if use_cache:
        logger.warning_once(
            "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
        )
        use_cache = False

next_decoder_cache = () if use_cache else None
for i, layer_module in enumerate(self.layer):
    if output_hidden_states:
        all_hidden_states = all_hidden_states + (hidden_states,)

    layer_head_mask = head_mask[i] if head_mask is not None else None
    past_key_value = past_key_values[i] if past_key_values is not None else None
10,500
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deprecated/realm/modeling_realm.py