text: string (lengths 1 – 1.02k)
class_index: int64 (0 – 10.8k)
source: string (lengths 85 – 188)
```python >>> labels = tokenizer("The capital of France is Paris.", return_tensors="pt")["input_ids"] >>> labels = torch.where(inputs.input_ids == tokenizer.mask_token_id, labels, -100) >>> outputs = model(**inputs, labels=labels) >>> round(outputs.loss.item(), 2) 0.81 ``` """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.albert( input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_outputs = outputs[0] prediction_scores = self.predictions(sequence_outputs)
3,153
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/albert/modeling_albert.py
masked_lm_loss = None if labels is not None: loss_fct = CrossEntropyLoss() masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1)) if not return_dict: output = (prediction_scores,) + outputs[2:] return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output return MaskedLMOutput( loss=masked_lm_loss, logits=prediction_scores, hidden_states=outputs.hidden_states, attentions=outputs.attentions, )
3,153
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/albert/modeling_albert.py
class AlbertForSequenceClassification(AlbertPreTrainedModel): def __init__(self, config: AlbertConfig): super().__init__(config) self.num_labels = config.num_labels self.config = config self.albert = AlbertModel(config) self.dropout = nn.Dropout(config.classifier_dropout_prob) self.classifier = nn.Linear(config.hidden_size, self.config.num_labels) # Initialize weights and apply final processing self.post_init()
3,154
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/albert/modeling_albert.py
@add_start_docstrings_to_model_forward(ALBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint="textattack/albert-base-v2-imdb", output_type=SequenceClassifierOutput, config_class=_CONFIG_FOR_DOC, expected_output="'LABEL_1'", expected_loss=0.12, ) def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[SequenceClassifierOutput, Tuple]: r"""
3,154
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/albert/modeling_albert.py
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss); if `config.num_labels > 1` a classification loss is computed (Cross-Entropy). """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
3,154
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/albert/modeling_albert.py
outputs = self.albert( input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) pooled_output = outputs[1] pooled_output = self.dropout(pooled_output) logits = self.classifier(pooled_output)
3,154
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/albert/modeling_albert.py
loss = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = "regression" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = "single_label_classification" else: self.config.problem_type = "multi_label_classification"
3,154
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/albert/modeling_albert.py
if self.config.problem_type == "regression": loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(logits.squeeze(), labels.squeeze()) else: loss = loss_fct(logits, labels) elif self.config.problem_type == "single_label_classification": loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) elif self.config.problem_type == "multi_label_classification": loss_fct = BCEWithLogitsLoss() loss = loss_fct(logits, labels) if not return_dict: output = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return SequenceClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, )
3,154
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/albert/modeling_albert.py
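A minimal sketch of how the `problem_type` dispatch in the snippet above behaves. The tiny config sizes and the randomly initialized, checkpoint-free model are illustrative assumptions, not values taken from this file:

```python
import torch
from transformers import AlbertConfig, AlbertForSequenceClassification

# Tiny, randomly initialized config purely for illustration (not real ALBERT sizes).
config = AlbertConfig(
    vocab_size=30,
    embedding_size=16,
    hidden_size=32,
    num_hidden_layers=1,
    num_attention_heads=2,
    intermediate_size=37,
    num_labels=3,
)
model = AlbertForSequenceClassification(config)
input_ids = torch.randint(0, config.vocab_size, (2, 8))

# Integer labels with num_labels > 1 -> "single_label_classification" (CrossEntropyLoss)
out = model(input_ids=input_ids, labels=torch.tensor([0, 2]))
print(config.problem_type, out.loss)

# Float labels of shape (batch_size, num_labels) -> "multi_label_classification" (BCEWithLogitsLoss)
config.problem_type = None  # reset so the dispatch runs again
out = model(input_ids=input_ids, labels=torch.rand(2, 3))
print(config.problem_type, out.loss)
```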
class AlbertForTokenClassification(AlbertPreTrainedModel): def __init__(self, config: AlbertConfig): super().__init__(config) self.num_labels = config.num_labels self.albert = AlbertModel(config, add_pooling_layer=False) classifier_dropout_prob = ( config.classifier_dropout_prob if config.classifier_dropout_prob is not None else config.hidden_dropout_prob ) self.dropout = nn.Dropout(classifier_dropout_prob) self.classifier = nn.Linear(config.hidden_size, self.config.num_labels) # Initialize weights and apply final processing self.post_init()
3,155
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/albert/modeling_albert.py
@add_start_docstrings_to_model_forward(ALBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=TokenClassifierOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[TokenClassifierOutput, Tuple]: r""" labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
3,155
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/albert/modeling_albert.py
Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`. """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
3,155
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/albert/modeling_albert.py
outputs = self.albert( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] sequence_output = self.dropout(sequence_output) logits = self.classifier(sequence_output) loss = None if labels is not None: loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) if not return_dict: output = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output
3,155
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/albert/modeling_albert.py
return TokenClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, )
3,155
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/albert/modeling_albert.py
class AlbertForQuestionAnswering(AlbertPreTrainedModel): def __init__(self, config: AlbertConfig): super().__init__(config) self.num_labels = config.num_labels self.albert = AlbertModel(config, add_pooling_layer=False) self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels) # Initialize weights and apply final processing self.post_init()
3,156
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/albert/modeling_albert.py
@add_start_docstrings_to_model_forward(ALBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint="twmkn9/albert-base-v2-squad2", output_type=QuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC, qa_target_start_index=12, qa_target_end_index=13, expected_output="'a nice puppet'", expected_loss=7.36, ) def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, start_positions: Optional[torch.LongTensor] = None, end_positions: Optional[torch.LongTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None,
3,156
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/albert/modeling_albert.py
return_dict: Optional[bool] = None, ) -> Union[QuestionAnsweringModelOutput, Tuple]: r""" start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for position (index) of the start of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence are not taken into account for computing the loss. end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for position (index) of the end of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence are not taken into account for computing the loss. """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
3,156
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/albert/modeling_albert.py
outputs = self.albert( input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] logits: torch.Tensor = self.qa_outputs(sequence_output) start_logits, end_logits = logits.split(1, dim=-1) start_logits = start_logits.squeeze(-1).contiguous() end_logits = end_logits.squeeze(-1).contiguous()
3,156
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/albert/modeling_albert.py
total_loss = None if start_positions is not None and end_positions is not None: # If we are on multi-GPU, splitting adds a dimension; squeeze it away if len(start_positions.size()) > 1: start_positions = start_positions.squeeze(-1) if len(end_positions.size()) > 1: end_positions = end_positions.squeeze(-1) # sometimes the start/end positions are outside our model inputs; we ignore these terms ignored_index = start_logits.size(1) start_positions = start_positions.clamp(0, ignored_index) end_positions = end_positions.clamp(0, ignored_index) loss_fct = CrossEntropyLoss(ignore_index=ignored_index) start_loss = loss_fct(start_logits, start_positions) end_loss = loss_fct(end_logits, end_positions) total_loss = (start_loss + end_loss) / 2
3,156
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/albert/modeling_albert.py
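To make the clamping step above concrete, here is a small self-contained sketch in plain PyTorch; the logits and answer positions are made up for illustration:

```python
import torch
from torch.nn import CrossEntropyLoss

# Made-up logits for a 5-token sequence; the second answer start (9) lies outside it.
start_logits = torch.randn(2, 5)
start_positions = torch.tensor([2, 9])

ignored_index = start_logits.size(1)                       # 5
start_positions = start_positions.clamp(0, ignored_index)  # tensor([2, 5])

# Targets equal to ignore_index (5) contribute nothing to the loss.
loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
print(loss_fct(start_logits, start_positions))
```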
if not return_dict: output = (start_logits, end_logits) + outputs[2:] return ((total_loss,) + output) if total_loss is not None else output return QuestionAnsweringModelOutput( loss=total_loss, start_logits=start_logits, end_logits=end_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, )
3,156
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/albert/modeling_albert.py
class AlbertForMultipleChoice(AlbertPreTrainedModel): def __init__(self, config: AlbertConfig): super().__init__(config) self.albert = AlbertModel(config) self.dropout = nn.Dropout(config.classifier_dropout_prob) self.classifier = nn.Linear(config.hidden_size, 1) # Initialize weights and apply final processing self.post_init()
3,157
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/albert/modeling_albert.py
@add_start_docstrings_to_model_forward(ALBERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=MultipleChoiceModelOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[MultipleChoiceModelOutput, Tuple]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
3,157
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/albert/modeling_albert.py
Labels for computing the multiple choice classification loss. Indices should be in `[0, ..., num_choices-1]` where *num_choices* is the size of the second dimension of the input tensors. (see *input_ids* above) """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]
3,157
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/albert/modeling_albert.py
input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None inputs_embeds = ( inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1)) if inputs_embeds is not None else None ) outputs = self.albert( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, )
3,157
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/albert/modeling_albert.py
pooled_output = outputs[1] pooled_output = self.dropout(pooled_output) logits: torch.Tensor = self.classifier(pooled_output) reshaped_logits = logits.view(-1, num_choices) loss = None if labels is not None: loss_fct = CrossEntropyLoss() loss = loss_fct(reshaped_logits, labels) if not return_dict: output = (reshaped_logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return MultipleChoiceModelOutput( loss=loss, logits=reshaped_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, )
3,157
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/albert/modeling_albert.py
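A hedged sketch of the `(batch_size, num_choices, sequence_length)` flattening and the final `logits.view(-1, num_choices)` reshape shown above; the tiny config and random weights are illustrative assumptions:

```python
import torch
from transformers import AlbertConfig, AlbertForMultipleChoice

# Tiny, randomly initialized config purely for illustration (not real ALBERT sizes).
config = AlbertConfig(vocab_size=30, embedding_size=16, hidden_size=32,
                      num_hidden_layers=1, num_attention_heads=2, intermediate_size=37)
model = AlbertForMultipleChoice(config)

# (batch_size, num_choices, sequence_length): each example has 4 candidate choices
input_ids = torch.randint(0, config.vocab_size, (2, 4, 8))
labels = torch.tensor([1, 3])  # index of the correct choice per example

out = model(input_ids=input_ids, labels=labels)
print(out.logits.shape)  # torch.Size([2, 4]): one score per choice after the reshape
```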
class AlbertTokenizerFast(PreTrainedTokenizerFast): """ Construct a "fast" ALBERT tokenizer (backed by HuggingFace's *tokenizers* library). Based on [Unigram](https://huggingface.co/docs/tokenizers/python/latest/components.html?highlight=unigram#models). This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods.
3,158
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/albert/tokenization_albert_fast.py
Args: vocab_file (`str`): [SentencePiece](https://github.com/google/sentencepiece) file (generally has a *.spm* extension) that contains the vocabulary necessary to instantiate a tokenizer. do_lower_case (`bool`, *optional*, defaults to `True`): Whether or not to lowercase the input when tokenizing. remove_space (`bool`, *optional*, defaults to `True`): Whether or not to strip the text when tokenizing (removing excess spaces before and after the string). keep_accents (`bool`, *optional*, defaults to `False`): Whether or not to keep accents when tokenizing. bos_token (`str`, *optional*, defaults to `"[CLS]"`): The beginning of sequence token that was used during pretraining. Can be used as a sequence classifier token. <Tip>
3,158
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/albert/tokenization_albert_fast.py
When building a sequence using special tokens, this is not the token that is used for the beginning of sequence. The token used is the `cls_token`. </Tip>
3,158
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/albert/tokenization_albert_fast.py
eos_token (`str`, *optional*, defaults to `"[SEP]"`): The end of sequence token. <Tip> When building a sequence using special tokens, this is not the token that is used for the end of sequence. The token used is the `sep_token`. </Tip> unk_token (`str`, *optional*, defaults to `"<unk>"`): The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead. sep_token (`str`, *optional*, defaults to `"[SEP]"`): The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for sequence classification or for a text and a question for question answering. It is also used as the last token of a sequence built with special tokens. pad_token (`str`, *optional*, defaults to `"<pad>"`): The token used for padding, for example when batching sequences of different lengths.
3,158
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/albert/tokenization_albert_fast.py
cls_token (`str`, *optional*, defaults to `"[CLS]"`): The classifier token which is used when doing sequence classification (classification of the whole sequence instead of per-token classification). It is the first token of the sequence when built with special tokens. mask_token (`str`, *optional*, defaults to `"[MASK]"`): The token used for masking values. This is the token used when training this model with masked language modeling. This is the token which the model will try to predict. """
3,158
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/albert/tokenization_albert_fast.py
vocab_files_names = VOCAB_FILES_NAMES slow_tokenizer_class = AlbertTokenizer def __init__( self, vocab_file=None, tokenizer_file=None, do_lower_case=True, remove_space=True, keep_accents=False, bos_token="[CLS]", eos_token="[SEP]", unk_token="<unk>", sep_token="[SEP]", pad_token="<pad>", cls_token="[CLS]", mask_token="[MASK]", **kwargs, ): # Mask token behave like a normal word, i.e. include the space before it and # is included in the raw text, there should be a match in a non-normalized sentence. mask_token = ( AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False) if isinstance(mask_token, str) else mask_token )
3,158
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/albert/tokenization_albert_fast.py
super().__init__( vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, **kwargs, ) self.do_lower_case = do_lower_case self.remove_space = remove_space self.keep_accents = keep_accents self.vocab_file = vocab_file @property def can_save_slow_tokenizer(self) -> bool: return os.path.isfile(self.vocab_file) if self.vocab_file else False
3,158
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/albert/tokenization_albert_fast.py
def build_inputs_with_special_tokens( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: """ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. An ALBERT sequence has the following format: - single sequence: `[CLS] X [SEP]` - pair of sequences: `[CLS] A [SEP] B [SEP]` Args: token_ids_0 (`List[int]`): List of IDs to which the special tokens will be added token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: list of [input IDs](../glossary#input-ids) with the appropriate special tokens. """ sep = [self.sep_token_id] cls = [self.cls_token_id] if token_ids_1 is None: return cls + token_ids_0 + sep return cls + token_ids_0 + sep + token_ids_1 + sep
3,158
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/albert/tokenization_albert_fast.py
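A short usage sketch of the `[CLS] X [SEP]` / `[CLS] A [SEP] B [SEP]` layout described above (and of the token type IDs covered by `create_token_type_ids_from_sequences` below). The public `albert-base-v2` checkpoint name is an assumption for illustration:

```python
from transformers import AlbertTokenizerFast

tokenizer = AlbertTokenizerFast.from_pretrained("albert-base-v2")

single = tokenizer("hello world")
pair = tokenizer("hello world", "how are you")

print(tokenizer.convert_ids_to_tokens(single["input_ids"]))  # [CLS] X [SEP]
print(tokenizer.convert_ids_to_tokens(pair["input_ids"]))    # [CLS] A [SEP] B [SEP]
print(pair["token_type_ids"])  # 0s for the first segment, 1s for the second
```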
def create_token_type_ids_from_sequences( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: """ Creates a mask from the two sequences passed to be used in a sequence-pair classification task. An ALBERT sequence pair mask has the following format: ``` 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 | first sequence | second sequence | ``` if token_ids_1 is None, only returns the first portion of the mask (0s). Args: token_ids_0 (`List[int]`): List of ids. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s). """ sep = [self.sep_token_id] cls = [self.cls_token_id]
3,158
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/albert/tokenization_albert_fast.py
if token_ids_1 is None: return len(cls + token_ids_0 + sep) * [0] return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: if not self.can_save_slow_tokenizer: raise ValueError( "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow " "tokenizer." ) if not os.path.isdir(save_directory): logger.error(f"Vocabulary path ({save_directory}) should be a directory") return out_vocab_file = os.path.join( save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file): copyfile(self.vocab_file, out_vocab_file) return (out_vocab_file,)
3,158
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/albert/tokenization_albert_fast.py
class FlaxAlbertForPreTrainingOutput(ModelOutput): """ Output type of [`FlaxAlbertForPreTraining`]. Args: prediction_logits (`jnp.ndarray` of shape `(batch_size, sequence_length, config.vocab_size)`): Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). sop_logits (`jnp.ndarray` of shape `(batch_size, 2)`): Prediction scores of the sentence order prediction (classification) head (scores of True/False continuation before SoftMax). hidden_states (`tuple(jnp.ndarray)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `jnp.ndarray` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
3,159
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/albert/modeling_flax_albert.py
Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ prediction_logits: jnp.ndarray = None sop_logits: jnp.ndarray = None hidden_states: Optional[Tuple[jnp.ndarray]] = None attentions: Optional[Tuple[jnp.ndarray]] = None
3,159
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/albert/modeling_flax_albert.py
class FlaxAlbertEmbeddings(nn.Module): """Construct the embeddings from word, position and token_type embeddings.""" config: AlbertConfig dtype: jnp.dtype = jnp.float32 # the dtype of the computation
3,160
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/albert/modeling_flax_albert.py
def setup(self): self.word_embeddings = nn.Embed( self.config.vocab_size, self.config.embedding_size, embedding_init=jax.nn.initializers.normal(stddev=self.config.initializer_range), ) self.position_embeddings = nn.Embed( self.config.max_position_embeddings, self.config.embedding_size, embedding_init=jax.nn.initializers.normal(stddev=self.config.initializer_range), ) self.token_type_embeddings = nn.Embed( self.config.type_vocab_size, self.config.embedding_size, embedding_init=jax.nn.initializers.normal(stddev=self.config.initializer_range), ) self.LayerNorm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype) self.dropout = nn.Dropout(rate=self.config.hidden_dropout_prob)
3,160
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/albert/modeling_flax_albert.py
def __call__(self, input_ids, token_type_ids, position_ids, deterministic: bool = True): # Embed inputs_embeds = self.word_embeddings(input_ids.astype("i4")) position_embeds = self.position_embeddings(position_ids.astype("i4")) token_type_embeddings = self.token_type_embeddings(token_type_ids.astype("i4")) # Sum all embeddings hidden_states = inputs_embeds + token_type_embeddings + position_embeds # Layer Norm hidden_states = self.LayerNorm(hidden_states) hidden_states = self.dropout(hidden_states, deterministic=deterministic) return hidden_states
3,160
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/albert/modeling_flax_albert.py
class FlaxAlbertSelfAttention(nn.Module): config: AlbertConfig dtype: jnp.dtype = jnp.float32 # the dtype of the computation def setup(self): if self.config.hidden_size % self.config.num_attention_heads != 0: raise ValueError( f"`config.hidden_size`: {self.config.hidden_size} has to be a multiple of " f"`config.num_attention_heads`: {self.config.num_attention_heads}" )
3,161
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/albert/modeling_flax_albert.py
self.query = nn.Dense( self.config.hidden_size, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(self.config.initializer_range), ) self.key = nn.Dense( self.config.hidden_size, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(self.config.initializer_range), ) self.value = nn.Dense( self.config.hidden_size, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(self.config.initializer_range), ) self.dense = nn.Dense( self.config.hidden_size, kernel_init=jax.nn.initializers.normal(self.config.initializer_range), dtype=self.dtype, ) self.LayerNorm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype) self.dropout = nn.Dropout(rate=self.config.hidden_dropout_prob)
3,161
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/albert/modeling_flax_albert.py
def __call__(self, hidden_states, attention_mask, deterministic=True, output_attentions: bool = False): head_dim = self.config.hidden_size // self.config.num_attention_heads query_states = self.query(hidden_states).reshape( hidden_states.shape[:2] + (self.config.num_attention_heads, head_dim) ) value_states = self.value(hidden_states).reshape( hidden_states.shape[:2] + (self.config.num_attention_heads, head_dim) ) key_states = self.key(hidden_states).reshape( hidden_states.shape[:2] + (self.config.num_attention_heads, head_dim) )
3,161
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/albert/modeling_flax_albert.py
# Convert the boolean attention mask to an attention bias. if attention_mask is not None: # attention mask in the form of attention bias attention_mask = jnp.expand_dims(attention_mask, axis=(-3, -2)) attention_bias = lax.select( attention_mask > 0, jnp.full(attention_mask.shape, 0.0).astype(self.dtype), jnp.full(attention_mask.shape, jnp.finfo(self.dtype).min).astype(self.dtype), ) else: attention_bias = None dropout_rng = None if not deterministic and self.config.attention_probs_dropout_prob > 0.0: dropout_rng = self.make_rng("dropout")
3,161
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/albert/modeling_flax_albert.py
attn_weights = dot_product_attention_weights( query_states, key_states, bias=attention_bias, dropout_rng=dropout_rng, dropout_rate=self.config.attention_probs_dropout_prob, broadcast_dropout=True, deterministic=deterministic, dtype=self.dtype, precision=None, ) attn_output = jnp.einsum("...hqk,...khd->...qhd", attn_weights, value_states) attn_output = attn_output.reshape(attn_output.shape[:2] + (-1,)) projected_attn_output = self.dense(attn_output) projected_attn_output = self.dropout(projected_attn_output, deterministic=deterministic) layernormed_attn_output = self.LayerNorm(projected_attn_output + hidden_states) outputs = (layernormed_attn_output, attn_weights) if output_attentions else (layernormed_attn_output,) return outputs
3,161
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/albert/modeling_flax_albert.py
class FlaxAlbertLayer(nn.Module): config: AlbertConfig dtype: jnp.dtype = jnp.float32 # the dtype of the computation def setup(self): self.attention = FlaxAlbertSelfAttention(self.config, dtype=self.dtype) self.ffn = nn.Dense( self.config.intermediate_size, kernel_init=jax.nn.initializers.normal(self.config.initializer_range), dtype=self.dtype, ) self.activation = ACT2FN[self.config.hidden_act] self.ffn_output = nn.Dense( self.config.hidden_size, kernel_init=jax.nn.initializers.normal(self.config.initializer_range), dtype=self.dtype, ) self.full_layer_layer_norm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype) self.dropout = nn.Dropout(rate=self.config.hidden_dropout_prob)
3,162
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/albert/modeling_flax_albert.py
def __call__( self, hidden_states, attention_mask, deterministic: bool = True, output_attentions: bool = False, ): attention_outputs = self.attention( hidden_states, attention_mask, deterministic=deterministic, output_attentions=output_attentions ) attention_output = attention_outputs[0] ffn_output = self.ffn(attention_output) ffn_output = self.activation(ffn_output) ffn_output = self.ffn_output(ffn_output) ffn_output = self.dropout(ffn_output, deterministic=deterministic) hidden_states = self.full_layer_layer_norm(ffn_output + attention_output) outputs = (hidden_states,) if output_attentions: outputs += (attention_outputs[1],) return outputs
3,162
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/albert/modeling_flax_albert.py
class FlaxAlbertLayerCollection(nn.Module): config: AlbertConfig dtype: jnp.dtype = jnp.float32 # the dtype of the computation def setup(self): self.layers = [ FlaxAlbertLayer(self.config, name=str(i), dtype=self.dtype) for i in range(self.config.inner_group_num) ] def __call__( self, hidden_states, attention_mask, deterministic: bool = True, output_attentions: bool = False, output_hidden_states: bool = False, ): layer_hidden_states = () layer_attentions = () for layer_index, albert_layer in enumerate(self.layers): layer_output = albert_layer( hidden_states, attention_mask, deterministic=deterministic, output_attentions=output_attentions, ) hidden_states = layer_output[0] if output_attentions: layer_attentions = layer_attentions + (layer_output[1],)
3,163
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/albert/modeling_flax_albert.py
if output_hidden_states: layer_hidden_states = layer_hidden_states + (hidden_states,) outputs = (hidden_states,) if output_hidden_states: outputs = outputs + (layer_hidden_states,) if output_attentions: outputs = outputs + (layer_attentions,) return outputs # last-layer hidden state, (layer hidden states), (layer attentions)
3,163
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/albert/modeling_flax_albert.py
class FlaxAlbertLayerCollections(nn.Module): config: AlbertConfig dtype: jnp.dtype = jnp.float32 # the dtype of the computation layer_index: Optional[str] = None def setup(self): self.albert_layers = FlaxAlbertLayerCollection(self.config, dtype=self.dtype) def __call__( self, hidden_states, attention_mask, deterministic: bool = True, output_attentions: bool = False, output_hidden_states: bool = False, ): outputs = self.albert_layers( hidden_states, attention_mask, deterministic=deterministic, output_attentions=output_attentions, output_hidden_states=output_hidden_states, ) return outputs
3,164
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/albert/modeling_flax_albert.py
class FlaxAlbertLayerGroups(nn.Module): config: AlbertConfig dtype: jnp.dtype = jnp.float32 # the dtype of the computation def setup(self): self.layers = [ FlaxAlbertLayerCollections(self.config, name=str(i), layer_index=str(i), dtype=self.dtype) for i in range(self.config.num_hidden_groups) ] def __call__( self, hidden_states, attention_mask, deterministic: bool = True, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, ): all_attentions = () if output_attentions else None all_hidden_states = (hidden_states,) if output_hidden_states else None
3,165
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/albert/modeling_flax_albert.py
for i in range(self.config.num_hidden_layers): # Index of the hidden group group_idx = int(i / (self.config.num_hidden_layers / self.config.num_hidden_groups)) layer_group_output = self.layers[group_idx]( hidden_states, attention_mask, deterministic=deterministic, output_attentions=output_attentions, output_hidden_states=output_hidden_states, ) hidden_states = layer_group_output[0] if output_attentions: all_attentions = all_attentions + layer_group_output[-1] if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,)
3,165
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/albert/modeling_flax_albert.py
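The integer division in `group_idx` above maps each of the `num_hidden_layers` iterations onto one of the `num_hidden_groups` shared parameter groups. A quick sketch of the mapping, with illustrative numbers rather than values from this file:

```python
# 12 layer iterations shared across 3 parameter groups (illustrative numbers only).
num_hidden_layers, num_hidden_groups = 12, 3

group_idx = [int(i / (num_hidden_layers / num_hidden_groups)) for i in range(num_hidden_layers)]
print(group_idx)  # [0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2]
```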
if not return_dict: return tuple(v for v in [hidden_states, all_hidden_states, all_attentions] if v is not None) return FlaxBaseModelOutput( last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions )
3,165
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/albert/modeling_flax_albert.py
class FlaxAlbertEncoder(nn.Module): config: AlbertConfig dtype: jnp.dtype = jnp.float32 # the dtype of the computation def setup(self): self.embedding_hidden_mapping_in = nn.Dense( self.config.hidden_size, kernel_init=jax.nn.initializers.normal(self.config.initializer_range), dtype=self.dtype, ) self.albert_layer_groups = FlaxAlbertLayerGroups(self.config, dtype=self.dtype) def __call__( self, hidden_states, attention_mask, deterministic: bool = True, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, ): hidden_states = self.embedding_hidden_mapping_in(hidden_states) return self.albert_layer_groups( hidden_states, attention_mask, deterministic=deterministic, output_attentions=output_attentions, output_hidden_states=output_hidden_states, )
3,166
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/albert/modeling_flax_albert.py
class FlaxAlbertOnlyMLMHead(nn.Module): config: AlbertConfig dtype: jnp.dtype = jnp.float32 bias_init: Callable[..., np.ndarray] = jax.nn.initializers.zeros def setup(self): self.dense = nn.Dense(self.config.embedding_size, dtype=self.dtype) self.activation = ACT2FN[self.config.hidden_act] self.LayerNorm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype) self.decoder = nn.Dense(self.config.vocab_size, dtype=self.dtype, use_bias=False) self.bias = self.param("bias", self.bias_init, (self.config.vocab_size,)) def __call__(self, hidden_states, shared_embedding=None): hidden_states = self.dense(hidden_states) hidden_states = self.activation(hidden_states) hidden_states = self.LayerNorm(hidden_states)
3,167
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/albert/modeling_flax_albert.py
if shared_embedding is not None: hidden_states = self.decoder.apply({"params": {"kernel": shared_embedding.T}}, hidden_states) else: hidden_states = self.decoder(hidden_states) hidden_states += self.bias return hidden_states
3,167
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/albert/modeling_flax_albert.py
class FlaxAlbertSOPHead(nn.Module): config: AlbertConfig dtype: jnp.dtype = jnp.float32 def setup(self): self.dropout = nn.Dropout(self.config.classifier_dropout_prob) self.classifier = nn.Dense(2, dtype=self.dtype) def __call__(self, pooled_output, deterministic=True): pooled_output = self.dropout(pooled_output, deterministic=deterministic) logits = self.classifier(pooled_output) return logits
3,168
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/albert/modeling_flax_albert.py
class FlaxAlbertPreTrainedModel(FlaxPreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = AlbertConfig base_model_prefix = "albert" module_class: nn.Module = None def __init__( self, config: AlbertConfig, input_shape: Tuple = (1, 1), seed: int = 0, dtype: jnp.dtype = jnp.float32, _do_init: bool = True, **kwargs, ): module = self.module_class(config=config, dtype=dtype, **kwargs) super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init)
3,169
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/albert/modeling_flax_albert.py
def init_weights(self, rng: jax.random.PRNGKey, input_shape: Tuple, params: FrozenDict = None) -> FrozenDict: # init input tensors input_ids = jnp.zeros(input_shape, dtype="i4") token_type_ids = jnp.zeros_like(input_ids) position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_shape) attention_mask = jnp.ones_like(input_ids) params_rng, dropout_rng = jax.random.split(rng) rngs = {"params": params_rng, "dropout": dropout_rng} random_params = self.module.init( rngs, input_ids, attention_mask, token_type_ids, position_ids, return_dict=False )["params"]
3,169
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/albert/modeling_flax_albert.py
if params is not None: random_params = flatten_dict(unfreeze(random_params)) params = flatten_dict(unfreeze(params)) for missing_key in self._missing_keys: params[missing_key] = random_params[missing_key] self._missing_keys = set() return freeze(unflatten_dict(params)) else: return random_params
3,169
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/albert/modeling_flax_albert.py
@add_start_docstrings_to_model_forward(ALBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) def __call__( self, input_ids, attention_mask=None, token_type_ids=None, position_ids=None, params: dict = None, dropout_rng: jax.random.PRNGKey = None, train: bool = False, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ): output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.return_dict # init input tensors if not passed if token_type_ids is None: token_type_ids = jnp.zeros_like(input_ids)
3,169
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/albert/modeling_flax_albert.py
if position_ids is None: position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_ids.shape) if attention_mask is None: attention_mask = jnp.ones_like(input_ids) # Handle any PRNG if needed rngs = {} if dropout_rng is not None: rngs["dropout"] = dropout_rng return self.module.apply( {"params": params or self.params}, jnp.array(input_ids, dtype="i4"), jnp.array(attention_mask, dtype="i4"), jnp.array(token_type_ids, dtype="i4"), jnp.array(position_ids, dtype="i4"), not train, output_attentions, output_hidden_states, return_dict, rngs=rngs, )
3,169
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/albert/modeling_flax_albert.py
class FlaxAlbertModule(nn.Module): config: AlbertConfig dtype: jnp.dtype = jnp.float32 # the dtype of the computation add_pooling_layer: bool = True def setup(self): self.embeddings = FlaxAlbertEmbeddings(self.config, dtype=self.dtype) self.encoder = FlaxAlbertEncoder(self.config, dtype=self.dtype) if self.add_pooling_layer: self.pooler = nn.Dense( self.config.hidden_size, kernel_init=jax.nn.initializers.normal(self.config.initializer_range), dtype=self.dtype, name="pooler", ) self.pooler_activation = nn.tanh else: self.pooler = None self.pooler_activation = None
3,170
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/albert/modeling_flax_albert.py
def __call__( self, input_ids, attention_mask, token_type_ids: Optional[np.ndarray] = None, position_ids: Optional[np.ndarray] = None, deterministic: bool = True, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, ): # make sure `token_type_ids` is correctly initialized when not passed if token_type_ids is None: token_type_ids = jnp.zeros_like(input_ids) # make sure `position_ids` is correctly initialized when not passed if position_ids is None: position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_ids.shape) hidden_states = self.embeddings(input_ids, token_type_ids, position_ids, deterministic=deterministic)
3,170
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/albert/modeling_flax_albert.py
outputs = self.encoder( hidden_states, attention_mask, deterministic=deterministic, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) hidden_states = outputs[0] if self.add_pooling_layer: pooled = self.pooler(hidden_states[:, 0]) pooled = self.pooler_activation(pooled) else: pooled = None if not return_dict: # if pooled is None, don't return it if pooled is None: return (hidden_states,) + outputs[1:] return (hidden_states, pooled) + outputs[1:] return FlaxBaseModelOutputWithPooling( last_hidden_state=hidden_states, pooler_output=pooled, hidden_states=outputs.hidden_states, attentions=outputs.attentions, )
3,170
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/albert/modeling_flax_albert.py
class FlaxAlbertModel(FlaxAlbertPreTrainedModel): module_class = FlaxAlbertModule
3,171
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/albert/modeling_flax_albert.py
class FlaxAlbertForPreTrainingModule(nn.Module): config: AlbertConfig dtype: jnp.dtype = jnp.float32 def setup(self): self.albert = FlaxAlbertModule(config=self.config, dtype=self.dtype) self.predictions = FlaxAlbertOnlyMLMHead(config=self.config, dtype=self.dtype) self.sop_classifier = FlaxAlbertSOPHead(config=self.config, dtype=self.dtype) def __call__( self, input_ids, attention_mask, token_type_ids, position_ids, deterministic: bool = True, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, ): # Model outputs = self.albert( input_ids, attention_mask, token_type_ids, position_ids, deterministic=deterministic, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, )
3,172
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/albert/modeling_flax_albert.py
if self.config.tie_word_embeddings: shared_embedding = self.albert.variables["params"]["embeddings"]["word_embeddings"]["embedding"] else: shared_embedding = None hidden_states = outputs[0] pooled_output = outputs[1] prediction_scores = self.predictions(hidden_states, shared_embedding=shared_embedding) sop_scores = self.sop_classifier(pooled_output, deterministic=deterministic) if not return_dict: return (prediction_scores, sop_scores) + outputs[2:] return FlaxAlbertForPreTrainingOutput( prediction_logits=prediction_scores, sop_logits=sop_scores, hidden_states=outputs.hidden_states, attentions=outputs.attentions, )
3,172
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/albert/modeling_flax_albert.py
class FlaxAlbertForPreTraining(FlaxAlbertPreTrainedModel): module_class = FlaxAlbertForPreTrainingModule
3,173
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/albert/modeling_flax_albert.py
class FlaxAlbertForMaskedLMModule(nn.Module): config: AlbertConfig dtype: jnp.dtype = jnp.float32 def setup(self): self.albert = FlaxAlbertModule(config=self.config, add_pooling_layer=False, dtype=self.dtype) self.predictions = FlaxAlbertOnlyMLMHead(config=self.config, dtype=self.dtype) def __call__( self, input_ids, attention_mask, token_type_ids, position_ids, deterministic: bool = True, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, ): # Model outputs = self.albert( input_ids, attention_mask, token_type_ids, position_ids, deterministic=deterministic, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, )
3,174
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/albert/modeling_flax_albert.py
hidden_states = outputs[0] if self.config.tie_word_embeddings: shared_embedding = self.albert.variables["params"]["embeddings"]["word_embeddings"]["embedding"] else: shared_embedding = None # Compute the prediction scores logits = self.predictions(hidden_states, shared_embedding=shared_embedding) if not return_dict: return (logits,) + outputs[1:] return FlaxMaskedLMOutput( logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, )
3,174
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/albert/modeling_flax_albert.py
class FlaxAlbertForMaskedLM(FlaxAlbertPreTrainedModel): module_class = FlaxAlbertForMaskedLMModule
3,175
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/albert/modeling_flax_albert.py
class FlaxAlbertForSequenceClassificationModule(nn.Module): config: AlbertConfig dtype: jnp.dtype = jnp.float32 def setup(self): self.albert = FlaxAlbertModule(config=self.config, dtype=self.dtype) classifier_dropout = ( self.config.classifier_dropout_prob if self.config.classifier_dropout_prob is not None else self.config.hidden_dropout_prob ) self.dropout = nn.Dropout(rate=classifier_dropout) self.classifier = nn.Dense( self.config.num_labels, dtype=self.dtype, )
3,176
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/albert/modeling_flax_albert.py
def __call__( self, input_ids, attention_mask, token_type_ids, position_ids, deterministic: bool = True, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, ): # Model outputs = self.albert( input_ids, attention_mask, token_type_ids, position_ids, deterministic=deterministic, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) pooled_output = outputs[1] pooled_output = self.dropout(pooled_output, deterministic=deterministic) logits = self.classifier(pooled_output) if not return_dict: return (logits,) + outputs[2:]
3,176
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/albert/modeling_flax_albert.py
return FlaxSequenceClassifierOutput( logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, )
3,176
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/albert/modeling_flax_albert.py
class FlaxAlbertForSequenceClassification(FlaxAlbertPreTrainedModel): module_class = FlaxAlbertForSequenceClassificationModule
3,177
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/albert/modeling_flax_albert.py
class FlaxAlbertForMultipleChoiceModule(nn.Module): config: AlbertConfig dtype: jnp.dtype = jnp.float32 def setup(self): self.albert = FlaxAlbertModule(config=self.config, dtype=self.dtype) self.dropout = nn.Dropout(rate=self.config.hidden_dropout_prob) self.classifier = nn.Dense(1, dtype=self.dtype)
3,178
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/albert/modeling_flax_albert.py
def __call__( self, input_ids, attention_mask, token_type_ids, position_ids, deterministic: bool = True, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, ): num_choices = input_ids.shape[1] input_ids = input_ids.reshape(-1, input_ids.shape[-1]) if input_ids is not None else None attention_mask = attention_mask.reshape(-1, attention_mask.shape[-1]) if attention_mask is not None else None token_type_ids = token_type_ids.reshape(-1, token_type_ids.shape[-1]) if token_type_ids is not None else None position_ids = position_ids.reshape(-1, position_ids.shape[-1]) if position_ids is not None else None
3,178
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/albert/modeling_flax_albert.py
# Model outputs = self.albert( input_ids, attention_mask, token_type_ids, position_ids, deterministic=deterministic, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) pooled_output = outputs[1] pooled_output = self.dropout(pooled_output, deterministic=deterministic) logits = self.classifier(pooled_output) reshaped_logits = logits.reshape(-1, num_choices) if not return_dict: return (reshaped_logits,) + outputs[2:] return FlaxMultipleChoiceModelOutput( logits=reshaped_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, )
3,178
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/albert/modeling_flax_albert.py
class FlaxAlbertForMultipleChoice(FlaxAlbertPreTrainedModel): module_class = FlaxAlbertForMultipleChoiceModule
3,179
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/albert/modeling_flax_albert.py
class FlaxAlbertForTokenClassificationModule(nn.Module): config: AlbertConfig dtype: jnp.dtype = jnp.float32 def setup(self): self.albert = FlaxAlbertModule(config=self.config, dtype=self.dtype, add_pooling_layer=False) classifier_dropout = ( self.config.classifier_dropout_prob if self.config.classifier_dropout_prob is not None else self.config.hidden_dropout_prob ) self.dropout = nn.Dropout(rate=classifier_dropout) self.classifier = nn.Dense(self.config.num_labels, dtype=self.dtype)
3,180
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/albert/modeling_flax_albert.py
def __call__( self, input_ids, attention_mask, token_type_ids, position_ids, deterministic: bool = True, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, ): # Model outputs = self.albert( input_ids, attention_mask, token_type_ids, position_ids, deterministic=deterministic, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) hidden_states = outputs[0] hidden_states = self.dropout(hidden_states, deterministic=deterministic) logits = self.classifier(hidden_states) if not return_dict: return (logits,) + outputs[1:]
3,180
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/albert/modeling_flax_albert.py
return FlaxTokenClassifierOutput( logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, )
3,180
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/albert/modeling_flax_albert.py
class FlaxAlbertForTokenClassification(FlaxAlbertPreTrainedModel): module_class = FlaxAlbertForTokenClassificationModule
3,181
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/albert/modeling_flax_albert.py
class FlaxAlbertForQuestionAnsweringModule(nn.Module): config: AlbertConfig dtype: jnp.dtype = jnp.float32 def setup(self): self.albert = FlaxAlbertModule(config=self.config, dtype=self.dtype, add_pooling_layer=False) self.qa_outputs = nn.Dense(self.config.num_labels, dtype=self.dtype) def __call__( self, input_ids, attention_mask, token_type_ids, position_ids, deterministic: bool = True, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, ): # Model outputs = self.albert( input_ids, attention_mask, token_type_ids, position_ids, deterministic=deterministic, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) hidden_states = outputs[0]
3,182
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/albert/modeling_flax_albert.py
logits = self.qa_outputs(hidden_states) start_logits, end_logits = logits.split(self.config.num_labels, axis=-1) start_logits = start_logits.squeeze(-1) end_logits = end_logits.squeeze(-1) if not return_dict: return (start_logits, end_logits) + outputs[1:] return FlaxQuestionAnsweringModelOutput( start_logits=start_logits, end_logits=end_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, )
3,182
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/albert/modeling_flax_albert.py
class FlaxAlbertForQuestionAnswering(FlaxAlbertPreTrainedModel): module_class = FlaxAlbertForQuestionAnsweringModule
3,183
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/albert/modeling_flax_albert.py
class AlbertTokenizer(PreTrainedTokenizer): """ Construct an ALBERT tokenizer. Based on [SentencePiece](https://github.com/google/sentencepiece). This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods.
3,184
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/albert/tokenization_albert.py
Args: vocab_file (`str`): [SentencePiece](https://github.com/google/sentencepiece) file (generally has a *.spm* extension) that contains the vocabulary necessary to instantiate a tokenizer. do_lower_case (`bool`, *optional*, defaults to `True`): Whether or not to lowercase the input when tokenizing. remove_space (`bool`, *optional*, defaults to `True`): Whether or not to strip the text when tokenizing (removing excess spaces before and after the string). keep_accents (`bool`, *optional*, defaults to `False`): Whether or not to keep accents when tokenizing. bos_token (`str`, *optional*, defaults to `"[CLS]"`): The beginning of sequence token that was used during pretraining. Can be used as a sequence classifier token. <Tip>
3,184
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/albert/tokenization_albert.py
When building a sequence using special tokens, this is not the token that is used for the beginning of sequence. The token used is the `cls_token`. </Tip> eos_token (`str`, *optional*, defaults to `"[SEP]"`): The end of sequence token. <Tip> When building a sequence using special tokens, this is not the token that is used for the end of sequence. The token used is the `sep_token`. </Tip>
3,184
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/albert/tokenization_albert.py
        unk_token (`str`, *optional*, defaults to `"<unk>"`):
            The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be
            this token instead.
        sep_token (`str`, *optional*, defaults to `"[SEP]"`):
            The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences
            for sequence classification or for a text and a question for question answering. It is also used as the
            last token of a sequence built with special tokens.
        pad_token (`str`, *optional*, defaults to `"<pad>"`):
            The token used for padding, for example when batching sequences of different lengths.
        cls_token (`str`, *optional*, defaults to `"[CLS]"`):
            The classifier token which is used when doing sequence classification (classification of the whole
            sequence
3,184
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/albert/tokenization_albert.py
            instead of per-token classification). It is the first token of the sequence when built with special
            tokens.
        mask_token (`str`, *optional*, defaults to `"[MASK]"`):
            The token used for masking values. This is the token used when training this model with masked language
            modeling. This is the token which the model will try to predict.
        sp_model_kwargs (`dict`, *optional*):
            Will be passed to the `SentencePieceProcessor.__init__()` method. The [Python wrapper for
            SentencePiece](https://github.com/google/sentencepiece/tree/master/python) can be used, among other
            things, to set:
3,184
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/albert/tokenization_albert.py
            - `enable_sampling`: Enable subword regularization.
            - `nbest_size`: Sampling parameters for unigram. Invalid for BPE-Dropout.

              - `nbest_size = {0,1}`: No sampling is performed.
              - `nbest_size > 1`: samples from the nbest_size results.
              - `nbest_size < 0`: assuming that nbest_size is infinite and samples from all hypotheses (lattice)
                using the forward-filtering-and-backward-sampling algorithm.

            - `alpha`: Smoothing parameter for unigram sampling, and dropout probability of merge operations for
              BPE-dropout.

    Attributes:
        sp_model (`SentencePieceProcessor`):
            The *SentencePiece* processor that is used for every conversion (string, tokens and IDs).
    """

    vocab_files_names = VOCAB_FILES_NAMES
3,184
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/albert/tokenization_albert.py
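The docstring above describes the tokenizer's constructor arguments. A short instantiation sketch follows; the local `spiece.model` path and the `sp_model_kwargs` values are illustrative assumptions, not values required by the class.

```python
from transformers import AlbertTokenizer

# Load the SentencePiece model that ships with a pretrained checkpoint.
tokenizer = AlbertTokenizer.from_pretrained("albert-base-v2")

# Or build one directly from a local SentencePiece file ("spiece.model" is a hypothetical path);
# the keyword arguments mirror the defaults documented above, with subword regularization enabled.
# local_tokenizer = AlbertTokenizer(
#     vocab_file="spiece.model",
#     do_lower_case=True,
#     keep_accents=False,
#     sp_model_kwargs={"enable_sampling": True, "nbest_size": -1, "alpha": 0.1},
# )

print(tokenizer.tokenize("Héllo,   wörld!"))  # accents stripped and text lowercased by default
```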
    def __init__(
        self,
        vocab_file,
        do_lower_case=True,
        remove_space=True,
        keep_accents=False,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="<unk>",
        sep_token="[SEP]",
        pad_token="<pad>",
        cls_token="[CLS]",
        mask_token="[MASK]",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # The mask token behaves like a normal word, i.e. it includes the space before it and
        # is kept in the raw text, so there should be a match in a non-normalized sentence.
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
3,184
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/albert/tokenization_albert.py
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

    @property
    def vocab_size(self) -> int:
        return len(self.sp_model)

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
3,184
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/albert/tokenization_albert.py
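`vocab_size` and `get_vocab` above expose the underlying SentencePiece vocabulary plus any added tokens. A small sketch; the 30000-entry vocabulary size mentioned in the comment is what the public albert-base-v2 model uses and is an assumption rather than a guarantee.

```python
from transformers import AlbertTokenizer

tokenizer = AlbertTokenizer.from_pretrained("albert-base-v2")

print(tokenizer.vocab_size)      # size of the SentencePiece model (30000 for albert-base-v2)
vocab = tokenizer.get_vocab()    # token -> id mapping, including added special tokens
print(vocab[tokenizer.cls_token], vocab[tokenizer.sep_token])
```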
    def __getstate__(self):
        state = self.__dict__.copy()
        # The SentencePiece processor is not picklable; drop it and reload it in __setstate__.
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def preprocess_text(self, inputs):
        if self.remove_space:
            # Collapse runs of whitespace and strip leading/trailing spaces.
            outputs = " ".join(inputs.strip().split())
        else:
            outputs = inputs
        # Normalize LaTeX-style quotes to plain double quotes.
        outputs = outputs.replace("``", '"').replace("''", '"')

        if not self.keep_accents:
            # Strip accents: NFKD-decompose, then drop combining marks.
            outputs = unicodedata.normalize("NFKD", outputs)
            outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()

        return outputs
3,184
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/albert/tokenization_albert.py
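`preprocess_text` normalizes raw input before SentencePiece sees it: whitespace is collapsed when `remove_space` is set, backtick/apostrophe quotes are mapped to `"`, accents are dropped unless `keep_accents` is set, and the text is lowercased when `do_lower_case` is set. A small sketch with the default flags; the expected output is an inference from the logic above, not a guaranteed value.

```python
from transformers import AlbertTokenizer

tokenizer = AlbertTokenizer.from_pretrained("albert-base-v2")  # do_lower_case=True, keep_accents=False

# Expected (not guaranteed) result with the defaults: 'hello "world"'
print(tokenizer.preprocess_text("  Héllo   ``World''  "))
```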
    def _tokenize(self, text: str) -> List[str]:
        """Tokenize a string."""
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
                # Logic to handle special cases, see
                # https://github.com/google-research/bert/blob/master/README.md#tokenization
                # `9,9` -> ['▁9', ',', '9'] instead of ['▁9,', '9']
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
3,184
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/albert/tokenization_albert.py
                new_pieces.append(piece)
3,184
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/albert/tokenization_albert.py
        return new_pieces

    def _convert_token_to_id(self, token):
        """Converts a token (str) into an id using the vocab."""
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) into a token (str) using the vocab."""
        return self.sp_model.IdToPiece(index)
3,184
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/albert/tokenization_albert.py
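The `_tokenize` method above adds a special case so that a digit followed by a trailing comma is split apart. A sketch through the public `tokenize`/id-conversion API; the exact pieces depend on the SentencePiece model, so the expected outputs are hedged.

```python
from transformers import AlbertTokenizer

tokenizer = AlbertTokenizer.from_pretrained("albert-base-v2")

# The digit/comma special case: "9,9" should come out roughly as ['▁9', ',', '9']
# rather than a single piece ending in ",".
print(tokenizer.tokenize("9,9"))

# Piece <-> id conversion goes straight through the SentencePiece vocab.
ids = tokenizer.convert_tokens_to_ids(tokenizer.tokenize("hello world"))
print(tokenizer.convert_ids_to_tokens(ids))
```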
    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings) into a single string."""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using the sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
3,184
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/albert/tokenization_albert.py
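`convert_tokens_to_string` above decodes SentencePiece pieces with `sp_model` while emitting special tokens verbatim. A short round-trip sketch; the exact spacing around special tokens follows the branching above and may vary between library versions.

```python
from transformers import AlbertTokenizer

tokenizer = AlbertTokenizer.from_pretrained("albert-base-v2")

tokens = ["[CLS]", "▁hello", "▁world", "[SEP]"]
# Expected (not guaranteed) output: "[CLS] hello world[SEP]"
print(tokenizer.convert_tokens_to_string(tokens))
```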
    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """
        Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating
        and adding special tokens. An ALBERT sequence has the following format:

        - single sequence: `[CLS] X [SEP]`
        - pair of sequences: `[CLS] A [SEP] B [SEP]`

        Args:
            token_ids_0 (`List[int]`):
                List of IDs to which the special tokens will be added.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.

        Returns:
            `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
        """
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep
3,184
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/albert/tokenization_albert.py
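`build_inputs_with_special_tokens` above is what wraps raw token ids in `[CLS]`/`[SEP]`. A minimal sketch of the single-sequence and sequence-pair layouts it documents:

```python
from transformers import AlbertTokenizer

tokenizer = AlbertTokenizer.from_pretrained("albert-base-v2")

ids_a = tokenizer.convert_tokens_to_ids(tokenizer.tokenize("first sentence"))
ids_b = tokenizer.convert_tokens_to_ids(tokenizer.tokenize("second sentence"))

single = tokenizer.build_inputs_with_special_tokens(ids_a)        # [CLS] A [SEP]
pair = tokenizer.build_inputs_with_special_tokens(ids_a, ids_b)   # [CLS] A [SEP] B [SEP]

assert single[0] == tokenizer.cls_token_id and single[-1] == tokenizer.sep_token_id
assert pair.count(tokenizer.sep_token_id) == 2
```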