Columns: text (string, lengths 1 to 1.02k) | class_index (int64, values 0 to 10.8k) | source (string, lengths 85 to 188)
class FlaxElectraForCausalLM(FlaxElectraPreTrainedModel): module_class = FlaxElectraForCausalLMModule def prepare_inputs_for_generation(self, input_ids, max_length, attention_mask: Optional[jax.Array] = None): # initializing the cache batch_size, seq_length = input_ids.shape
3,820
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/electra/modeling_flax_electra.py
past_key_values = self.init_cache(batch_size, max_length) # Note that usually one would have to put 0's in the attention_mask for x > input_ids.shape[-1] and x < cache_length. # But since the decoder uses a causal mask, those positions are masked anyway. # Thus, we can create a single static att...
3,820
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/electra/modeling_flax_electra.py
def update_inputs_for_generation(self, model_outputs, model_kwargs): model_kwargs["past_key_values"] = model_outputs.past_key_values model_kwargs["position_ids"] = model_kwargs["position_ids"][:, -1:] + 1 return model_kwargs
3,820
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/electra/modeling_flax_electra.py
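The two hooks above cooperate during autoregressive decoding: `prepare_inputs_for_generation` builds the cache and a single static attention mask once, and `update_inputs_for_generation` threads the cache forward and advances `position_ids` by one each step. A minimal numpy sketch of that contract (the dict-based cache and helper names are stand-ins, not the transformers API):

```python
import numpy as np

def prepare_inputs(input_ids, max_length):
    batch_size, seq_length = input_ids.shape
    past_key_values = {"cache_index": 0, "max_length": max_length}  # stand-in for init_cache
    # static mask: the causal mask hides future positions anyway, as the comment above notes
    attention_mask = np.ones((batch_size, max_length), dtype=np.int32)
    position_ids = np.broadcast_to(np.arange(seq_length), (batch_size, seq_length))
    return {"past_key_values": past_key_values,
            "attention_mask": attention_mask,
            "position_ids": position_ids}

def update_inputs(model_outputs, model_kwargs):
    model_kwargs["past_key_values"] = model_outputs["past_key_values"]
    model_kwargs["position_ids"] = model_kwargs["position_ids"][:, -1:] + 1
    return model_kwargs

kwargs = prepare_inputs(np.array([[5, 7, 9]]), max_length=8)
kwargs = update_inputs({"past_key_values": kwargs["past_key_values"]}, kwargs)
print(kwargs["position_ids"])  # [[3]] -- only the next position is fed
```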
class ElectraConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of an [`ElectraModel`] or a [`TFElectraModel`]. It is used to instantiate an ELECTRA model according to the specified arguments, defining the model architecture. Instantiating a configuration with the de...
3,821
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/electra/configuration_electra.py
Args: vocab_size (`int`, *optional*, defaults to 30522): Vocabulary size of the ELECTRA model. Defines the number of different tokens that can be represented by the `input_ids` passed when calling [`ElectraModel`] or [`TFElectraModel`]. embedding_size (`int`, *optional*, default...
3,821
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/electra/configuration_electra.py
hidden_act (`str` or `Callable`, *optional*, defaults to `"gelu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"silu"` and `"gelu_new"` are supported. hidden_dropout_prob (`float`, *optional*, defaults to 0.1): ...
3,821
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/electra/configuration_electra.py
initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. layer_norm_eps (`float`, *optional*, defaults to 1e-12): The epsilon used by the layer normalization layers. summary_type (`...
3,821
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/electra/configuration_electra.py
Has to be one of the following options: - `"last"`: Take the last token hidden state (like XLNet). - `"first"`: Take the first token hidden state (like BERT). - `"mean"`: Take the mean of all tokens' hidden states. - `"cls_index"`: Supply a Tensor of class...
3,821
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/electra/configuration_electra.py
Pass `"gelu"` for a gelu activation to the output; any other value results in no activation. summary_last_dropout (`float`, *optional*, defaults to 0.0): Argument used when doing sequence summary. Used in the sequence classification and multiple choice models.
3,821
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/electra/configuration_electra.py
The dropout ratio to be used after the projection and activation. position_embedding_type (`str`, *optional*, defaults to `"absolute"`): Type of position embedding. Choose one of `"absolute"`, `"relative_key"`, `"relative_key_query"`. For positional embeddings use `"absolute"`. For more ...
3,821
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/electra/configuration_electra.py
The dropout ratio for the classification head.
3,821
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/electra/configuration_electra.py
Examples: ```python >>> from transformers import ElectraConfig, ElectraModel >>> # Initializing an ELECTRA electra-base-uncased style configuration >>> configuration = ElectraConfig() >>> # Initializing a model (with random weights) from the electra-base-uncased style configuration >>> model =...
3,821
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/electra/configuration_electra.py
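A hedged completion of the truncated `Examples` block above, assuming the standard transformers config-then-model workflow shown in other model docstrings:

```python
from transformers import ElectraConfig, ElectraModel

configuration = ElectraConfig()        # electra-base-uncased style defaults
model = ElectraModel(configuration)    # randomly initialized weights
configuration = model.config           # the configuration can be read back from the model
```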
def __init__( self, vocab_size=30522, embedding_size=128, hidden_size=256, num_hidden_layers=12, num_attention_heads=4, intermediate_size=1024, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_positi...
3,821
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/electra/configuration_electra.py
self.vocab_size = vocab_size self.embedding_size = embedding_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self....
3,821
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/electra/configuration_electra.py
class ElectraOnnxConfig(OnnxConfig): @property def inputs(self) -> Mapping[str, Mapping[int, str]]: if self.task == "multiple-choice": dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"} else: dynamic_axis = {0: "batch", 1: "sequence"} return OrderedDict( ...
3,822
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/electra/configuration_electra.py
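The snippet above is cut off before the `OrderedDict` contents; here is a sketch of what the `inputs` property plausibly returns, with the three common encoder input names as an assumption:

```python
from collections import OrderedDict

def electra_onnx_inputs(task: str):
    # multiple-choice inputs carry an extra "choice" axis between batch and sequence
    if task == "multiple-choice":
        dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
    else:
        dynamic_axis = {0: "batch", 1: "sequence"}
    return OrderedDict([
        ("input_ids", dynamic_axis),        # assumed input names
        ("attention_mask", dynamic_axis),
        ("token_type_ids", dynamic_axis),
    ])

print(electra_onnx_inputs("default"))
```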
class TFElectraSelfAttention(keras.layers.Layer): def __init__(self, config: ElectraConfig, **kwargs): super().__init__(**kwargs) if config.hidden_size % config.num_attention_heads != 0: raise ValueError( f"The hidden size ({config.hidden_size}) is not a multiple of the ...
3,823
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/electra/modeling_tf_electra.py
self.query = keras.layers.Dense( units=self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="query" ) self.key = keras.layers.Dense( units=self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="key" ) ...
3,823
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/electra/modeling_tf_electra.py
# Transpose the tensor from [batch_size, seq_length, num_attention_heads, attention_head_size] to [batch_size, num_attention_heads, seq_length, attention_head_size] return tf.transpose(tensor, perm=[0, 2, 1, 3]) def call( self, hidden_states: tf.Tensor, attention_mask: tf.Tensor, ...
3,823
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/electra/modeling_tf_electra.py
if is_cross_attention and past_key_value is not None: # reuse cached k, v for cross-attention key_layer = past_key_value[0] value_layer = past_key_value[1] attention_mask = encoder_attention_mask elif is_cross_attention: key_layer = self.transpose_for_scores(sel...
3,823
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/electra/modeling_tf_electra.py
key_layer = self.transpose_for_scores(self.key(inputs=hidden_states), batch_size) value_layer = self.transpose_for_scores(self.value(inputs=hidden_states), batch_size)
3,823
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/electra/modeling_tf_electra.py
query_layer = self.transpose_for_scores(mixed_query_layer, batch_size) if self.is_decoder: # if cross_attention save Tuple(tf.Tensor, tf.Tensor) of all cross attention key/value_states. # Further calls to cross_attention layer can then reuse all cross-attention # key/value_s...
3,823
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/electra/modeling_tf_electra.py
# Take the dot product between "query" and "key" to get the raw attention scores. # (batch size, num_heads, seq_len_q, seq_len_k) attention_scores = tf.matmul(query_layer, key_layer, transpose_b=True) dk = tf.cast(self.sqrt_att_head_size, dtype=attention_scores.dtype) attention_scores = ...
3,823
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/electra/modeling_tf_electra.py
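The raw scores above are the scaled dot product `QK^T / sqrt(head_size)`, computed per head on `(batch, heads, seq, head_size)` tensors. A self-contained numpy sketch of the same arithmetic (softmax written out by hand, masking and dropout omitted):

```python
import numpy as np

batch, heads, seq_len, head_size = 2, 4, 5, 64
rng = np.random.default_rng(0)
q = rng.normal(size=(batch, heads, seq_len, head_size))
k = rng.normal(size=(batch, heads, seq_len, head_size))
v = rng.normal(size=(batch, heads, seq_len, head_size))

# QK^T with the last two axes of k transposed, scaled by sqrt(head_size)
scores = q @ k.transpose(0, 1, 3, 2) / np.sqrt(head_size)           # (batch, heads, seq_q, seq_k)
probs = np.exp(scores) / np.exp(scores).sum(axis=-1, keepdims=True)  # softmax over seq_k
context = probs @ v                                                  # (batch, heads, seq_q, head_size)
print(context.shape)  # (2, 4, 5, 64)
```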
# Mask heads if we want to if head_mask is not None: attention_probs = tf.multiply(attention_probs, head_mask) attention_output = tf.matmul(attention_probs, value_layer) attention_output = tf.transpose(attention_output, perm=[0, 2, 1, 3]) # (batch_size, seq_len_q, all_head_...
3,823
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/electra/modeling_tf_electra.py
def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "query", None) is not None: with tf.name_scope(self.query.name): self.query.build([None, None, self.config.hidden_size]) if getattr(self, "key", None) is no...
3,823
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/electra/modeling_tf_electra.py
class TFElectraSelfOutput(keras.layers.Layer): def __init__(self, config: ElectraConfig, **kwargs): super().__init__(**kwargs) self.dense = keras.layers.Dense( units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense" ) self.Lay...
3,824
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/electra/modeling_tf_electra.py
def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "dense", None) is not None: with tf.name_scope(self.dense.name): self.dense.build([None, None, self.config.hidden_size]) if getattr(self, "LayerNorm", None)...
3,824
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/electra/modeling_tf_electra.py
class TFElectraAttention(keras.layers.Layer): def __init__(self, config: ElectraConfig, **kwargs): super().__init__(**kwargs) self.self_attention = TFElectraSelfAttention(config, name="self") self.dense_output = TFElectraSelfOutput(config, name="output") def prune_heads(self, heads): ...
3,825
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/electra/modeling_tf_electra.py
def call( self, input_tensor: tf.Tensor, attention_mask: tf.Tensor, head_mask: tf.Tensor, encoder_hidden_states: tf.Tensor, encoder_attention_mask: tf.Tensor, past_key_value: Tuple[tf.Tensor], output_attentions: bool, training: bool = False, ) ...
3,825
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/electra/modeling_tf_electra.py
outputs = (attention_output,) + self_outputs[1:]
3,825
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/electra/modeling_tf_electra.py
return outputs def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "self_attention", None) is not None: with tf.name_scope(self.self_attention.name): self.self_attention.build(None) if getattr(self, "den...
3,825
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/electra/modeling_tf_electra.py
class TFElectraIntermediate(keras.layers.Layer): def __init__(self, config: ElectraConfig, **kwargs): super().__init__(**kwargs) self.dense = keras.layers.Dense( units=config.intermediate_size, kernel_initializer=get_initializer(config.initializer_range), name="dense" ) ...
3,826
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/electra/modeling_tf_electra.py
def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "dense", None) is not None: with tf.name_scope(self.dense.name): self.dense.build([None, None, self.config.hidden_size])
3,826
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/electra/modeling_tf_electra.py
class TFElectraOutput(keras.layers.Layer): def __init__(self, config: ElectraConfig, **kwargs): super().__init__(**kwargs) self.dense = keras.layers.Dense( units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense" ) self.LayerNo...
3,827
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/electra/modeling_tf_electra.py
def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "dense", None) is not None: with tf.name_scope(self.dense.name): self.dense.build([None, None, self.config.intermediate_size]) if getattr(self, "LayerNorm",...
3,827
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/electra/modeling_tf_electra.py
class TFElectraLayer(keras.layers.Layer): def __init__(self, config: ElectraConfig, **kwargs): super().__init__(**kwargs) self.attention = TFElectraAttention(config, name="attention") self.is_decoder = config.is_decoder self.add_cross_attention = config.add_cross_attention i...
3,828
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/electra/modeling_tf_electra.py
def call( self, hidden_states: tf.Tensor, attention_mask: tf.Tensor, head_mask: tf.Tensor, encoder_hidden_states: tf.Tensor | None, encoder_attention_mask: tf.Tensor | None, past_key_value: Tuple[tf.Tensor] | None, output_attentions: bool, training...
3,828
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/electra/modeling_tf_electra.py
# if decoder, the last output is tuple of self-attn cache if self.is_decoder: outputs = self_attention_outputs[1:-1] present_key_value = self_attention_outputs[-1] else: outputs = self_attention_outputs[1:] # add self attentions if we output attention weights ...
3,828
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/electra/modeling_tf_electra.py
# cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None cross_attention_outputs = self.crossattention( input_tensor=attention_output, attention_mask=at...
3,828
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/electra/modeling_tf_electra.py
# add cross-attn cache to positions 3,4 of present_key_value tuple cross_attn_present_key_value = cross_attention_outputs[-1] present_key_value = present_key_value + cross_attn_present_key_value intermediate_output = self.intermediate(hidden_states=attention_output) layer_output...
3,828
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/electra/modeling_tf_electra.py
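The comments above describe a four-tensor cache per decoder layer: self-attention key/value first, then cross-attention key/value (called "positions 3,4" in the 1-based comments). A toy illustration of the slicing, with strings standing in for `tf.Tensor`s:

```python
self_k, self_v = "self_k", "self_v"        # stand-ins for tf.Tensors
cross_k, cross_v = "cross_k", "cross_v"

past_key_value = (self_k, self_v, cross_k, cross_v)

self_attn_past_key_value = past_key_value[:2]    # handed to self-attention
cross_attn_past_key_value = past_key_value[-2:]  # handed to cross-attention
print(self_attn_past_key_value, cross_attn_past_key_value)
```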
def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "attention", None) is not None: with tf.name_scope(self.attention.name): self.attention.build(None) if getattr(self, "intermediate", None) is not None: ...
3,828
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/electra/modeling_tf_electra.py
class TFElectraEncoder(keras.layers.Layer): def __init__(self, config: ElectraConfig, **kwargs): super().__init__(**kwargs) self.config = config self.layer = [TFElectraLayer(config, name=f"layer_._{i}") for i in range(config.num_hidden_layers)] def call( self, hidden_sta...
3,829
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/electra/modeling_tf_electra.py
next_decoder_cache = () if use_cache else None for i, layer_module in enumerate(self.layer): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) past_key_value = past_key_values[i] if past_key_values is not None else None layer_...
3,829
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/electra/modeling_tf_electra.py
if output_attentions: all_attentions = all_attentions + (layer_outputs[1],) if self.config.add_cross_attention and encoder_hidden_states is not None: all_cross_attentions = all_cross_attentions + (layer_outputs[2],) # Add last layer if output_hidden_s...
3,829
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/electra/modeling_tf_electra.py
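A minimal sketch of the encoder loop pattern above: hidden states are threaded through each layer while per-layer outputs are accumulated in tuples, and the final state is appended last (the "Add last layer" step). Lambdas stand in for `TFElectraLayer` calls:

```python
hidden_states = "h0"                                     # stand-in for a tf.Tensor
layers = [lambda h, i=i: f"h{i + 1}" for i in range(3)]  # stand-ins for layer modules

all_hidden_states = ()
for layer_module in layers:
    all_hidden_states = all_hidden_states + (hidden_states,)  # record the input to each layer
    hidden_states = layer_module(hidden_states)
all_hidden_states = all_hidden_states + (hidden_states,)      # add last layer
print(all_hidden_states)  # ('h0', 'h1', 'h2', 'h3')
```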
def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "layer", None) is not None: for layer in self.layer: with tf.name_scope(layer.name): layer.build(None)
3,829
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/electra/modeling_tf_electra.py
class TFElectraPooler(keras.layers.Layer): def __init__(self, config: ElectraConfig, **kwargs): super().__init__(**kwargs) self.dense = keras.layers.Dense( units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), activation="tanh", ...
3,830
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/electra/modeling_tf_electra.py
class TFElectraEmbeddings(keras.layers.Layer): """Construct the embeddings from word, position and token_type embeddings.""" def __init__(self, config: ElectraConfig, **kwargs): super().__init__(**kwargs) self.config = config self.embedding_size = config.embedding_size self.max...
3,831
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/electra/modeling_tf_electra.py
with tf.name_scope("token_type_embeddings"): self.token_type_embeddings = self.add_weight( name="embeddings", shape=[self.config.type_vocab_size, self.embedding_size], initializer=get_initializer(self.initializer_range), ) with tf.name_sco...
3,831
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/electra/modeling_tf_electra.py
# Copied from transformers.models.bert.modeling_tf_bert.TFBertEmbeddings.call def call( self, input_ids: tf.Tensor = None, position_ids: tf.Tensor = None, token_type_ids: tf.Tensor = None, inputs_embeds: tf.Tensor = None, past_key_values_length=0, training: bo...
3,831
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/electra/modeling_tf_electra.py
if position_ids is None: position_ids = tf.expand_dims( tf.range(start=past_key_values_length, limit=input_shape[1] + past_key_values_length), axis=0 ) position_embeds = tf.gather(params=self.position_embeddings, indices=position_ids) token_type_embeds = tf.gathe...
3,831
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/electra/modeling_tf_electra.py
class TFElectraDiscriminatorPredictions(keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) self.dense = keras.layers.Dense(config.hidden_size, name="dense") self.dense_prediction = keras.layers.Dense(1, name="dense_prediction") self.config = config...
3,832
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/electra/modeling_tf_electra.py
def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "dense", None) is not None: with tf.name_scope(self.dense.name): self.dense.build([None, None, self.config.hidden_size]) if getattr(self, "dense_prediction"...
3,832
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/electra/modeling_tf_electra.py
class TFElectraGeneratorPredictions(keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm") self.dense = keras.layers.Dense(config.embedding_size, name="dense") ...
3,833
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/electra/modeling_tf_electra.py
def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "LayerNorm", None) is not None: with tf.name_scope(self.LayerNorm.name): self.LayerNorm.build([None, None, self.config.embedding_size]) if getattr(self, "de...
3,833
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/electra/modeling_tf_electra.py
class TFElectraPreTrainedModel(TFPreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = ElectraConfig base_model_prefix = "electra" # When the model is loaded from a PT model _keys...
3,834
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/electra/modeling_tf_electra.py
class TFElectraMainLayer(keras.layers.Layer): config_class = ElectraConfig def __init__(self, config, **kwargs): super().__init__(**kwargs) self.config = config self.is_decoder = config.is_decoder self.embeddings = TFElectraEmbeddings(config, name="embeddings") if con...
3,835
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/electra/modeling_tf_electra.py
def get_extended_attention_mask(self, attention_mask, input_shape, dtype, past_key_values_length=0): batch_size, seq_length = input_shape if attention_mask is None: attention_mask = tf.fill(dims=(batch_size, seq_length + past_key_values_length), value=1) # We create a 3D attention ...
3,835
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/electra/modeling_tf_electra.py
mask_seq_length = seq_length + past_key_values_length # Copied from `modeling_tf_t5.py` # Provided a padding mask of dimensions [batch_size, mask_seq_length] # - if the model is a decoder, apply a causal mask in addition to the padding mask # - if the model is an encoder, make the mask b...
3,835
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/electra/modeling_tf_electra.py
extended_attention_mask, (attention_mask_shape[0], 1, attention_mask_shape[1], attention_mask_shape[2]) ) if past_key_values_length > 0: extended_attention_mask = extended_attention_mask[:, :, -seq_length:, :] else: extended_attention_mask = tf.reshape( ...
3,835
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/electra/modeling_tf_electra.py
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for # masked positions, this operation will create a tensor which is 0.0 for # positions we want to attend and -10000.0 for masked positions. # Since we are adding it to the raw scores before the softmax, this is # eff...
3,835
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/electra/modeling_tf_electra.py
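A numpy sketch of the additive-mask trick described above: a `(batch, seq)` padding mask of ones and zeros is broadcast to 4D and rescaled so attended positions contribute 0.0 and masked positions -10000.0 to the pre-softmax scores:

```python
import numpy as np

attention_mask = np.array([[1, 1, 1, 0, 0]], dtype=np.float32)  # (batch, seq)
extended = attention_mask[:, None, None, :]   # (batch, 1, 1, seq): broadcastable over heads and query positions
extended = (1.0 - extended) * -10000.0        # attended -> 0.0 (printed as -0.), masked -> -10000.0
print(extended[0, 0, 0])                      # [-0. -0. -0. -10000. -10000.]
```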
@unpack_inputs def call( self, input_ids: TFModelInputType | None = None, attention_mask: np.ndarray | tf.Tensor | None = None, token_type_ids: np.ndarray | tf.Tensor | None = None, position_ids: np.ndarray | tf.Tensor | None = None, head_mask: np.ndarray | tf.Tensor ...
3,835
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/electra/modeling_tf_electra.py
if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: input_shape = shape_list(input_ids) elif inputs_embeds is not None: input_shape = shape_list(in...
3,835
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/electra/modeling_tf_electra.py
hidden_states = self.embeddings( input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds, past_key_values_length=past_key_values_length, training=training, ) extended_attention_mask...
3,835
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/electra/modeling_tf_electra.py
# Copied from `modeling_tf_t5.py` with -1e9 -> -10000 if self.is_decoder and encoder_attention_mask is not None: # If a 2D or 3D attention mask is provided for the cross-attention # we need to make it broadcastable to [batch_size, num_heads, mask_seq_length, mask_seq_length] # w...
3,835
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/electra/modeling_tf_electra.py
# T5 has a mask that can compare sequence ids; we can simulate this here with this transposition # Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow/transformer/transformer_layers.py#L270 # encoder_extended_attention_mask = tf.math.equal(encoder...
3,835
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/electra/modeling_tf_electra.py
hidden_states = self.encoder( hidden_states=hidden_states, attention_mask=extended_attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_extended_attention_mask, past_key_values=past_key_va...
3,835
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/electra/modeling_tf_electra.py
def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "embeddings", None) is not None: with tf.name_scope(self.embeddings.name): self.embeddings.build(None) if getattr(self, "encoder", None) is not None: ...
3,835
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/electra/modeling_tf_electra.py
class TFElectraForPreTrainingOutput(ModelOutput): """ Output type of [`TFElectraForPreTraining`]. Args: loss (`tf.Tensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Total loss of the ELECTRA objective. logits (`tf.Tensor` of shape `(batch_size, sequence_le...
3,836
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/electra/modeling_tf_electra.py
Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads...
3,836
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/electra/modeling_tf_electra.py
class TFElectraModel(TFElectraPreTrainedModel): def __init__(self, config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.electra = TFElectraMainLayer(config, name="electra")
3,837
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/electra/modeling_tf_electra.py
@unpack_inputs @add_start_docstrings_to_model_forward(ELECTRA_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFBaseModelOutputWithPastAndCrossAttentions, config_class=_CONFIG_FOR_DOC, ) def call( ...
3,837
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/electra/modeling_tf_electra.py
output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, training: Optional[bool] = False, ) -> Union[TFBaseModelOutputWithPastAndCrossAttentions, Tuple[tf.Tensor]]: r""" encoder_hidden_states (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`...
3,837
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/electra/modeling_tf_electra.py
- 1 for tokens that are **not masked**, - 0 for tokens that are **masked**.
3,837
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/electra/modeling_tf_electra.py
past_key_values (`Tuple[Tuple[tf.Tensor]]` of length `config.n_layers`) contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that ...
3,837
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/electra/modeling_tf_electra.py
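An illustration of the `past_key_values` contract above: once a cache is returned, only the newest token needs to be fed, so the input shrinks from `(batch_size, sequence_length)` to `(batch_size, 1)`. Dummy arrays stand in for a real model call:

```python
import numpy as np

input_ids = np.array([[101, 7592, 2088]])  # full prompt on the first call, shape (1, 3)
# ... the first forward pass returns past_key_values; afterwards only the
# newest token is needed, as the docstring above describes:
next_input_ids = input_ids[:, -1:]         # shape (1, 1) instead of (1, sequence_length)
print(next_input_ids.shape)                # (1, 1)
```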
encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, use_cache=use_cache, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_s...
3,837
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/electra/modeling_tf_electra.py
return outputs def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "electra", None) is not None: with tf.name_scope(self.electra.name): self.electra.build(None)
3,837
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/electra/modeling_tf_electra.py
class TFElectraForPreTraining(TFElectraPreTrainedModel): def __init__(self, config, **kwargs): super().__init__(config, **kwargs) self.electra = TFElectraMainLayer(config, name="electra") self.discriminator_predictions = TFElectraDiscriminatorPredictions(config, name="discriminator_predicti...
3,838
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/electra/modeling_tf_electra.py
@unpack_inputs @add_start_docstrings_to_model_forward(ELECTRA_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @replace_return_docstrings(output_type=TFElectraForPreTrainingOutput, config_class=_CONFIG_FOR_DOC) def call( self, input_ids: TFModelInputType | None = None, attenti...
3,838
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/electra/modeling_tf_electra.py
```python >>> import tensorflow as tf >>> from transformers import AutoTokenizer, TFElectraForPreTraining
3,838
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/electra/modeling_tf_electra.py
>>> tokenizer = AutoTokenizer.from_pretrained("google/electra-small-discriminator") >>> model = TFElectraForPreTraining.from_pretrained("google/electra-small-discriminator") >>> input_ids = tf.constant(tokenizer.encode("Hello, my dog is cute"))[None, :] # Batch size 1 >>> outputs = model(input_...
3,838
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/electra/modeling_tf_electra.py
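A hedged completion of the truncated usage example above; the checkpoint and class names come from the snippet itself, and the reading of the logits as per-token replaced/original scores follows the discriminator head's purpose:

```python
import tensorflow as tf
from transformers import AutoTokenizer, TFElectraForPreTraining

tokenizer = AutoTokenizer.from_pretrained("google/electra-small-discriminator")
model = TFElectraForPreTraining.from_pretrained("google/electra-small-discriminator")

input_ids = tf.constant(tokenizer.encode("Hello, my dog is cute"))[None, :]  # batch size 1
outputs = model(input_ids)
scores = outputs.logits  # one discrimination score per token
```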
if not return_dict: return (logits,) + discriminator_hidden_states[1:] return TFElectraForPreTrainingOutput( logits=logits, hidden_states=discriminator_hidden_states.hidden_states, attentions=discriminator_hidden_states.attentions, ) def build(self, ...
3,838
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/electra/modeling_tf_electra.py
class TFElectraMaskedLMHead(keras.layers.Layer): def __init__(self, config, input_embeddings, **kwargs): super().__init__(**kwargs) self.config = config self.embedding_size = config.embedding_size self.input_embeddings = input_embeddings def build(self, input_shape): se...
3,839
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/electra/modeling_tf_electra.py
def call(self, hidden_states): seq_length = shape_list(tensor=hidden_states)[1] hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, self.embedding_size]) hidden_states = tf.matmul(a=hidden_states, b=self.input_embeddings.weight, transpose_b=True) hidden_states = tf.reshape(tensor...
3,839
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/electra/modeling_tf_electra.py
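The LM head above ties weights by multiplying hidden states with the transposed input embedding matrix (`transpose_b=True`). A numpy sketch of the reshape-matmul-reshape sequence with toy dimensions:

```python
import numpy as np

vocab_size, embedding_size, seq_length = 100, 8, 4
embedding_matrix = np.random.randn(vocab_size, embedding_size)  # the shared input embedding
hidden_states = np.random.randn(1, seq_length, embedding_size)

flat = hidden_states.reshape(-1, embedding_size)   # (batch * seq, embedding_size)
logits = flat @ embedding_matrix.T                 # matmul with the embedding transposed
logits = logits.reshape(1, seq_length, vocab_size)
print(logits.shape)  # (1, 4, 100)
```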
class TFElectraForMaskedLM(TFElectraPreTrainedModel, TFMaskedLanguageModelingLoss): def __init__(self, config, **kwargs): super().__init__(config, **kwargs) self.config = config self.electra = TFElectraMainLayer(config, name="electra") self.generator_predictions = TFElectraGenerator...
3,840
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/electra/modeling_tf_electra.py
@unpack_inputs @add_start_docstrings_to_model_forward(ELECTRA_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint="google/electra-small-generator", output_type=TFMaskedLMOutput, config_class=_CONFIG_FOR_DOC, mask="[MASK]", expec...
3,840
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/electra/modeling_tf_electra.py
) -> Union[TFMaskedLMOutput, Tuple[tf.Tensor]]: r""" labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with...
3,840
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/electra/modeling_tf_electra.py
prediction_scores = self.generator_predictions(generator_sequence_output, training=training) prediction_scores = self.generator_lm_head(prediction_scores, training=training) loss = None if labels is None else self.hf_compute_loss(labels, prediction_scores)
3,840
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/electra/modeling_tf_electra.py
if not return_dict: output = (prediction_scores,) + generator_hidden_states[1:] return ((loss,) + output) if loss is not None else output return TFMaskedLMOutput( loss=loss, logits=prediction_scores, hidden_states=generator_hidden_states.hidden_state...
3,840
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/electra/modeling_tf_electra.py
class TFElectraClassificationHead(keras.layers.Layer): """Head for sentence-level classification tasks.""" def __init__(self, config, **kwargs): super().__init__(**kwargs) self.dense = keras.layers.Dense( config.hidden_size, kernel_initializer=get_initializer(config.initializer_ran...
3,841
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/electra/modeling_tf_electra.py
def call(self, inputs, **kwargs): x = inputs[:, 0, :] # take <s> token (equiv. to [CLS]) x = self.dropout(x) x = self.dense(x) x = get_tf_activation("gelu")(x) # although BERT uses tanh here, the ELECTRA authors appear to have used gelu x = self.dropout(x) x = self.out_proj(...
3,841
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/electra/modeling_tf_electra.py
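A numpy sketch of the head's forward pass above: take the first ([CLS]-equivalent) token, apply dense + gelu, then project to the label space; dropout is omitted as it is inactive at inference, and the tanh gelu approximation and toy weight shapes are assumptions:

```python
import numpy as np

def gelu(x):  # tanh approximation of gelu
    return 0.5 * x * (1.0 + np.tanh(np.sqrt(2.0 / np.pi) * (x + 0.044715 * x ** 3)))

hidden_states = np.random.randn(2, 6, 16)  # (batch, seq, hidden)
W_dense = np.random.randn(16, 16)
W_out = np.random.randn(16, 3)             # num_labels = 3

x = hidden_states[:, 0, :]                 # first token, the <s>/[CLS] equivalent
x = gelu(x @ W_dense)                      # dense + gelu
logits = x @ W_out                         # out_proj
print(logits.shape)  # (2, 3)
```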
class TFElectraForSequenceClassification(TFElectraPreTrainedModel, TFSequenceClassificationLoss): def __init__(self, config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.num_labels = config.num_labels self.electra = TFElectraMainLayer(config, name="electra") s...
3,842
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/electra/modeling_tf_electra.py
@unpack_inputs @add_start_docstrings_to_model_forward(ELECTRA_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint="bhadresh-savani/electra-base-emotion", output_type=TFSequenceClassifierOutput, config_class=_CONFIG_FOR_DOC, expected_out...
3,842
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/electra/modeling_tf_electra.py
) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]: r""" labels (`tf.Tensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regres...
3,842
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/electra/modeling_tf_electra.py
loss = None if labels is None else self.hf_compute_loss(labels, logits)
3,842
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/electra/modeling_tf_electra.py
if not return_dict: output = (logits,) + outputs[1:] return ((loss,) + output) if loss is not None else output return TFSequenceClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions...
3,842
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/electra/modeling_tf_electra.py
class TFElectraForMultipleChoice(TFElectraPreTrainedModel, TFMultipleChoiceLoss): def __init__(self, config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.electra = TFElectraMainLayer(config, name="electra") self.sequence_summary = TFSequenceSummary( confi...
3,843
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/electra/modeling_tf_electra.py
@unpack_inputs @add_start_docstrings_to_model_forward(ELECTRA_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFMultipleChoiceModelOutput, config_class=_CONFIG_FOR_DOC, ) def call( ...
3,843
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/electra/modeling_tf_electra.py
labels (`tf.Tensor` of shape `(batch_size,)`, *optional*): Labels for computing the multiple choice classification loss. Indices should be in `[0, ..., num_choices - 1]` where `num_choices` is the size of the second dimension of the input tensors. (See `input_ids` above) """
3,843
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/electra/modeling_tf_electra.py
if input_ids is not None: num_choices = shape_list(input_ids)[1] seq_length = shape_list(input_ids)[2] else: num_choices = shape_list(inputs_embeds)[1] seq_length = shape_list(inputs_embeds)[2]
3,843
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/electra/modeling_tf_electra.py
flat_input_ids = tf.reshape(input_ids, (-1, seq_length)) if input_ids is not None else None flat_attention_mask = tf.reshape(attention_mask, (-1, seq_length)) if attention_mask is not None else None flat_token_type_ids = tf.reshape(token_type_ids, (-1, seq_length)) if token_type_ids is not None else Non...
3,843
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/electra/modeling_tf_electra.py
return_dict=return_dict, training=training, ) logits = self.sequence_summary(outputs[0]) logits = self.classifier(logits) reshaped_logits = tf.reshape(logits, (-1, num_choices)) loss = None if labels is None else self.hf_compute_loss(labels, reshaped_logits)
3,843
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/electra/modeling_tf_electra.py
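A numpy sketch of the multiple-choice reshaping above: `(batch, choices, seq)` inputs are flattened to `(batch * choices, seq)` for the encoder, and the resulting per-choice scores are reshaped back to `(batch, choices)` before the loss:

```python
import numpy as np

batch_size, num_choices, seq_length = 2, 4, 7
input_ids = np.zeros((batch_size, num_choices, seq_length), dtype=np.int64)

flat_input_ids = input_ids.reshape(-1, seq_length)            # (8, 7): one row per choice
per_choice_scores = np.random.randn(flat_input_ids.shape[0], 1)  # one score per flattened choice
reshaped_logits = per_choice_scores.reshape(-1, num_choices)  # (2, 4) for the loss
print(flat_input_ids.shape, reshaped_logits.shape)
```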