Columns:
  text          string, lengths 1 to 1.02k
  class_index   int64, values 0 to 10.8k
  source        string, lengths 85 to 188
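As a rough, illustrative sketch only (the preview does not name the dataset, so the dataset path below is a hypothetical placeholder), rows with this schema could be loaded and inspected with the Hugging Face `datasets` library:

```python
# Illustrative sketch, not part of the preview: assumes the rows come from a Hugging Face
# `datasets`-style dataset with the three columns described above. The dataset name
# "user/code-chunks" is a hypothetical placeholder.
from datasets import load_dataset

ds = load_dataset("user/code-chunks", split="train")
print(ds.features)          # expected: text (string), class_index (int64), source (string)

row = ds[0]
print(row["class_index"], row["source"])
print(row["text"][:200])    # each `text` cell holds a chunk of source code or a docstring
```

Sample rows: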
Labels are currently not supported.
  class_index: 3,786 | source: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/vilt/modeling_vilt.py
Returns: Examples: ```python >>> from transformers import ViltProcessor, ViltForImageAndTextRetrieval >>> import requests >>> from PIL import Image >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, strea...
  class_index: 3,786 | source: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/vilt/modeling_vilt.py
>>> # forward pass >>> scores = dict() >>> for text in texts: ... # prepare inputs ... encoding = processor(image, text, return_tensors="pt") ... outputs = model(**encoding) ... scores[text] = outputs.logits[0, :].item() ```""" return_dict ...
  class_index: 3,786 | source: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/vilt/modeling_vilt.py
pooler_output = outputs.pooler_output if return_dict else outputs[1] logits = self.rank_output(pooler_output) if not return_dict: output = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return SequenceClassifierOutput( los...
  class_index: 3,786 | source: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/vilt/modeling_vilt.py
class ViltForImagesAndTextClassification(ViltPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.vilt = ViltModel(config) # Classifier head num_images = config.num_images self.classifier = nn.Sequential( ...
  class_index: 3,787 | source: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/vilt/modeling_vilt.py
@add_start_docstrings_to_model_forward(VILT_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=ViltForImagesAndTextClassificationOutput, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None...
  class_index: 3,787 | source: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/vilt/modeling_vilt.py
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Binary classification labels.
  class_index: 3,787 | source: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/vilt/modeling_vilt.py
Returns: Examples: ```python >>> from transformers import ViltProcessor, ViltForImagesAndTextClassification >>> import requests >>> from PIL import Image >>> image1 = Image.open(requests.get("https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg", stream=True).raw) >>...
  class_index: 3,787 | source: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/vilt/modeling_vilt.py
>>> # forward pass >>> outputs = model(input_ids=encoding.input_ids, pixel_values=encoding.pixel_values.unsqueeze(0)) >>> logits = outputs.logits >>> idx = logits.argmax(-1).item() >>> print("Predicted answer:", model.config.id2label[idx]) Predicted answer: True ```""" ...
  class_index: 3,787 | source: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/vilt/modeling_vilt.py
num_images = pixel_values.shape[1] if pixel_values is not None else None if num_images is None: num_images = image_embeds.shape[1] if image_embeds is not None else None if num_images != self.config.num_images: raise ValueError( "Make sure to match the number of im...
  class_index: 3,787 | source: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/vilt/modeling_vilt.py
inputs_embeds=inputs_embeds, image_embeds=image_embeds[:, i, :, :] if image_embeds is not None else None, image_token_type_idx=i + 1, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ...
  class_index: 3,787 | source: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/vilt/modeling_vilt.py
pooled_output = torch.cat(pooler_outputs, dim=-1) logits = self.classifier(pooled_output) loss = None if labels is not None: loss_fct = CrossEntropyLoss() # move labels to correct device to enable PP labels = labels.to(logits.device) loss = loss_f...
  class_index: 3,787 | source: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/vilt/modeling_vilt.py
class ViltForTokenClassification(ViltPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.vilt = ViltModel(config, add_pooling_layer=False) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn....
  class_index: 3,788 | source: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/vilt/modeling_vilt.py
@add_start_docstrings_to_model_forward(VILT_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=TokenClassifierOutput, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, token_typ...
  class_index: 3,788 | source: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/vilt/modeling_vilt.py
Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
  class_index: 3,788 | source: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/vilt/modeling_vilt.py
Returns: """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.vilt( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, pixel_values=pixel_values, pixel_mask=pix...
  class_index: 3,788 | source: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/vilt/modeling_vilt.py
loss = None if labels is not None: loss_fct = CrossEntropyLoss() # move labels to correct device to enable PP labels = labels.to(logits.device) loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) if not return_dict: output = (lo...
  class_index: 3,788 | source: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/vilt/modeling_vilt.py
class FlaxElectraForPreTrainingOutput(ModelOutput): """ Output type of [`ElectraForPreTraining`]. Args: logits (`jnp.ndarray` of shape `(batch_size, sequence_length, config.vocab_size)`): Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). ...
  class_index: 3,789 | source: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/electra/modeling_flax_electra.py
Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_h...
  class_index: 3,789 | source: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/electra/modeling_flax_electra.py
class FlaxElectraEmbeddings(nn.Module): """Construct the embeddings from word, position and token_type embeddings.""" config: ElectraConfig dtype: jnp.dtype = jnp.float32 # the dtype of the computation
  class_index: 3,790 | source: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/electra/modeling_flax_electra.py
def setup(self): self.word_embeddings = nn.Embed( self.config.vocab_size, self.config.embedding_size, embedding_init=jax.nn.initializers.normal(stddev=self.config.initializer_range), ) self.position_embeddings = nn.Embed( self.config.max_position_e...
  class_index: 3,790 | source: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/electra/modeling_flax_electra.py
# Copied from transformers.models.bert.modeling_flax_bert.FlaxBertEmbeddings.__call__ def __call__(self, input_ids, token_type_ids, position_ids, attention_mask, deterministic: bool = True): # Embed inputs_embeds = self.word_embeddings(input_ids.astype("i4")) position_embeds = self.position_...
  class_index: 3,790 | source: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/electra/modeling_flax_electra.py
class FlaxElectraSelfAttention(nn.Module): config: ElectraConfig causal: bool = False dtype: jnp.dtype = jnp.float32 # the dtype of the computation def setup(self): self.head_dim = self.config.hidden_size // self.config.num_attention_heads if self.config.hidden_size % self.config.num_a...
  class_index: 3,791 | source: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/electra/modeling_flax_electra.py
self.query = nn.Dense( self.config.hidden_size, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(self.config.initializer_range), ) self.key = nn.Dense( self.config.hidden_size, dtype=self.dtype, kernel_init=jax.nn.initialize...
  class_index: 3,791 | source: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/electra/modeling_flax_electra.py
def _merge_heads(self, hidden_states): return hidden_states.reshape(hidden_states.shape[:2] + (self.config.hidden_size,))
  class_index: 3,791 | source: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/electra/modeling_flax_electra.py
@nn.compact # Copied from transformers.models.bart.modeling_flax_bart.FlaxBartAttention._concatenate_to_cache def _concatenate_to_cache(self, key, value, query, attention_mask): """ This function takes projected key, value states from a single input token and concatenates the states to cached ...
  class_index: 3,791 | source: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/electra/modeling_flax_electra.py
if is_initialized: *batch_dims, max_length, num_heads, depth_per_head = cached_key.value.shape # update key, value caches with our new 1d spatial slices cur_index = cache_index.value indices = (0,) * len(batch_dims) + (cur_index, 0, 0) key = lax.dynamic_update...
  class_index: 3,791 | source: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/electra/modeling_flax_electra.py
tuple(batch_dims) + (1, num_updated_cache_vectors, max_length), ) attention_mask = combine_masks(pad_mask, attention_mask) return key, value, attention_mask
  class_index: 3,791 | source: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/electra/modeling_flax_electra.py
def __call__( self, hidden_states, attention_mask, layer_head_mask, key_value_states: Optional[jnp.ndarray] = None, init_cache: bool = False, deterministic=True, output_attentions: bool = False, ): # if key_value_states are provided this layer ...
  class_index: 3,791 | source: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/electra/modeling_flax_electra.py
query_states = self._split_heads(query_states) key_states = self._split_heads(key_states) value_states = self._split_heads(value_states) # handle cache prepare causal attention mask if self.causal: query_length, key_length = query_states.shape[1], key_states.shape[1] ...
  class_index: 3,791 | source: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/electra/modeling_flax_electra.py
# combine masks if needed if attention_mask is not None and self.causal: attention_mask = jnp.broadcast_to(jnp.expand_dims(attention_mask, axis=(-3, -2)), causal_mask.shape) attention_mask = combine_masks(attention_mask, causal_mask) elif self.causal: attention_mask =...
  class_index: 3,791 | source: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/electra/modeling_flax_electra.py
# Convert the boolean attention mask to an attention bias. if attention_mask is not None: # attention mask in the form of attention bias attention_bias = lax.select( attention_mask > 0, jnp.full(attention_mask.shape, 0.0).astype(self.dtype), ...
  class_index: 3,791 | source: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/electra/modeling_flax_electra.py
# Mask heads if we want to if layer_head_mask is not None: attn_weights = jnp.einsum("...hqk,h->...hqk", attn_weights, layer_head_mask) attn_output = jnp.einsum("...hqk,...khd->...qhd", attn_weights, value_states) attn_output = attn_output.reshape(attn_output.shape[:2] + (-1,)) ...
  class_index: 3,791 | source: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/electra/modeling_flax_electra.py
class FlaxElectraSelfOutput(nn.Module): config: ElectraConfig dtype: jnp.dtype = jnp.float32 # the dtype of the computation def setup(self): self.dense = nn.Dense( self.config.hidden_size, kernel_init=jax.nn.initializers.normal(self.config.initializer_range), dt...
  class_index: 3,792 | source: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/electra/modeling_flax_electra.py
class FlaxElectraAttention(nn.Module): config: ElectraConfig causal: bool = False dtype: jnp.dtype = jnp.float32 def setup(self): self.self = FlaxElectraSelfAttention(self.config, causal=self.causal, dtype=self.dtype) self.output = FlaxElectraSelfOutput(self.config, dtype=self.dtype)
  class_index: 3,793 | source: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/electra/modeling_flax_electra.py
def __call__( self, hidden_states, attention_mask, layer_head_mask, key_value_states=None, init_cache=False, deterministic=True, output_attentions: bool = False, ): # Attention mask comes in as attention_mask.shape == (*batch_sizes, kv_length) ...
  class_index: 3,793 | source: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/electra/modeling_flax_electra.py
if output_attentions: outputs += (attn_outputs[1],) return outputs
  class_index: 3,793 | source: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/electra/modeling_flax_electra.py
class FlaxElectraIntermediate(nn.Module): config: ElectraConfig dtype: jnp.dtype = jnp.float32 # the dtype of the computation def setup(self): self.dense = nn.Dense( self.config.intermediate_size, kernel_init=jax.nn.initializers.normal(self.config.initializer_range), ...
  class_index: 3,794 | source: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/electra/modeling_flax_electra.py
class FlaxElectraOutput(nn.Module): config: ElectraConfig dtype: jnp.dtype = jnp.float32 # the dtype of the computation def setup(self): self.dense = nn.Dense( self.config.hidden_size, kernel_init=jax.nn.initializers.normal(self.config.initializer_range), dtype=...
  class_index: 3,795 | source: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/electra/modeling_flax_electra.py
class FlaxElectraLayer(nn.Module): config: ElectraConfig dtype: jnp.dtype = jnp.float32 # the dtype of the computation def setup(self): self.attention = FlaxElectraAttention(self.config, causal=self.config.is_decoder, dtype=self.dtype) self.intermediate = FlaxElectraIntermediate(self.confi...
  class_index: 3,796 | source: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/electra/modeling_flax_electra.py
def __call__( self, hidden_states, attention_mask, layer_head_mask, encoder_hidden_states: Optional[jnp.ndarray] = None, encoder_attention_mask: Optional[jnp.ndarray] = None, init_cache: bool = False, deterministic: bool = True, output_attentions: ...
  class_index: 3,796 | source: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/electra/modeling_flax_electra.py
# Cross-Attention Block if encoder_hidden_states is not None: cross_attention_outputs = self.crossattention( attention_output, attention_mask=encoder_attention_mask, layer_head_mask=layer_head_mask, key_value_states=encoder_hidden_state...
  class_index: 3,796 | source: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/electra/modeling_flax_electra.py
class FlaxElectraLayerCollection(nn.Module): config: ElectraConfig dtype: jnp.dtype = jnp.float32 # the dtype of the computation gradient_checkpointing: bool = False def setup(self): if self.gradient_checkpointing: FlaxElectraCheckpointLayer = remat(FlaxElectraLayer, static_argnums...
  class_index: 3,797 | source: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/electra/modeling_flax_electra.py
def __call__( self, hidden_states, attention_mask, head_mask, encoder_hidden_states: Optional[jnp.ndarray] = None, encoder_attention_mask: Optional[jnp.ndarray] = None, init_cache: bool = False, deterministic: bool = True, output_attentions: bool =...
  class_index: 3,797 | source: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/electra/modeling_flax_electra.py
# Check if head_mask has a correct number of layers specified if desired if head_mask is not None: if head_mask.shape[0] != (len(self.layers)): raise ValueError( f"The head_mask should be specified for {len(self.layers)} layers, but it is for " ...
  class_index: 3,797 | source: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/electra/modeling_flax_electra.py
if encoder_hidden_states is not None: all_cross_attentions += (layer_outputs[2],) if output_hidden_states: all_hidden_states += (hidden_states,) outputs = (hidden_states, all_hidden_states, all_attentions, all_cross_attentions) if not return_dict: r...
  class_index: 3,797 | source: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/electra/modeling_flax_electra.py
class FlaxElectraEncoder(nn.Module): config: ElectraConfig dtype: jnp.dtype = jnp.float32 # the dtype of the computation gradient_checkpointing: bool = False def setup(self): self.layer = FlaxElectraLayerCollection( self.config, dtype=self.dtype, gradient_ch...
  class_index: 3,798 | source: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/electra/modeling_flax_electra.py
def __call__( self, hidden_states, attention_mask, head_mask, encoder_hidden_states: Optional[jnp.ndarray] = None, encoder_attention_mask: Optional[jnp.ndarray] = None, init_cache: bool = False, deterministic: bool = True, output_attentions: bool =...
  class_index: 3,798 | source: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/electra/modeling_flax_electra.py
class FlaxElectraGeneratorPredictions(nn.Module): config: ElectraConfig dtype: jnp.dtype = jnp.float32 def setup(self): self.LayerNorm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype) self.dense = nn.Dense(self.config.embedding_size, dtype=self.dtype) def __call__(s...
  class_index: 3,799 | source: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/electra/modeling_flax_electra.py
class FlaxElectraDiscriminatorPredictions(nn.Module): """Prediction module for the discriminator, made up of two dense layers.""" config: ElectraConfig dtype: jnp.dtype = jnp.float32 def setup(self): self.dense = nn.Dense(self.config.hidden_size, dtype=self.dtype) self.dense_prediction...
  class_index: 3,800 | source: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/electra/modeling_flax_electra.py
class FlaxElectraPreTrainedModel(FlaxPreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = ElectraConfig base_model_prefix = "electra" module_class: nn.Module = None def __init__...
  class_index: 3,801 | source: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/electra/modeling_flax_electra.py
# Copied from transformers.models.bert.modeling_flax_bert.FlaxBertPreTrainedModel.enable_gradient_checkpointing def enable_gradient_checkpointing(self): self._module = self.module_class( config=self.config, dtype=self.dtype, gradient_checkpointing=True, ) # C...
  class_index: 3,801 | source: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/electra/modeling_flax_electra.py
params_rng, dropout_rng = jax.random.split(rng) rngs = {"params": params_rng, "dropout": dropout_rng} if self.config.add_cross_attention: encoder_hidden_states = jnp.zeros(input_shape + (self.config.hidden_size,)) encoder_attention_mask = attention_mask module_init_o...
  class_index: 3,801 | source: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/electra/modeling_flax_electra.py
if params is not None: random_params = flatten_dict(unfreeze(random_params)) params = flatten_dict(unfreeze(params)) for missing_key in self._missing_keys: params[missing_key] = random_params[missing_key] self._missing_keys = set() return freez...
  class_index: 3,801 | source: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/electra/modeling_flax_electra.py
# Copied from transformers.models.bart.modeling_flax_bart.FlaxBartDecoderPreTrainedModel.init_cache def init_cache(self, batch_size, max_length): r""" Args: batch_size (`int`): batch_size used for fast auto-regressive decoding. Defines the batch size of the initialized ca...
  class_index: 3,801 | source: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/electra/modeling_flax_electra.py
@add_start_docstrings_to_model_forward(ELECTRA_INPUTS_DOCSTRING.format("batch_size, sequence_length")) def __call__( self, input_ids, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, encoder_hidden_states=None, encoder_atten...
  class_index: 3,801 | source: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/electra/modeling_flax_electra.py
# init input tensors if not passed if token_type_ids is None: token_type_ids = jnp.ones_like(input_ids) if position_ids is None: position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_ids.shape) if attention_mask is None: atte...
  class_index: 3,801 | source: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/electra/modeling_flax_electra.py
if self.config.add_cross_attention: # if past_key_values are passed then cache is already initialized a private flag init_cache has to be passed # down to ensure cache is used. It has to be made sure that cache is marked as mutable so that it can be # changed by FlaxElectraAttention ...
  class_index: 3,801 | source: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/electra/modeling_flax_electra.py
outputs = self.module.apply( inputs, jnp.array(input_ids, dtype="i4"), jnp.array(attention_mask, dtype="i4"), token_type_ids=jnp.array(token_type_ids, dtype="i4"), position_ids=jnp.array(position_ids, dtype="i4"), head_mask=...
  class_index: 3,801 | source: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/electra/modeling_flax_electra.py
# add updated cache to model output if past_key_values is not None and return_dict: outputs, past_key_values = outputs outputs["past_key_values"] = unfreeze(past_key_values["cache"]) return outputs elif past_key_values is not None and not return_di...
  class_index: 3,801 | source: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/electra/modeling_flax_electra.py
else: outputs = self.module.apply( inputs, jnp.array(input_ids, dtype="i4"), jnp.array(attention_mask, dtype="i4"), token_type_ids=jnp.array(token_type_ids, dtype="i4"), position_ids=jnp.array(position_ids, dtype="i4"), ...
  class_index: 3,801 | source: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/electra/modeling_flax_electra.py
class FlaxElectraModule(nn.Module): config: ElectraConfig dtype: jnp.dtype = jnp.float32 # the dtype of the computation gradient_checkpointing: bool = False def setup(self): self.embeddings = FlaxElectraEmbeddings(self.config, dtype=self.dtype) if self.config.embedding_size != self.con...
  class_index: 3,802 | source: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/electra/modeling_flax_electra.py
def __call__( self, input_ids, attention_mask, token_type_ids, position_ids, head_mask: Optional[np.ndarray] = None, encoder_hidden_states: Optional[jnp.ndarray] = None, encoder_attention_mask: Optional[jnp.ndarray] = None, init_cache: bool = False...
  class_index: 3,802 | source: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/electra/modeling_flax_electra.py
return self.encoder( embeddings, attention_mask, head_mask=head_mask, deterministic=deterministic, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, init_cache=init_cache, output_att...
  class_index: 3,802 | source: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/electra/modeling_flax_electra.py
class FlaxElectraModel(FlaxElectraPreTrainedModel): module_class = FlaxElectraModule
  class_index: 3,803 | source: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/electra/modeling_flax_electra.py
class FlaxElectraTiedDense(nn.Module): embedding_size: int dtype: jnp.dtype = jnp.float32 precision = None bias_init: Callable[..., np.ndarray] = jax.nn.initializers.zeros def setup(self): self.bias = self.param("bias", self.bias_init, (self.embedding_size,)) def __call__(self, x, kern...
  class_index: 3,804 | source: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/electra/modeling_flax_electra.py
class FlaxElectraForMaskedLMModule(nn.Module): config: ElectraConfig dtype: jnp.dtype = jnp.float32 gradient_checkpointing: bool = False def setup(self): self.electra = FlaxElectraModule( config=self.config, dtype=self.dtype, gradient_checkpointing=self.gradient_checkpointing ...
  class_index: 3,805 | source: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/electra/modeling_flax_electra.py
def __call__( self, input_ids, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, deterministic: bool = True, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, ): ...
  class_index: 3,805 | source: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/electra/modeling_flax_electra.py
if self.config.tie_word_embeddings: shared_embedding = self.electra.variables["params"]["embeddings"]["word_embeddings"]["embedding"] prediction_scores = self.generator_lm_head(prediction_scores, shared_embedding.T) else: prediction_scores = self.generator_lm_head(prediction_...
  class_index: 3,805 | source: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/electra/modeling_flax_electra.py
class FlaxElectraForMaskedLM(FlaxElectraPreTrainedModel): module_class = FlaxElectraForMaskedLMModule
  class_index: 3,806 | source: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/electra/modeling_flax_electra.py
class FlaxElectraForPreTrainingModule(nn.Module): config: ElectraConfig dtype: jnp.dtype = jnp.float32 gradient_checkpointing: bool = False def setup(self): self.electra = FlaxElectraModule( config=self.config, dtype=self.dtype, gradient_checkpointing=self.gradient_checkpointing ...
  class_index: 3,807 | source: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/electra/modeling_flax_electra.py
def __call__( self, input_ids, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, deterministic: bool = True, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, ): ...
  class_index: 3,807 | source: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/electra/modeling_flax_electra.py
return FlaxElectraForPreTrainingOutput( logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, )
  class_index: 3,807 | source: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/electra/modeling_flax_electra.py
class FlaxElectraForPreTraining(FlaxElectraPreTrainedModel): module_class = FlaxElectraForPreTrainingModule
  class_index: 3,808 | source: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/electra/modeling_flax_electra.py
class FlaxElectraForTokenClassificationModule(nn.Module): config: ElectraConfig dtype: jnp.dtype = jnp.float32 gradient_checkpointing: bool = False def setup(self): self.electra = FlaxElectraModule( config=self.config, dtype=self.dtype, gradient_checkpointing=self.gradient_checkpoin...
  class_index: 3,809 | source: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/electra/modeling_flax_electra.py
def __call__( self, input_ids, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, deterministic: bool = True, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, ): ...
  class_index: 3,809 | source: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/electra/modeling_flax_electra.py
return FlaxTokenClassifierOutput( logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, )
  class_index: 3,809 | source: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/electra/modeling_flax_electra.py
class FlaxElectraForTokenClassification(FlaxElectraPreTrainedModel): module_class = FlaxElectraForTokenClassificationModule
  class_index: 3,810 | source: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/electra/modeling_flax_electra.py
class FlaxElectraSequenceSummary(nn.Module): r""" Compute a single vector summary of a sequence hidden states. Args: config ([`PretrainedConfig`]): The config used by the model. Relevant arguments in the config class of the model are (refer to the actual config class of your...
  class_index: 3,811 | source: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/electra/modeling_flax_electra.py
- **summary_use_proj** (`bool`) -- Add a projection after the vector extraction. - **summary_proj_to_labels** (`bool`) -- If `True`, the projection outputs to `config.num_labels` classes (otherwise to `config.hidden_size`). - **summary_activation** (`Optional[str]`) -- Set to `"tan...
  class_index: 3,811 | source: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/electra/modeling_flax_electra.py
def setup(self): self.summary = identity if hasattr(self.config, "summary_use_proj") and self.config.summary_use_proj: if ( hasattr(self.config, "summary_proj_to_labels") and self.config.summary_proj_to_labels and self.config.num_labels > 0 ...
  class_index: 3,811 | source: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/electra/modeling_flax_electra.py
self.last_dropout = identity if hasattr(self.config, "summary_last_dropout") and self.config.summary_last_dropout > 0: self.last_dropout = nn.Dropout(self.config.summary_last_dropout) def __call__(self, hidden_states, cls_index=None, deterministic: bool = True): """ Compute a si...
  class_index: 3,811 | source: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/electra/modeling_flax_electra.py
Returns: `jnp.ndarray`: The summary of the sequence hidden states. """ # NOTE: this doest "first" type summary always output = hidden_states[:, 0] output = self.first_dropout(output, deterministic=deterministic) output = self.summary(output) output = self.acti...
  class_index: 3,811 | source: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/electra/modeling_flax_electra.py
class FlaxElectraForMultipleChoiceModule(nn.Module): config: ElectraConfig dtype: jnp.dtype = jnp.float32 gradient_checkpointing: bool = False def setup(self): self.electra = FlaxElectraModule( config=self.config, dtype=self.dtype, gradient_checkpointing=self.gradient_checkpointing ...
  class_index: 3,812 | source: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/electra/modeling_flax_electra.py
def __call__( self, input_ids, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, deterministic: bool = True, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, ): ...
  class_index: 3,812 | source: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/electra/modeling_flax_electra.py
# Model outputs = self.electra( input_ids, attention_mask, token_type_ids, position_ids, head_mask, deterministic=deterministic, output_attentions=output_attentions, output_hidden_states=output_hidden_states, ...
  class_index: 3,812 | source: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/electra/modeling_flax_electra.py
class FlaxElectraForMultipleChoice(FlaxElectraPreTrainedModel): module_class = FlaxElectraForMultipleChoiceModule
  class_index: 3,813 | source: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/electra/modeling_flax_electra.py
class FlaxElectraForQuestionAnsweringModule(nn.Module): config: ElectraConfig dtype: jnp.dtype = jnp.float32 gradient_checkpointing: bool = False def setup(self): self.electra = FlaxElectraModule( config=self.config, dtype=self.dtype, gradient_checkpointing=self.gradient_checkpointi...
  class_index: 3,814 | source: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/electra/modeling_flax_electra.py
def __call__( self, input_ids, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, deterministic: bool = True, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, ): ...
  class_index: 3,814 | source: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/electra/modeling_flax_electra.py
if not return_dict: return (start_logits, end_logits) + outputs[1:] return FlaxQuestionAnsweringModelOutput( start_logits=start_logits, end_logits=end_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, )
  class_index: 3,814 | source: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/electra/modeling_flax_electra.py
class FlaxElectraForQuestionAnswering(FlaxElectraPreTrainedModel): module_class = FlaxElectraForQuestionAnsweringModule
  class_index: 3,815 | source: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/electra/modeling_flax_electra.py
class FlaxElectraClassificationHead(nn.Module): """Head for sentence-level classification tasks.""" config: ElectraConfig dtype: jnp.dtype = jnp.float32 def setup(self): self.dense = nn.Dense(self.config.hidden_size, dtype=self.dtype) classifier_dropout = ( self.config.clas...
  class_index: 3,816 | source: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/electra/modeling_flax_electra.py
class FlaxElectraForSequenceClassificationModule(nn.Module): config: ElectraConfig dtype: jnp.dtype = jnp.float32 gradient_checkpointing: bool = False def setup(self): self.electra = FlaxElectraModule( config=self.config, dtype=self.dtype, gradient_checkpointing=self.gradient_checkp...
  class_index: 3,817 | source: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/electra/modeling_flax_electra.py
def __call__( self, input_ids, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, deterministic: bool = True, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, ): ...
  class_index: 3,817 | source: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/electra/modeling_flax_electra.py
return FlaxSequenceClassifierOutput( logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, )
  class_index: 3,817 | source: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/electra/modeling_flax_electra.py
class FlaxElectraForSequenceClassification(FlaxElectraPreTrainedModel): module_class = FlaxElectraForSequenceClassificationModule
  class_index: 3,818 | source: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/electra/modeling_flax_electra.py
class FlaxElectraForCausalLMModule(nn.Module): config: ElectraConfig dtype: jnp.dtype = jnp.float32 gradient_checkpointing: bool = False def setup(self): self.electra = FlaxElectraModule( config=self.config, dtype=self.dtype, gradient_checkpointing=self.gradient_checkpointing ...
  class_index: 3,819 | source: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/electra/modeling_flax_electra.py
def __call__( self, input_ids, attention_mask: Optional[jnp.ndarray] = None, token_type_ids: Optional[jnp.ndarray] = None, position_ids: Optional[jnp.ndarray] = None, head_mask: Optional[jnp.ndarray] = None, encoder_hidden_states: Optional[jnp.ndarray] = None, ...
  class_index: 3,819 | source: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/electra/modeling_flax_electra.py
output_hidden_states=output_hidden_states, return_dict=return_dict, ) hidden_states = outputs[0] prediction_scores = self.generator_predictions(hidden_states)
  class_index: 3,819 | source: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/electra/modeling_flax_electra.py
if self.config.tie_word_embeddings: shared_embedding = self.electra.variables["params"]["embeddings"]["word_embeddings"]["embedding"] prediction_scores = self.generator_lm_head(prediction_scores, shared_embedding.T) else: prediction_scores = self.generator_lm_head(prediction_...
  class_index: 3,819 | source: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/electra/modeling_flax_electra.py