Dataset columns: text (string, lengths 31 to 243k), type (string, 1 distinct value), start (int64, 36 to 275k), end (int64, 286 to 280k), depth (int64, 0 to 1), filepath (string, lengths 85 to 188), parent_class (string, 3 distinct values), class_index (int64, 0 to 10.8k).
class FlaxRobertaPreLayerNormSelfOutput(nn.Module): config: RobertaPreLayerNormConfig dtype: jnp.dtype = jnp.float32 # the dtype of the computation def setup(self): self.dense = nn.Dense( self.config.hidden_size, kernel_init=jax.nn.initializers.normal(self.config.initializer_range), dtype=self.dtype, ) self.dropout = nn.Dropout(rate=self.config.hidden_dropout_prob) def __call__(self, hidden_states, input_tensor, deterministic: bool = True): hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states, deterministic=deterministic) hidden_states = hidden_states + input_tensor return hidden_states
class_definition
16,097
16,833
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/roberta_prelayernorm/modeling_flax_roberta_prelayernorm.py
null
8,700
class FlaxRobertaPreLayerNormAttention(nn.Module): config: RobertaPreLayerNormConfig causal: bool = False dtype: jnp.dtype = jnp.float32 def setup(self): self.self = FlaxRobertaPreLayerNormSelfAttention(self.config, causal=self.causal, dtype=self.dtype) self.output = FlaxRobertaPreLayerNormSelfOutput(self.config, dtype=self.dtype) self.LayerNorm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype) def __call__( self, hidden_states, attention_mask, layer_head_mask, key_value_states=None, init_cache=False, deterministic=True, output_attentions: bool = False, ): hidden_states_pre_layer_norm = self.LayerNorm(hidden_states) # Attention mask comes in as attention_mask.shape == (*batch_sizes, kv_length) # FLAX expects: attention_mask.shape == (*batch_sizes, 1, 1, kv_length) such that it is broadcastable # with attn_weights.shape == (*batch_sizes, num_heads, q_length, kv_length) attn_outputs = self.self( hidden_states_pre_layer_norm, attention_mask, layer_head_mask=layer_head_mask, key_value_states=key_value_states, init_cache=init_cache, deterministic=deterministic, output_attentions=output_attentions, ) attn_output = attn_outputs[0] hidden_states = self.output(attn_output, hidden_states, deterministic=deterministic) outputs = (hidden_states,) if output_attentions: outputs += (attn_outputs[1],) return outputs
class_definition
16,836
18,476
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/roberta_prelayernorm/modeling_flax_roberta_prelayernorm.py
null
8,701
class FlaxRobertaPreLayerNormIntermediate(nn.Module): config: RobertaPreLayerNormConfig dtype: jnp.dtype = jnp.float32 # the dtype of the computation def setup(self): self.LayerNorm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype) self.dense = nn.Dense( self.config.intermediate_size, kernel_init=jax.nn.initializers.normal(self.config.initializer_range), dtype=self.dtype, ) self.activation = ACT2FN[self.config.hidden_act] def __call__(self, hidden_states): hidden_states = self.LayerNorm(hidden_states) hidden_states = self.dense(hidden_states) hidden_states = self.activation(hidden_states) return hidden_states
class_definition
18,479
19,233
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/roberta_prelayernorm/modeling_flax_roberta_prelayernorm.py
null
8,702
class FlaxRobertaPreLayerNormOutput(nn.Module): config: RobertaPreLayerNormConfig dtype: jnp.dtype = jnp.float32 # the dtype of the computation def setup(self): self.dense = nn.Dense( self.config.hidden_size, kernel_init=jax.nn.initializers.normal(self.config.initializer_range), dtype=self.dtype, ) self.dropout = nn.Dropout(rate=self.config.hidden_dropout_prob) def __call__(self, hidden_states, attention_output, deterministic: bool = True): hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states, deterministic=deterministic) hidden_states = hidden_states + attention_output return hidden_states
class_definition
19,236
19,976
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/roberta_prelayernorm/modeling_flax_roberta_prelayernorm.py
null
8,703
class FlaxRobertaPreLayerNormLayer(nn.Module): config: RobertaPreLayerNormConfig dtype: jnp.dtype = jnp.float32 # the dtype of the computation def setup(self): self.attention = FlaxRobertaPreLayerNormAttention(self.config, causal=self.config.is_decoder, dtype=self.dtype) self.intermediate = FlaxRobertaPreLayerNormIntermediate(self.config, dtype=self.dtype) self.output = FlaxRobertaPreLayerNormOutput(self.config, dtype=self.dtype) if self.config.add_cross_attention: self.crossattention = FlaxRobertaPreLayerNormAttention(self.config, causal=False, dtype=self.dtype) def __call__( self, hidden_states, attention_mask, layer_head_mask, encoder_hidden_states: Optional[jnp.ndarray] = None, encoder_attention_mask: Optional[jnp.ndarray] = None, init_cache: bool = False, deterministic: bool = True, output_attentions: bool = False, ): # Self Attention attention_outputs = self.attention( hidden_states, attention_mask, layer_head_mask=layer_head_mask, init_cache=init_cache, deterministic=deterministic, output_attentions=output_attentions, ) attention_output = attention_outputs[0] # Cross-Attention Block if encoder_hidden_states is not None: cross_attention_outputs = self.crossattention( attention_output, attention_mask=encoder_attention_mask, layer_head_mask=layer_head_mask, key_value_states=encoder_hidden_states, deterministic=deterministic, output_attentions=output_attentions, ) attention_output = cross_attention_outputs[0] hidden_states = self.intermediate(attention_output) hidden_states = self.output(hidden_states, attention_output, deterministic=deterministic) outputs = (hidden_states,) if output_attentions: outputs += (attention_outputs[1],) if encoder_hidden_states is not None: outputs += (cross_attention_outputs[1],) return outputs
class_definition
20,082
22,312
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/roberta_prelayernorm/modeling_flax_roberta_prelayernorm.py
null
8,704
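The attention, intermediate, and output rows above all apply LayerNorm before the sublayer and add the un-normalized input back as the residual. A minimal standalone Flax sketch of that pre-LayerNorm pattern (illustrative only, not taken from the source file):

```python
import flax.linen as nn


class PreLNBlock(nn.Module):
    hidden_size: int  # must match x.shape[-1] so the residual add is well-formed

    @nn.compact
    def __call__(self, x):
        h = nn.LayerNorm()(x)              # normalize *before* the sublayer
        h = nn.Dense(self.hidden_size)(h)  # stand-in for the attention / feed-forward sublayer
        return x + h                       # residual uses the raw, un-normalized input
```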
class FlaxRobertaPreLayerNormLayerCollection(nn.Module): config: RobertaPreLayerNormConfig dtype: jnp.dtype = jnp.float32 # the dtype of the computation gradient_checkpointing: bool = False def setup(self): if self.gradient_checkpointing: FlaxRobertaPreLayerNormCheckpointLayer = remat(FlaxRobertaPreLayerNormLayer, static_argnums=(5, 6, 7)) self.layers = [ FlaxRobertaPreLayerNormCheckpointLayer(self.config, name=str(i), dtype=self.dtype) for i in range(self.config.num_hidden_layers) ] else: self.layers = [ FlaxRobertaPreLayerNormLayer(self.config, name=str(i), dtype=self.dtype) for i in range(self.config.num_hidden_layers) ] def __call__( self, hidden_states, attention_mask, head_mask, encoder_hidden_states: Optional[jnp.ndarray] = None, encoder_attention_mask: Optional[jnp.ndarray] = None, init_cache: bool = False, deterministic: bool = True, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, ): all_attentions = () if output_attentions else None all_hidden_states = () if output_hidden_states else None all_cross_attentions = () if (output_attentions and encoder_hidden_states is not None) else None # Check if head_mask has a correct number of layers specified if desired if head_mask is not None: if head_mask.shape[0] != (len(self.layers)): raise ValueError( f"The head_mask should be specified for {len(self.layers)} layers, but it is for " f" {head_mask.shape[0]}." ) for i, layer in enumerate(self.layers): if output_hidden_states: all_hidden_states += (hidden_states,) layer_outputs = layer( hidden_states, attention_mask, head_mask[i] if head_mask is not None else None, encoder_hidden_states, encoder_attention_mask, init_cache, deterministic, output_attentions, ) hidden_states = layer_outputs[0] if output_attentions: all_attentions += (layer_outputs[1],) if encoder_hidden_states is not None: all_cross_attentions += (layer_outputs[2],) if output_hidden_states: all_hidden_states += (hidden_states,) outputs = (hidden_states, all_hidden_states, all_attentions, all_cross_attentions) if not return_dict: return tuple(v for v in outputs if v is not None) return FlaxBaseModelOutputWithPastAndCrossAttentions( last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions, cross_attentions=all_cross_attentions, )
class_definition
22,428
25,526
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/roberta_prelayernorm/modeling_flax_roberta_prelayernorm.py
null
8,705
class FlaxRobertaPreLayerNormEncoder(nn.Module): config: RobertaPreLayerNormConfig dtype: jnp.dtype = jnp.float32 # the dtype of the computation gradient_checkpointing: bool = False def setup(self): self.layer = FlaxRobertaPreLayerNormLayerCollection( self.config, dtype=self.dtype, gradient_checkpointing=self.gradient_checkpointing, ) def __call__( self, hidden_states, attention_mask, head_mask, encoder_hidden_states: Optional[jnp.ndarray] = None, encoder_attention_mask: Optional[jnp.ndarray] = None, init_cache: bool = False, deterministic: bool = True, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, ): return self.layer( hidden_states, attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, init_cache=init_cache, deterministic=deterministic, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, )
class_definition
25,634
26,916
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/roberta_prelayernorm/modeling_flax_roberta_prelayernorm.py
null
8,706
class FlaxRobertaPreLayerNormPooler(nn.Module): config: RobertaPreLayerNormConfig dtype: jnp.dtype = jnp.float32 # the dtype of the computation def setup(self): self.dense = nn.Dense( self.config.hidden_size, kernel_init=jax.nn.initializers.normal(self.config.initializer_range), dtype=self.dtype, ) def __call__(self, hidden_states): cls_hidden_state = hidden_states[:, 0] cls_hidden_state = self.dense(cls_hidden_state) return nn.tanh(cls_hidden_state)
class_definition
27,023
27,572
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/roberta_prelayernorm/modeling_flax_roberta_prelayernorm.py
null
8,707
class FlaxRobertaPreLayerNormLMHead(nn.Module): config: RobertaPreLayerNormConfig dtype: jnp.dtype = jnp.float32 bias_init: Callable[..., np.ndarray] = jax.nn.initializers.zeros def setup(self): self.dense = nn.Dense( self.config.hidden_size, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(self.config.initializer_range), ) self.layer_norm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype) self.decoder = nn.Dense( self.config.vocab_size, dtype=self.dtype, use_bias=False, kernel_init=jax.nn.initializers.normal(self.config.initializer_range), ) self.bias = self.param("bias", self.bias_init, (self.config.vocab_size,)) def __call__(self, hidden_states, shared_embedding=None): hidden_states = self.dense(hidden_states) hidden_states = ACT2FN["gelu"](hidden_states) hidden_states = self.layer_norm(hidden_states) if shared_embedding is not None: hidden_states = self.decoder.apply({"params": {"kernel": shared_embedding.T}}, hidden_states) else: hidden_states = self.decoder(hidden_states) bias = jnp.asarray(self.bias, self.dtype) hidden_states += bias return hidden_states
class_definition
27,691
29,038
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/roberta_prelayernorm/modeling_flax_roberta_prelayernorm.py
null
8,708
class FlaxRobertaPreLayerNormClassificationHead(nn.Module): config: RobertaPreLayerNormConfig dtype: jnp.dtype = jnp.float32 def setup(self): self.dense = nn.Dense( self.config.hidden_size, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(self.config.initializer_range), ) classifier_dropout = ( self.config.classifier_dropout if self.config.classifier_dropout is not None else self.config.hidden_dropout_prob ) self.dropout = nn.Dropout(rate=classifier_dropout) self.out_proj = nn.Dense( self.config.num_labels, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(self.config.initializer_range), ) def __call__(self, hidden_states, deterministic=True): hidden_states = hidden_states[:, 0, :] # take <s> token (equiv. to [CLS]) hidden_states = self.dropout(hidden_states, deterministic=deterministic) hidden_states = self.dense(hidden_states) hidden_states = nn.tanh(hidden_states) hidden_states = self.dropout(hidden_states, deterministic=deterministic) hidden_states = self.out_proj(hidden_states) return hidden_states
class_definition
29,169
30,441
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/roberta_prelayernorm/modeling_flax_roberta_prelayernorm.py
null
8,709
class FlaxRobertaPreLayerNormPreTrainedModel(FlaxPreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = RobertaPreLayerNormConfig base_model_prefix = "roberta_prelayernorm" module_class: nn.Module = None def __init__( self, config: RobertaPreLayerNormConfig, input_shape: Tuple = (1, 1), seed: int = 0, dtype: jnp.dtype = jnp.float32, _do_init: bool = True, gradient_checkpointing: bool = False, **kwargs, ): module = self.module_class(config=config, dtype=dtype, gradient_checkpointing=gradient_checkpointing, **kwargs) super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init) # Copied from transformers.models.bert.modeling_flax_bert.FlaxBertPreTrainedModel.enable_gradient_checkpointing def enable_gradient_checkpointing(self): self._module = self.module_class( config=self.config, dtype=self.dtype, gradient_checkpointing=True, ) def init_weights(self, rng: jax.random.PRNGKey, input_shape: Tuple, params: FrozenDict = None) -> FrozenDict: # init input tensors input_ids = jnp.zeros(input_shape, dtype="i4") token_type_ids = jnp.ones_like(input_ids) position_ids = create_position_ids_from_input_ids(input_ids, self.config.pad_token_id) attention_mask = jnp.ones_like(input_ids) head_mask = jnp.ones((self.config.num_hidden_layers, self.config.num_attention_heads)) params_rng, dropout_rng = jax.random.split(rng) rngs = {"params": params_rng, "dropout": dropout_rng} if self.config.add_cross_attention: encoder_hidden_states = jnp.zeros(input_shape + (self.config.hidden_size,)) encoder_attention_mask = attention_mask module_init_outputs = self.module.init( rngs, input_ids, attention_mask, token_type_ids, position_ids, head_mask, encoder_hidden_states, encoder_attention_mask, return_dict=False, ) else: module_init_outputs = self.module.init( rngs, input_ids, attention_mask, token_type_ids, position_ids, head_mask, return_dict=False ) random_params = module_init_outputs["params"] if params is not None: random_params = flatten_dict(unfreeze(random_params)) params = flatten_dict(unfreeze(params)) for missing_key in self._missing_keys: params[missing_key] = random_params[missing_key] self._missing_keys = set() return freeze(unflatten_dict(params)) else: return random_params # Copied from transformers.models.bart.modeling_flax_bart.FlaxBartDecoderPreTrainedModel.init_cache def init_cache(self, batch_size, max_length): r""" Args: batch_size (`int`): batch_size used for fast auto-regressive decoding. Defines the batch size of the initialized cache. max_length (`int`): maximum possible length for auto-regressive decoding. Defines the sequence length of the initialized cache. 
""" # init input variables to retrieve cache input_ids = jnp.ones((batch_size, max_length), dtype="i4") attention_mask = jnp.ones_like(input_ids, dtype="i4") position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_ids.shape) init_variables = self.module.init( jax.random.PRNGKey(0), input_ids, attention_mask, position_ids, return_dict=False, init_cache=True ) return unfreeze(init_variables["cache"]) @add_start_docstrings_to_model_forward(ROBERTA_PRELAYERNORM_INPUTS_DOCSTRING.format("batch_size, sequence_length")) def __call__( self, input_ids, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, params: dict = None, dropout_rng: jax.random.PRNGKey = None, train: bool = False, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, past_key_values: dict = None, ): output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.return_dict # init input tensors if not passed if token_type_ids is None: token_type_ids = jnp.zeros_like(input_ids) if position_ids is None: position_ids = create_position_ids_from_input_ids(input_ids, self.config.pad_token_id) if attention_mask is None: attention_mask = jnp.ones_like(input_ids) if head_mask is None: head_mask = jnp.ones((self.config.num_hidden_layers, self.config.num_attention_heads)) # Handle any PRNG if needed rngs = {} if dropout_rng is not None: rngs["dropout"] = dropout_rng inputs = {"params": params or self.params} if self.config.add_cross_attention: # if past_key_values are passed then cache is already initialized a private flag init_cache has to be passed # down to ensure cache is used. It has to be made sure that cache is marked as mutable so that it can be # changed by FlaxRobertaPreLayerNormAttention module if past_key_values: inputs["cache"] = past_key_values mutable = ["cache"] else: mutable = False outputs = self.module.apply( inputs, jnp.array(input_ids, dtype="i4"), jnp.array(attention_mask, dtype="i4"), token_type_ids=jnp.array(token_type_ids, dtype="i4"), position_ids=jnp.array(position_ids, dtype="i4"), head_mask=jnp.array(head_mask, dtype="i4"), encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, deterministic=not train, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, rngs=rngs, mutable=mutable, ) # add updated cache to model output if past_key_values is not None and return_dict: outputs, past_key_values = outputs outputs["past_key_values"] = unfreeze(past_key_values["cache"]) return outputs elif past_key_values is not None and not return_dict: outputs, past_key_values = outputs outputs = outputs[:1] + (unfreeze(past_key_values["cache"]),) + outputs[1:] else: outputs = self.module.apply( inputs, jnp.array(input_ids, dtype="i4"), jnp.array(attention_mask, dtype="i4"), token_type_ids=jnp.array(token_type_ids, dtype="i4"), position_ids=jnp.array(position_ids, dtype="i4"), head_mask=jnp.array(head_mask, dtype="i4"), deterministic=not train, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, rngs=rngs, ) return outputs
class_definition
30,629
38,666
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/roberta_prelayernorm/modeling_flax_roberta_prelayernorm.py
null
8,710
class FlaxRobertaPreLayerNormModule(nn.Module): config: RobertaPreLayerNormConfig dtype: jnp.dtype = jnp.float32 # the dtype of the computation add_pooling_layer: bool = True gradient_checkpointing: bool = False def setup(self): self.embeddings = FlaxRobertaPreLayerNormEmbeddings(self.config, dtype=self.dtype) self.encoder = FlaxRobertaPreLayerNormEncoder( self.config, dtype=self.dtype, gradient_checkpointing=self.gradient_checkpointing, ) self.LayerNorm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype) self.pooler = FlaxRobertaPreLayerNormPooler(self.config, dtype=self.dtype) def __call__( self, input_ids, attention_mask, token_type_ids: Optional[jnp.ndarray] = None, position_ids: Optional[jnp.ndarray] = None, head_mask: Optional[jnp.ndarray] = None, encoder_hidden_states: Optional[jnp.ndarray] = None, encoder_attention_mask: Optional[jnp.ndarray] = None, init_cache: bool = False, deterministic: bool = True, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, ): # make sure `token_type_ids` is correctly initialized when not passed if token_type_ids is None: token_type_ids = jnp.zeros_like(input_ids) # make sure `position_ids` is correctly initialized when not passed if position_ids is None: position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_ids.shape) hidden_states = self.embeddings( input_ids, token_type_ids, position_ids, attention_mask, deterministic=deterministic ) outputs = self.encoder( hidden_states, attention_mask, head_mask=head_mask, deterministic=deterministic, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, init_cache=init_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) hidden_states = outputs[0] hidden_states = self.LayerNorm(hidden_states) pooled = self.pooler(hidden_states) if self.add_pooling_layer else None if not return_dict: # if pooled is None, don't return it if pooled is None: return (hidden_states,) + outputs[1:] return (hidden_states, pooled) + outputs[1:] return FlaxBaseModelOutputWithPoolingAndCrossAttentions( last_hidden_state=hidden_states, pooler_output=pooled, hidden_states=outputs.hidden_states, attentions=outputs.attentions, cross_attentions=outputs.cross_attentions, )
class_definition
38,669
41,610
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/roberta_prelayernorm/modeling_flax_roberta_prelayernorm.py
null
8,711
class FlaxRobertaPreLayerNormModel(FlaxRobertaPreLayerNormPreTrainedModel): module_class = FlaxRobertaPreLayerNormModule
class_definition
41,913
42,037
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/roberta_prelayernorm/modeling_flax_roberta_prelayernorm.py
null
8,712
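A hedged usage sketch for the bare Flax encoder above; the checkpoint name is an assumption, not part of the source file:

```python
from transformers import AutoTokenizer, FlaxRobertaPreLayerNormModel

# checkpoint name assumed for illustration
tokenizer = AutoTokenizer.from_pretrained("andreasmadsen/efficient_mlm_m0.40")
model = FlaxRobertaPreLayerNormModel.from_pretrained("andreasmadsen/efficient_mlm_m0.40")
model.enable_gradient_checkpointing()  # optional: re-creates the module with remat-wrapped layers

inputs = tokenizer("Hello, my dog is cute", return_tensors="np")
outputs = model(**inputs)
last_hidden_state = outputs.last_hidden_state  # (batch, seq_len, hidden_size)
```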
class FlaxRobertaPreLayerNormForMaskedLMModule(nn.Module): config: RobertaPreLayerNormConfig dtype: jnp.dtype = jnp.float32 gradient_checkpointing: bool = False def setup(self): self.roberta_prelayernorm = FlaxRobertaPreLayerNormModule( config=self.config, add_pooling_layer=False, dtype=self.dtype, gradient_checkpointing=self.gradient_checkpointing, ) self.lm_head = FlaxRobertaPreLayerNormLMHead(config=self.config, dtype=self.dtype) def __call__( self, input_ids, attention_mask, token_type_ids, position_ids, head_mask, deterministic: bool = True, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, ): # Model outputs = self.roberta_prelayernorm( input_ids, attention_mask, token_type_ids, position_ids, head_mask, deterministic=deterministic, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) hidden_states = outputs[0] if self.config.tie_word_embeddings: shared_embedding = self.roberta_prelayernorm.variables["params"]["embeddings"]["word_embeddings"][ "embedding" ] else: shared_embedding = None # Compute the prediction scores logits = self.lm_head(hidden_states, shared_embedding=shared_embedding) if not return_dict: return (logits,) + outputs[1:] return FlaxMaskedLMOutput( logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, )
class_definition
42,347
44,197
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/roberta_prelayernorm/modeling_flax_roberta_prelayernorm.py
null
8,713
class FlaxRobertaPreLayerNormForMaskedLM(FlaxRobertaPreLayerNormPreTrainedModel): module_class = FlaxRobertaPreLayerNormForMaskedLMModule
class_definition
44,461
44,602
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/roberta_prelayernorm/modeling_flax_roberta_prelayernorm.py
null
8,714
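A hedged sketch of masked-token prediction with the masked-LM head; again the checkpoint name is an assumption:

```python
import jax.numpy as jnp
from transformers import AutoTokenizer, FlaxRobertaPreLayerNormForMaskedLM

tokenizer = AutoTokenizer.from_pretrained("andreasmadsen/efficient_mlm_m0.40")  # assumed checkpoint
model = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained("andreasmadsen/efficient_mlm_m0.40")

inputs = tokenizer("The capital of France is <mask>.", return_tensors="np")
logits = model(**inputs).logits
mask_index = int((inputs["input_ids"][0] == tokenizer.mask_token_id).argmax())
predicted_id = int(jnp.argmax(logits[0, mask_index]))
print(tokenizer.decode(predicted_id))
```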
class FlaxRobertaPreLayerNormForSequenceClassificationModule(nn.Module): config: RobertaPreLayerNormConfig dtype: jnp.dtype = jnp.float32 gradient_checkpointing: bool = False def setup(self): self.roberta_prelayernorm = FlaxRobertaPreLayerNormModule( config=self.config, dtype=self.dtype, add_pooling_layer=False, gradient_checkpointing=self.gradient_checkpointing, ) self.classifier = FlaxRobertaPreLayerNormClassificationHead(config=self.config, dtype=self.dtype) def __call__( self, input_ids, attention_mask, token_type_ids, position_ids, head_mask, deterministic: bool = True, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, ): # Model outputs = self.roberta_prelayernorm( input_ids, attention_mask, token_type_ids, position_ids, head_mask, deterministic=deterministic, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] logits = self.classifier(sequence_output, deterministic=deterministic) if not return_dict: return (logits,) + outputs[1:] return FlaxSequenceClassifierOutput( logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, )
class_definition
44,951
46,553
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/roberta_prelayernorm/modeling_flax_roberta_prelayernorm.py
null
8,715
class FlaxRobertaPreLayerNormForSequenceClassification(FlaxRobertaPreLayerNormPreTrainedModel): module_class = FlaxRobertaPreLayerNormForSequenceClassificationModule
class_definition
46,941
47,110
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/roberta_prelayernorm/modeling_flax_roberta_prelayernorm.py
null
8,716
class FlaxRobertaPreLayerNormForMultipleChoiceModule(nn.Module): config: RobertaPreLayerNormConfig dtype: jnp.dtype = jnp.float32 gradient_checkpointing: bool = False def setup(self): self.roberta_prelayernorm = FlaxRobertaPreLayerNormModule( config=self.config, dtype=self.dtype, gradient_checkpointing=self.gradient_checkpointing, ) self.dropout = nn.Dropout(rate=self.config.hidden_dropout_prob) self.classifier = nn.Dense(1, dtype=self.dtype) def __call__( self, input_ids, attention_mask, token_type_ids, position_ids, head_mask, deterministic: bool = True, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, ): num_choices = input_ids.shape[1] input_ids = input_ids.reshape(-1, input_ids.shape[-1]) if input_ids is not None else None attention_mask = attention_mask.reshape(-1, attention_mask.shape[-1]) if attention_mask is not None else None token_type_ids = token_type_ids.reshape(-1, token_type_ids.shape[-1]) if token_type_ids is not None else None position_ids = position_ids.reshape(-1, position_ids.shape[-1]) if position_ids is not None else None # Model outputs = self.roberta_prelayernorm( input_ids, attention_mask, token_type_ids, position_ids, head_mask, deterministic=deterministic, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) pooled_output = outputs[1] pooled_output = self.dropout(pooled_output, deterministic=deterministic) logits = self.classifier(pooled_output) reshaped_logits = logits.reshape(-1, num_choices) if not return_dict: return (reshaped_logits,) + outputs[2:] return FlaxMultipleChoiceModelOutput( logits=reshaped_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, )
class_definition
47,445
49,636
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/roberta_prelayernorm/modeling_flax_roberta_prelayernorm.py
null
8,717
class FlaxRobertaPreLayerNormForMultipleChoice(FlaxRobertaPreLayerNormPreTrainedModel): module_class = FlaxRobertaPreLayerNormForMultipleChoiceModule
class_definition
50,025
50,178
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/roberta_prelayernorm/modeling_flax_roberta_prelayernorm.py
null
8,718
class FlaxRobertaPreLayerNormForTokenClassificationModule(nn.Module): config: RobertaPreLayerNormConfig dtype: jnp.dtype = jnp.float32 gradient_checkpointing: bool = False def setup(self): self.roberta_prelayernorm = FlaxRobertaPreLayerNormModule( config=self.config, dtype=self.dtype, add_pooling_layer=False, gradient_checkpointing=self.gradient_checkpointing, ) classifier_dropout = ( self.config.classifier_dropout if self.config.classifier_dropout is not None else self.config.hidden_dropout_prob ) self.dropout = nn.Dropout(rate=classifier_dropout) self.classifier = nn.Dense(self.config.num_labels, dtype=self.dtype) def __call__( self, input_ids, attention_mask, token_type_ids, position_ids, head_mask, deterministic: bool = True, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, ): # Model outputs = self.roberta_prelayernorm( input_ids, attention_mask, token_type_ids, position_ids, head_mask, deterministic=deterministic, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) hidden_states = outputs[0] hidden_states = self.dropout(hidden_states, deterministic=deterministic) logits = self.classifier(hidden_states) if not return_dict: return (logits,) + outputs[1:] return FlaxTokenClassifierOutput( logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, )
class_definition
50,679
52,544
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/roberta_prelayernorm/modeling_flax_roberta_prelayernorm.py
null
8,719
class FlaxRobertaPreLayerNormForTokenClassification(FlaxRobertaPreLayerNormPreTrainedModel): module_class = FlaxRobertaPreLayerNormForTokenClassificationModule
class_definition
52,936
53,099
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/roberta_prelayernorm/modeling_flax_roberta_prelayernorm.py
null
8,720
class FlaxRobertaPreLayerNormForQuestionAnsweringModule(nn.Module): config: RobertaPreLayerNormConfig dtype: jnp.dtype = jnp.float32 gradient_checkpointing: bool = False def setup(self): self.roberta_prelayernorm = FlaxRobertaPreLayerNormModule( config=self.config, dtype=self.dtype, add_pooling_layer=False, gradient_checkpointing=self.gradient_checkpointing, ) self.qa_outputs = nn.Dense(self.config.num_labels, dtype=self.dtype) def __call__( self, input_ids, attention_mask, token_type_ids, position_ids, head_mask, deterministic: bool = True, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, ): # Model outputs = self.roberta_prelayernorm( input_ids, attention_mask, token_type_ids, position_ids, head_mask, deterministic=deterministic, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) hidden_states = outputs[0] logits = self.qa_outputs(hidden_states) start_logits, end_logits = jnp.split(logits, self.config.num_labels, axis=-1) start_logits = start_logits.squeeze(-1) end_logits = end_logits.squeeze(-1) if not return_dict: return (start_logits, end_logits) + outputs[1:] return FlaxQuestionAnsweringModelOutput( start_logits=start_logits, end_logits=end_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, )
class_definition
53,431
55,213
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/roberta_prelayernorm/modeling_flax_roberta_prelayernorm.py
null
8,721
class FlaxRobertaPreLayerNormForQuestionAnswering(FlaxRobertaPreLayerNormPreTrainedModel): module_class = FlaxRobertaPreLayerNormForQuestionAnsweringModule
class_definition
55,661
55,820
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/roberta_prelayernorm/modeling_flax_roberta_prelayernorm.py
null
8,722
class FlaxRobertaPreLayerNormForCausalLMModule(nn.Module): config: RobertaPreLayerNormConfig dtype: jnp.dtype = jnp.float32 gradient_checkpointing: bool = False def setup(self): self.roberta_prelayernorm = FlaxRobertaPreLayerNormModule( config=self.config, add_pooling_layer=False, dtype=self.dtype, gradient_checkpointing=self.gradient_checkpointing, ) self.lm_head = FlaxRobertaPreLayerNormLMHead(config=self.config, dtype=self.dtype) def __call__( self, input_ids, attention_mask, position_ids, token_type_ids: Optional[jnp.ndarray] = None, head_mask: Optional[jnp.ndarray] = None, encoder_hidden_states: Optional[jnp.ndarray] = None, encoder_attention_mask: Optional[jnp.ndarray] = None, init_cache: bool = False, deterministic: bool = True, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, ): # Model outputs = self.roberta_prelayernorm( input_ids, attention_mask, token_type_ids, position_ids, head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, init_cache=init_cache, deterministic=deterministic, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) hidden_states = outputs[0] if self.config.tie_word_embeddings: shared_embedding = self.roberta_prelayernorm.variables["params"]["embeddings"]["word_embeddings"][ "embedding" ] else: shared_embedding = None # Compute the prediction scores logits = self.lm_head(hidden_states, shared_embedding=shared_embedding) if not return_dict: return (logits,) + outputs[1:] return FlaxCausalLMOutputWithCrossAttentions( logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, cross_attentions=outputs.cross_attentions, )
class_definition
56,147
58,439
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/roberta_prelayernorm/modeling_flax_roberta_prelayernorm.py
null
8,723
class FlaxRobertaPreLayerNormForCausalLM(FlaxRobertaPreLayerNormPreTrainedModel): module_class = FlaxRobertaPreLayerNormForCausalLMModule def prepare_inputs_for_generation(self, input_ids, max_length, attention_mask: Optional[jax.Array] = None): # initializing the cache batch_size, seq_length = input_ids.shape past_key_values = self.init_cache(batch_size, max_length) # Note that usually one would have to put 0's in the attention_mask for x > input_ids.shape[-1] and x < cache_length. # But since the decoder uses a causal mask, those positions are masked anyway. # Thus, we can create a single static attention_mask here, which is more efficient for compilation extended_attention_mask = jnp.ones((batch_size, max_length), dtype="i4") if attention_mask is not None: position_ids = attention_mask.cumsum(axis=-1) - 1 extended_attention_mask = lax.dynamic_update_slice(extended_attention_mask, attention_mask, (0, 0)) else: position_ids = jnp.broadcast_to(jnp.arange(seq_length, dtype="i4")[None, :], (batch_size, seq_length)) return { "past_key_values": past_key_values, "attention_mask": extended_attention_mask, "position_ids": position_ids, } def update_inputs_for_generation(self, model_outputs, model_kwargs): model_kwargs["past_key_values"] = model_outputs.past_key_values model_kwargs["position_ids"] = model_kwargs["position_ids"][:, -1:] + 1 return model_kwargs
class_definition
58,800
60,373
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/roberta_prelayernorm/modeling_flax_roberta_prelayernorm.py
null
8,724
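A hedged sketch of next-token scoring with the causal-LM head. Published RoBERTa-PreLayerNorm checkpoints are encoders, so treat this as illustrative; the checkpoint name is an assumption and a decoder-style config (`is_decoder=True`) would normally be expected:

```python
from transformers import AutoTokenizer, FlaxRobertaPreLayerNormForCausalLM

tokenizer = AutoTokenizer.from_pretrained("andreasmadsen/efficient_mlm_m0.40")  # assumed checkpoint
model = FlaxRobertaPreLayerNormForCausalLM.from_pretrained("andreasmadsen/efficient_mlm_m0.40")

inputs = tokenizer("Hello, my dog is cute", return_tensors="np")
outputs = model(**inputs)
next_token_logits = outputs.logits[:, -1]  # scores for the token following the prompt
```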
class ByteRewriter: """ Byte rewriter class for MyT5 tokenizer. This class is used to rewrite bytes using a hash tree. The hash tree is constructed from a set of rewriting rules. Args: rewriting_rules (`str` or `Dict[str, str]`): A path to a json file containing the rewriting rules or a dictionary containing the rewriting rules. """ LEAF = "[LEAF]" def __init__(self, rewriting_rules: Union[str, Dict[str, str]]): if isinstance(rewriting_rules, str): with open(rewriting_rules, "r") as f: rewriting_rules = json.load(f) elif not isinstance(rewriting_rules, dict): raise ValueError( f"rewriting_rules should be either a path to json file or a dict, got {type(rewriting_rules)}" ) self.hash_tree = self.construct_hash_tree(rewriting_rules) reverse_rewriting_rules = {v: k for k, v in rewriting_rules.items()} self.reverse_hash_tree = self.construct_hash_tree(reverse_rewriting_rules) def add_leaf(self, hash_tree: Dict[str, Union[dict, List[str]]], byte_in_sequence: str, byte_out_sequence: str): """ Add a leaf with the output byte sequence to the hash tree. """ byte_in_list = byte_in_sequence.split(" ") byte_out_list = byte_out_sequence.split(" ") tree_pointer = hash_tree for b in byte_in_list: if b not in tree_pointer: tree_pointer[b] = {} tree_pointer = tree_pointer[b] tree_pointer[self.LEAF] = byte_out_list def construct_hash_tree(self, rewriting_rules: Dict[str, str]) -> Dict[str, Union[dict, List[str]]]: """ Construct a hash tree for rewritten byte sequences. """ hash_tree = defaultdict(dict) for b in (f"{x:02x}" for x in range(256)): hash_tree[b][self.LEAF] = [b] for in_sequence, out_sequence in rewriting_rules.items(): self.add_leaf(hash_tree, in_sequence, out_sequence) return hash_tree def search_hash_tree(self, byte_sequence: List[str]) -> Union[None, List[str]]: """ Search the hash tree and return the rewritten byte sequence if found. """ tree_pointer = self.hash_tree for b in byte_sequence: if b in tree_pointer: tree_pointer = tree_pointer[b] else: return None return tree_pointer[self.LEAF] def rewrite_bytes(self, in_bytes: List[str], reverse=False) -> List[str]: """ Rewrite a sequence of bytes using the hash tree. Args: in_bytes (`List[str]`): A list of bytes to be rewritten. reverse (`bool`): If True, decoding is performed with the reverse hash tree. Returns: `List[str]`: The rewritten byte sequence. """ out_bytes = [] b_start = 0 b_end = 0 while b_start < len(in_bytes): tree_pointer = self.hash_tree if not reverse else self.reverse_hash_tree for j in range(b_start, len(in_bytes)): b = in_bytes[j] if b in tree_pointer: tree_pointer = tree_pointer[b] elif j == b_start: cur_leaf = [b] b_end = j break else: break if self.LEAF in tree_pointer: cur_leaf = tree_pointer[self.LEAF] b_end = j out_bytes.extend(cur_leaf) b_start = b_end + 1 return out_bytes
class_definition
941
4,597
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/myt5/tokenization_myt5.py
null
8,725
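A small illustration of the hash-tree rewriting above, assuming the `ByteRewriter` class is importable; the rewriting rules are invented for the example (real maps ship with the MyT5 vocab file):

```python
# toy rules: "ab" collapses to one byte, "c" expands to two (made-up mappings)
rules = {"61 62": "ff", "63": "ee ee"}
rewriter = ByteRewriter(rules)

hex_bytes = [f"{b:02x}" for b in "abc".encode("utf-8")]  # ['61', '62', '63']
encoded = rewriter.rewrite_bytes(hex_bytes)              # ['ff', 'ee', 'ee']
decoded = rewriter.rewrite_bytes(encoded, reverse=True)  # ['61', '62', '63'] again
```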
class MyT5Tokenizer(PreTrainedTokenizer): """ Construct a MyT5 tokenizer. This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. Args: vocab_file (`str`): The file containing the byte rewriting rules. eos_token (`str`, *optional*, defaults to `"</s>"`): The end of sequence token. unk_token (`str`, *optional*, defaults to `"<unk>"`): The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead. pad_token (`str`, *optional*, defaults to `"<pad>"`): The token used for padding, for example when batching sequences of different lengths. extra_ids (`int`, *optional*, defaults to 125): Add a number of extra ids added to the end of the vocabulary for use as sentinels. These tokens are accessible as "<extra_id_{%d}>" where "{%d}" is a number between 0 and extra_ids-1. Extra tokens are indexed from the end of the vocabulary up to beginning ("<extra_id_0>" is the last token in the vocabulary like in ByT5 preprocessing see [here](https://github.com/google-research/text-to-text-transfer-transformer/blob/9fd7b14a769417be33bc6c850f9598764913c833/t5/data/preprocessors.py#L2117)). additional_special_tokens (`List[str]`, *optional*): Additional special tokens used by the tokenizer. """ model_input_names = ["input_ids", "attention_mask"] vocab_files_names = VOCAB_FILES_NAMES def __init__( self, vocab_file, eos_token="</s>", unk_token="<unk>", pad_token="<pad>", extra_ids=125, additional_special_tokens=None, **kwargs, ) -> None: # Add extra_ids to the special token list if extra_ids > 0 and additional_special_tokens is None: additional_special_tokens = [f"<extra_id_{i}>" for i in range(extra_ids)] elif extra_ids > 0 and additional_special_tokens is not None and len(additional_special_tokens) > 0: # Check that we have the right number of extra_id special tokens extra_tokens = len(set(filter(lambda x: bool("extra_id" in str(x)), additional_special_tokens))) if extra_tokens != extra_ids: raise ValueError( f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are" " provided to MyT5Tokenizer. 
In this case the additional_special_tokens must include the" " extra_ids tokens" ) pad_token = AddedToken(pad_token, lstrip=True, rstrip=True) if isinstance(pad_token, str) else pad_token eos_token = AddedToken(eos_token, lstrip=True, rstrip=True) if isinstance(eos_token, str) else eos_token unk_token = AddedToken(unk_token, lstrip=True, rstrip=True) if isinstance(unk_token, str) else unk_token # unk token needs to be in the vocab with correct index self._added_tokens_decoder = {0: pad_token, 1: eos_token, 2: unk_token} self.offset = len(self._added_tokens_decoder) self._utf_vocab_size = 2**8 # utf is 8 bits # Load byte maps self.byte_maps = json.load(open(vocab_file, "r")) self.decompose_rewriter = ByteRewriter(self.byte_maps["decompose_map"]) self.merge_rewriter = ByteRewriter(self.byte_maps["merge_map"]) super().__init__( eos_token=eos_token, unk_token=unk_token, pad_token=pad_token, extra_ids=0, additional_special_tokens=additional_special_tokens, **kwargs, ) @property def vocab_size(self): return self._utf_vocab_size # Copied from transformers.models.byt5.tokenization_byt5.ByT5Tokenizer.get_vocab def get_vocab(self): vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size + self.offset)} vocab.update(self.added_tokens_encoder) return vocab # Copied from transformers.models.byt5.tokenization_byt5.ByT5Tokenizer.get_special_tokens_mask def get_special_tokens_mask( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False ) -> List[int]: """ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer `prepare_for_model` method. Args: token_ids_0 (`List[int]`): List of IDs. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. already_has_special_tokens (`bool`, *optional*, defaults to `False`): Whether or not the token list is already formatted with special tokens for the model. Returns: `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token. """ if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True ) # normal case: some special tokens if token_ids_1 is None: return ([0] * len(token_ids_0)) + [1] return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1] def _add_eos_if_not_present(self, token_ids: List[int]) -> List[int]: """Do not add eos again if user already added it.""" if len(token_ids) > 0 and token_ids[-1] == self.eos_token_id: warnings.warn( f"This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated" " eos tokens being added." ) return token_ids else: return token_ids + [self.eos_token_id] def create_token_type_ids_from_sequences( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: """ Create a mask from the two sequences passed to be used in a sequence-pair classification task. MyT5 does not make use of token type ids, therefore a list of zeros is returned. Args: token_ids_0 (`List[int]`): List of IDs. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: List of zeros. 
""" eos = [self.eos_token_id] if token_ids_1 is None: return len(token_ids_0 + eos) * [0] return len(token_ids_0 + eos + token_ids_1 + eos) * [0] # Copied from transformers.models.byt5.tokenization_byt5.ByT5Tokenizer.build_inputs_with_special_tokens def build_inputs_with_special_tokens( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: """ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. A sequence has the following format: - single sequence: `X </s>` - pair of sequences: `A </s> B </s>` Args: token_ids_0 (`List[int]`): List of IDs to which the special tokens will be added. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens. """ token_ids_0 = self._add_eos_if_not_present(token_ids_0) if token_ids_1 is None: return token_ids_0 else: token_ids_1 = self._add_eos_if_not_present(token_ids_1) return token_ids_0 + token_ids_1 def _tokenize(self, text: str, **kwargs) -> List[str]: """Take as input a string and return a list of strings (tokens) for words/sub-words. Represents tokens in two character hex format""" tokens = [f"{i:02x}" for i in text.encode("utf-8")] tokens = self.morphological_encode(tokens) return tokens def _convert_token_to_id(self, token): """Converts a token (str) in an id using the vocab.""" if len(token) != 2: token_id = None else: token_id = int(token, 16) + self.offset return token_id def _convert_id_to_token(self, index): """Converts an index (integer) in a token (str) using the vocab.""" token = f"{index - self.offset:02x}" return token def morphological_encode(self, indices: List[str]) -> List[str]: # Decompose and merge morphological sequences indices = self.decompose_rewriter.rewrite_bytes(indices, reverse=False) indices = self.merge_rewriter.rewrite_bytes(indices, reverse=False) return indices def morphological_decode(self, indices: List[str]) -> List[str]: # Demerge and compose morphological sequences indices = self.merge_rewriter.rewrite_bytes(indices, reverse=True) indices = self.decompose_rewriter.rewrite_bytes(indices, reverse=True) return indices def convert_tokens_to_string(self, tokens): """Converts a sequence of tokens (string) in a single string.""" bstring = b"" out_tokens = [] for token in tokens: if token in self.added_tokens_decoder: out_tokens.append(self.added_tokens_decoder[token]) elif token in self.added_tokens_encoder: out_tokens.append(token) else: out_tokens.append(token) out_tokens = self.morphological_decode(out_tokens) _added_tokens = set(self.added_tokens_decoder.values()) | set(self.added_tokens_encoder) for token in out_tokens: if token in _added_tokens: bstring += bytes(token, "utf-8") else: bstring += bytes.fromhex(token) string = bstring.decode("utf-8", errors="ignore") return string def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: if os.path.isdir(save_directory): vocab_file = os.path.join( save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) else: vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory with open(vocab_file, "w", encoding="utf-8") as writer: writer.write(json.dumps(self.byte_maps, indent=2, ensure_ascii=False)) return (vocab_file,)
class_definition
4,600
15,524
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/myt5/tokenization_myt5.py
null
8,726
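A hedged usage sketch for MyT5Tokenizer; the hub checkpoint name is an assumption:

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("Tomlim/myt5-base")  # assumed checkpoint name
ids = tokenizer("Hello world").input_ids  # morphologically rewritten byte ids plus the trailing </s>
print(tokenizer.decode(ids, skip_special_tokens=True))
```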
class Wav2Vec2BertConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`Wav2Vec2BertModel`]. It is used to instantiate a Wav2Vec2Bert model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the Wav2Vec2Bert [facebook/wav2vec2-bert-rel-pos-large](https://huggingface.co/facebook/wav2vec2-bert-rel-pos-large) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: vocab_size (`int`, *optional*): Vocabulary size of the Wav2Vec2Bert model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`Wav2Vec2BertModel`]. hidden_size (`int`, *optional*, defaults to 1024): Dimensionality of the encoder layers and the pooler layer. num_hidden_layers (`int`, *optional*, defaults to 24): Number of hidden layers in the Transformer encoder. num_attention_heads (`int`, *optional*, defaults to 16): Number of attention heads for each attention layer in the Transformer encoder. intermediate_size (`int`, *optional*, defaults to 4096): Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder. feature_projection_input_dim (`int`, *optional*, defaults to 160): Input dimension of this model, i.e. the dimension after processing input audio with [`SeamlessM4TFeatureExtractor`] or [`Wav2Vec2BertProcessor`]. hidden_act (`str` or `function`, *optional*, defaults to `"swish"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"selu"`, `"swish"` and `"gelu_new"` are supported. hidden_dropout (`float`, *optional*, defaults to 0.0): The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. activation_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for activations inside the fully connected layer. attention_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for the attention probabilities. feat_proj_dropout (`float`, *optional*, defaults to 0.0): The dropout probability for the feature projection. final_dropout (`float`, *optional*, defaults to 0.1): The dropout probability for the final projection layer of [`Wav2Vec2BertForCTC`]. layerdrop (`float`, *optional*, defaults to 0.1): The LayerDrop probability. See the [LayerDrop paper](https://arxiv.org/abs/1909.11556) for more details. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. layer_norm_eps (`float`, *optional*, defaults to 1e-05): The epsilon used by the layer normalization layers. apply_spec_augment (`bool`, *optional*, defaults to `True`): Whether to apply *SpecAugment* data augmentation to the outputs of the feature encoder. For reference see [SpecAugment: A Simple Data Augmentation Method for Automatic Speech Recognition](https://arxiv.org/abs/1904.08779). mask_time_prob (`float`, *optional*, defaults to 0.05): Percentage (between 0 and 1) of all feature vectors along the time axis which will be masked. 
The masking procedure generates `mask_time_prob*len(time_axis)/mask_time_length` independent masks over the axis. If reasoning from the probability of each feature vector to be chosen as the start of the vector span to be masked, *mask_time_prob* should be `prob_vector_start*mask_time_length`. Note that overlap may decrease the actual percentage of masked vectors. This is only relevant if `apply_spec_augment is True`. mask_time_length (`int`, *optional*, defaults to 10): Length of vector span along the time axis. mask_time_min_masks (`int`, *optional*, defaults to 2): The minimum number of masks of length `mask_time_length` generated along the time axis, each time step, irrespective of `mask_time_prob`. Only relevant if `mask_time_prob*len(time_axis)/mask_time_length < mask_time_min_masks`. mask_feature_prob (`float`, *optional*, defaults to 0.0): Percentage (between 0 and 1) of all feature vectors along the feature axis which will be masked. The masking procedure generates `mask_feature_prob*len(feature_axis)/mask_feature_length` independent masks over the axis. If reasoning from the probability of each feature vector to be chosen as the start of the vector span to be masked, *mask_feature_prob* should be `prob_vector_start*mask_feature_length`. Note that overlap may decrease the actual percentage of masked vectors. This is only relevant if `apply_spec_augment is True`. mask_feature_length (`int`, *optional*, defaults to 10): Length of vector span along the feature axis. mask_feature_min_masks (`int`, *optional*, defaults to 0): The minimum number of masks of length `mask_feature_length` generated along the feature axis, each time step, irrespective of `mask_feature_prob`. Only relevant if `mask_feature_prob*len(feature_axis)/mask_feature_length < mask_feature_min_masks`. ctc_loss_reduction (`str`, *optional*, defaults to `"sum"`): Specifies the reduction to apply to the output of `torch.nn.CTCLoss`. Only relevant when training an instance of [`Wav2Vec2BertForCTC`]. ctc_zero_infinity (`bool`, *optional*, defaults to `False`): Whether to zero infinite losses and the associated gradients of `torch.nn.CTCLoss`. Infinite losses mainly occur when the inputs are too short to be aligned to the targets. Only relevant when training an instance of [`Wav2Vec2BertForCTC`]. use_weighted_layer_sum (`bool`, *optional*, defaults to `False`): Whether to use a weighted average of layer outputs with learned weights. Only relevant when using an instance of [`Wav2Vec2BertForSequenceClassification`]. classifier_proj_size (`int`, *optional*, defaults to 768): Dimensionality of the projection before token mean-pooling for classification. tdnn_dim (`Tuple[int]` or `List[int]`, *optional*, defaults to `(512, 512, 512, 512, 1500)`): A tuple of integers defining the number of output channels of each 1D convolutional layer in the *TDNN* module of the *XVector* model. The length of *tdnn_dim* defines the number of *TDNN* layers. tdnn_kernel (`Tuple[int]` or `List[int]`, *optional*, defaults to `(5, 3, 3, 1, 1)`): A tuple of integers defining the kernel size of each 1D convolutional layer in the *TDNN* module of the *XVector* model. The length of *tdnn_kernel* has to match the length of *tdnn_dim*. tdnn_dilation (`Tuple[int]` or `List[int]`, *optional*, defaults to `(1, 2, 3, 1, 1)`): A tuple of integers defining the dilation factor of each 1D convolutional layer in the *TDNN* module of the *XVector* model. The length of *tdnn_dilation* has to match the length of *tdnn_dim*. 
xvector_output_dim (`int`, *optional*, defaults to 512): Dimensionality of the *XVector* embedding vectors. pad_token_id (`int`, *optional*, defaults to 0): The id of the _padding_ token. bos_token_id (`int`, *optional*, defaults to 1): The id of the _beginning-of-stream_ token. eos_token_id (`int`, *optional*, defaults to 2): The id of the _end-of-stream_ token. add_adapter (`bool`, *optional*, defaults to `False`): Whether a convolutional attention network should be stacked on top of the Wav2Vec2Bert Encoder. Can be very useful for warm-starting Wav2Vec2Bert for SpeechEncoderDecoder models. adapter_kernel_size (`int`, *optional*, defaults to 3): Kernel size of the convolutional layers in the adapter network. Only relevant if `add_adapter is True`. adapter_stride (`int`, *optional*, defaults to 2): Stride of the convolutional layers in the adapter network. Only relevant if `add_adapter is True`. num_adapter_layers (`int`, *optional*, defaults to 1): Number of convolutional layers that should be used in the adapter network. Only relevant if `add_adapter is True`. adapter_act (`str` or `function`, *optional*, defaults to `"relu"`): The non-linear activation function (function or string) in the adapter layers. If string, `"gelu"`, `"relu"`, `"selu"`, `"swish"` and `"gelu_new"` are supported. use_intermediate_ffn_before_adapter (`bool`, *optional*, defaults to `False`): Whether an intermediate feed-forward block should be stacked on top of the Wav2Vec2Bert Encoder and before the adapter network. Only relevant if `add_adapter is True`. output_hidden_size (`int`, *optional*): Dimensionality of the encoder output layer. If not defined, this defaults to *hidden-size*. Only relevant if `add_adapter is True`. position_embeddings_type (`str`, *optional*, defaults to `"relative_key"`): Can be set to one of: - `rotary`, for rotary position embeddings. - `relative`, for relative position embeddings. - `relative_key`, for relative position embeddings as defined by Shaw in [Self-Attention with Relative Position Representations (Shaw et al.)](https://arxiv.org/abs/1803.02155). If left as `None`, no relative position embeddings are applied. rotary_embedding_base (`int`, *optional*, defaults to 10000): If `"rotary"` position embeddings are used, defines the size of the embedding base. max_source_positions (`int`, *optional*, defaults to 5000): If `"relative"` position embeddings are used, defines the maximum source input positions. left_max_position_embeddings (`int`, *optional*, defaults to 64): If `"relative_key"` (aka Shaw) position embeddings are used, defines the left clipping value for relative positions. right_max_position_embeddings (`int`, *optional*, defaults to 8): If `"relative_key"` (aka Shaw) position embeddings are used, defines the right clipping value for relative positions. conv_depthwise_kernel_size (`int`, *optional*, defaults to 31): Kernel size of the depthwise 1D convolutional layer in Conformer blocks. conformer_conv_dropout (`float`, *optional*, defaults to 0.1): The dropout probability for all convolutional layers in Conformer blocks. 
Example: ```python >>> from transformers import Wav2Vec2BertConfig, Wav2Vec2BertModel >>> # Initializing a Wav2Vec2Bert facebook/wav2vec2-bert-rel-pos-large style configuration >>> configuration = Wav2Vec2BertConfig() >>> # Initializing a model (with random weights) from the facebook/wav2vec2-bert-rel-pos-large style configuration >>> model = Wav2Vec2BertModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "wav2vec2-bert" def __init__( self, vocab_size=None, hidden_size=1024, num_hidden_layers=24, num_attention_heads=16, intermediate_size=4096, feature_projection_input_dim=160, hidden_act="swish", hidden_dropout=0.0, activation_dropout=0.0, attention_dropout=0.0, feat_proj_dropout=0.0, final_dropout=0.1, layerdrop=0.1, initializer_range=0.02, layer_norm_eps=1e-5, apply_spec_augment=True, mask_time_prob=0.05, mask_time_length=10, mask_time_min_masks=2, mask_feature_prob=0.0, mask_feature_length=10, mask_feature_min_masks=0, ctc_loss_reduction="sum", ctc_zero_infinity=False, use_weighted_layer_sum=False, classifier_proj_size=768, tdnn_dim=(512, 512, 512, 512, 1500), tdnn_kernel=(5, 3, 3, 1, 1), tdnn_dilation=(1, 2, 3, 1, 1), xvector_output_dim=512, pad_token_id=0, bos_token_id=1, eos_token_id=2, add_adapter=False, adapter_kernel_size=3, adapter_stride=2, num_adapter_layers=1, adapter_act="relu", use_intermediate_ffn_before_adapter=False, output_hidden_size=None, position_embeddings_type="relative_key", rotary_embedding_base=10000, max_source_positions=5000, left_max_position_embeddings=64, right_max_position_embeddings=8, conv_depthwise_kernel_size=31, conformer_conv_dropout=0.1, **kwargs, ): super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id) self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.num_attention_heads = num_attention_heads self.feature_projection_input_dim = feature_projection_input_dim self.hidden_dropout = hidden_dropout self.attention_dropout = attention_dropout self.activation_dropout = activation_dropout self.feat_proj_dropout = feat_proj_dropout self.final_dropout = final_dropout self.layerdrop = layerdrop self.layer_norm_eps = layer_norm_eps self.initializer_range = initializer_range self.vocab_size = vocab_size self.use_weighted_layer_sum = use_weighted_layer_sum self.max_source_positions = max_source_positions if position_embeddings_type is not None and position_embeddings_type not in [ "rotary", "relative", "relative_key", ]: raise ValueError( """ `position_embeddings_type` is not valid. It must be one of the following values: `["rotary", "relative", "relative_key"]` or left as `None`. 
""" ) self.position_embeddings_type = position_embeddings_type self.rotary_embedding_base = rotary_embedding_base self.left_max_position_embeddings = left_max_position_embeddings self.right_max_position_embeddings = right_max_position_embeddings # Conformer-block related self.conv_depthwise_kernel_size = conv_depthwise_kernel_size self.conformer_conv_dropout = conformer_conv_dropout # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 self.apply_spec_augment = apply_spec_augment self.mask_time_prob = mask_time_prob self.mask_time_length = mask_time_length self.mask_time_min_masks = mask_time_min_masks self.mask_feature_prob = mask_feature_prob self.mask_feature_length = mask_feature_length self.mask_feature_min_masks = mask_feature_min_masks # ctc loss self.ctc_loss_reduction = ctc_loss_reduction self.ctc_zero_infinity = ctc_zero_infinity # adapter self.add_adapter = add_adapter self.adapter_kernel_size = adapter_kernel_size self.adapter_stride = adapter_stride self.num_adapter_layers = num_adapter_layers self.adapter_act = adapter_act self.output_hidden_size = output_hidden_size if output_hidden_size is not None else hidden_size if use_intermediate_ffn_before_adapter and not add_adapter: raise ValueError("`use_intermediate_ffn_before_adapter` is `True` but `add_adapter` is `False`.") self.use_intermediate_ffn_before_adapter = use_intermediate_ffn_before_adapter # SequenceClassification-specific parameter. Feel free to ignore for other classes. self.classifier_proj_size = classifier_proj_size # XVector-specific parameters. Feel free to ignore for other classes. self.tdnn_dim = list(tdnn_dim) self.tdnn_kernel = list(tdnn_kernel) self.tdnn_dilation = list(tdnn_dilation) self.xvector_output_dim = xvector_output_dim @property def inputs_to_logits_ratio(self): ratio = self.feature_projection_input_dim * 2 if self.add_adapter: ratio = ratio * (self.adapter_stride**self.num_adapter_layers) return ratio
class_definition
813
18,074
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/wav2vec2_bert/configuration_wav2vec2_bert.py
null
8,727
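As a quick aside on the SpecAugment parameters documented above: the expected number of independent time masks is roughly `mask_time_prob * len(time_axis) / mask_time_length`, floored at `mask_time_min_masks`. A minimal sketch follows; the frame count is an assumed example value, not something defined by the library.

```python
from transformers import Wav2Vec2BertConfig

# Illustrative only: expected number of independent SpecAugment time masks,
# following the `mask_time_prob` description above.
config = Wav2Vec2BertConfig(mask_time_prob=0.05, mask_time_length=10, mask_time_min_masks=2)

num_frames = 500  # hypothetical length of the time axis after feature extraction
expected_masks = config.mask_time_prob * num_frames / config.mask_time_length  # 2.5
num_masks = max(int(expected_masks), config.mask_time_min_masks)

print(f"~{num_masks} spans of {config.mask_time_length} frames are masked during training")
```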
class Wav2Vec2BertProcessorKwargs(ProcessingKwargs, total=False): _defaults = {}
class_definition
1,021
1,105
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/wav2vec2_bert/processing_wav2vec2_bert.py
null
8,728
class Wav2Vec2BertProcessor(ProcessorMixin): r""" Constructs a Wav2Vec2-BERT processor which wraps a Wav2Vec2-BERT feature extractor and a Wav2Vec2 CTC tokenizer into a single processor. [`Wav2Vec2BertProcessor`] offers all the functionalities of [`SeamlessM4TFeatureExtractor`] and [`PreTrainedTokenizer`]. See the docstring of [`~Wav2Vec2BertProcessor.__call__`] and [`~Wav2Vec2BertProcessor.decode`] for more information. Args: feature_extractor (`SeamlessM4TFeatureExtractor`): An instance of [`SeamlessM4TFeatureExtractor`]. The feature extractor is a required input. tokenizer ([`PreTrainedTokenizer`]): An instance of [`PreTrainedTokenizer`]. The tokenizer is a required input. """ feature_extractor_class = "SeamlessM4TFeatureExtractor" tokenizer_class = "AutoTokenizer" def __init__(self, feature_extractor, tokenizer): super().__init__(feature_extractor, tokenizer) @classmethod def from_pretrained(cls, pretrained_model_name_or_path, **kwargs): try: return super().from_pretrained(pretrained_model_name_or_path, **kwargs) except OSError: warnings.warn( f"Loading a tokenizer inside {cls.__name__} from a config that does not" " include a `tokenizer_class` attribute is deprecated and will be " "removed in v5. Please add `'tokenizer_class': 'Wav2Vec2CTCTokenizer'`" " attribute to either your `config.json` or `tokenizer_config.json` " "file to suppress this warning: ", FutureWarning, ) feature_extractor = SeamlessM4TFeatureExtractor.from_pretrained(pretrained_model_name_or_path, **kwargs) tokenizer = Wav2Vec2CTCTokenizer.from_pretrained(pretrained_model_name_or_path, **kwargs) return cls(feature_extractor=feature_extractor, tokenizer=tokenizer) def __call__( self, audio: AudioInput = None, text: Optional[Union[str, List[str], TextInput, PreTokenizedInput]] = None, images=None, videos=None, **kwargs: Unpack[Wav2Vec2BertProcessorKwargs], ): """ Main method to prepare one or several sequence(s) and audio(s) for the model. This method forwards the `audio` and `kwargs` arguments to SeamlessM4TFeatureExtractor's [`~SeamlessM4TFeatureExtractor.__call__`] if `audio` is not `None` to pre-process the audio. To prepare the target sequence(s), this method forwards the `text` and `kwargs` arguments to PreTrainedTokenizer's [`~PreTrainedTokenizer.__call__`] if `text` is not `None`. Please refer to the docstring of the above two methods for more information. Args: audio (`np.ndarray`, `torch.Tensor`, `List[np.ndarray]`, `List[torch.Tensor]`): The audio or batch of audios to be prepared. Each audio can be a NumPy array or PyTorch tensor. In case of a NumPy array/PyTorch tensor, each audio should be of shape (C, T), where C is the number of channels, and T the sample length of the audio. text (`str`, `List[str]`, `List[List[str]]`): The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings (pretokenized string). If the sequences are provided as a list of strings (pretokenized), you must set `is_split_into_words=True` (to lift the ambiguity with a batch of sequences). Returns: [`BatchEncoding`]: A [`BatchEncoding`] with the following fields: - **input_features** -- Audio input features to be fed to a model. Returned when `audio` is not `None`. - **attention_mask** -- List of indices specifying which timestamps should be attended to by the model when `audio` is not `None`. When only `text` is specified, returns the token attention mask. - **labels** -- List of token ids to be fed to a model. Returned when both `text` and `audio` are not `None`. 
- **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None` and `audio` is `None`. """ if audio is None and text is None: raise ValueError("You need to specify either an `audio` or `text` input to process.") output_kwargs = self._merge_kwargs( Wav2Vec2BertProcessorKwargs, tokenizer_init_kwargs=self.tokenizer.init_kwargs, **kwargs, ) if audio is not None: inputs = self.feature_extractor(audio, **output_kwargs["audio_kwargs"]) if text is not None: encodings = self.tokenizer(text, **output_kwargs["text_kwargs"]) if text is None: return inputs elif audio is None: return encodings else: inputs["labels"] = encodings["input_ids"] return inputs def pad(self, input_features=None, labels=None, **kwargs): """ If `input_features` is not `None`, this method forwards the `input_features` and `kwargs` arguments to SeamlessM4TFeatureExtractor's [`~SeamlessM4TFeatureExtractor.pad`] to pad the input features. If `labels` is not `None`, this method forwards the `labels` and `kwargs` arguments to PreTrainedTokenizer's [`~PreTrainedTokenizer.pad`] to pad the label(s). Please refer to the docstring of the above two methods for more information. """ if input_features is None and labels is None: raise ValueError("You need to specify either an `input_features` or `labels` input to pad.") if input_features is not None: input_features = self.feature_extractor.pad(input_features, **kwargs) if labels is not None: labels = self.tokenizer.pad(labels, **kwargs) if labels is None: return input_features elif input_features is None: return labels else: input_features["labels"] = labels["input_ids"] return input_features def batch_decode(self, *args, **kwargs): """ This method forwards all its arguments to PreTrainedTokenizer's [`~PreTrainedTokenizer.batch_decode`]. Please refer to the docstring of this method for more information. """ return self.tokenizer.batch_decode(*args, **kwargs) def decode(self, *args, **kwargs): """ This method forwards all its arguments to PreTrainedTokenizer's [`~PreTrainedTokenizer.decode`]. Please refer to the docstring of this method for more information. """ return self.tokenizer.decode(*args, **kwargs)
class_definition
1,108
7,843
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/wav2vec2_bert/processing_wav2vec2_bert.py
null
8,729
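A minimal usage sketch for the processor defined above. The checkpoint path is a placeholder for any Wav2Vec2-BERT CTC repository that ships both a `SeamlessM4TFeatureExtractor` and a tokenizer, the audio is random noise, and it is assumed that `sampling_rate` is routed to the feature extractor by the merged kwargs.

```python
import numpy as np
from transformers import Wav2Vec2BertProcessor

# Placeholder checkpoint: any Wav2Vec2-BERT CTC repo with a feature extractor and tokenizer.
processor = Wav2Vec2BertProcessor.from_pretrained("path/to/wav2vec2-bert-ctc-checkpoint")

audio = np.random.randn(16000).astype(np.float32)  # one second of fake 16 kHz audio
inputs = processor(audio=audio, text="a transcription", sampling_rate=16000, return_tensors="pt")

print(inputs["input_features"].shape)  # log-mel features for the model
print(inputs["labels"].shape)          # tokenized transcription, usable as CTC labels
```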
class Wav2Vec2BertRotaryPositionalEmbedding(nn.Module): """Rotary positional embedding Reference : https://blog.eleuther.ai/rotary-embeddings/ Paper: https://arxiv.org/pdf/2104.09864.pdf """ def __init__(self, config): super().__init__() dim = config.hidden_size // config.num_attention_heads base = config.rotary_embedding_base inv_freq = 1.0 / (base ** (torch.arange(0, dim, 2, dtype=torch.int64).float() / dim)) # Ignore copy self.register_buffer("inv_freq", inv_freq, persistent=False) self.cached_sequence_length = None self.cached_rotary_positional_embedding = None def forward(self, hidden_states): sequence_length = hidden_states.shape[1] if sequence_length == self.cached_sequence_length and self.cached_rotary_positional_embedding is not None: return self.cached_rotary_positional_embedding self.cached_sequence_length = sequence_length # Embeddings are computed in the dtype of the inv_freq constant time_stamps = torch.arange(sequence_length).type_as(self.inv_freq) freqs = torch.einsum("i,j->ij", time_stamps, self.inv_freq) embeddings = torch.cat((freqs, freqs), dim=-1) cos_embeddings = embeddings.cos()[:, None, None, :] sin_embeddings = embeddings.sin()[:, None, None, :] # Computed embeddings are cast to the dtype of the hidden state inputs self.cached_rotary_positional_embedding = torch.stack([cos_embeddings, sin_embeddings]).type_as(hidden_states) return self.cached_rotary_positional_embedding
class_definition
10,179
11,794
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/wav2vec2_bert/modeling_wav2vec2_bert.py
null
8,730
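The cos/sin cache built by the rotary module above can be reproduced standalone. The sizes below mirror the config defaults and are purely illustrative.

```python
import torch

# Standalone sketch of the inverse-frequency table and cache used by the rotary embedding above.
hidden_size, num_attention_heads, base = 1024, 16, 10000
dim = hidden_size // num_attention_heads  # per-head dimension, 64 here

inv_freq = 1.0 / (base ** (torch.arange(0, dim, 2, dtype=torch.int64).float() / dim))

sequence_length = 8
time_stamps = torch.arange(sequence_length).type_as(inv_freq)
freqs = torch.einsum("i,j->ij", time_stamps, inv_freq)   # (seq_len, dim/2)
embeddings = torch.cat((freqs, freqs), dim=-1)           # (seq_len, dim)
print(embeddings.cos().shape, embeddings.sin().shape)    # cached as [cos, sin] by the module
```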
class Wav2Vec2BertRelPositionalEmbedding(nn.Module): """Relative positional encoding module.""" def __init__(self, config): super().__init__() self.max_len = config.max_source_positions self.d_model = config.hidden_size self.pe = None self.extend_pe(torch.tensor(0.0).expand(1, self.max_len)) def extend_pe(self, x): # Reset the positional encodings if self.pe is not None: # self.pe contains both positive and negative parts # the length of self.pe is 2 * input_len - 1 if self.pe.size(1) >= x.size(1) * 2 - 1: if self.pe.dtype != x.dtype or self.pe.device != x.device: self.pe = self.pe.to(dtype=x.dtype, device=x.device) return # Suppose `i` is the position of query vector and `j` is the # position of key vector. We use positive relative positions when keys # are to the left (i>j) and negative relative positions otherwise (i<j). pe_positive = torch.zeros(x.size(1), self.d_model) pe_negative = torch.zeros(x.size(1), self.d_model) position = torch.arange(0, x.size(1), dtype=torch.int64).float().unsqueeze(1) div_term = torch.exp( torch.arange(0, self.d_model, 2, dtype=torch.int64).float() * -(math.log(10000.0) / self.d_model) ) pe_positive[:, 0::2] = torch.sin(position * div_term) pe_positive[:, 1::2] = torch.cos(position * div_term) pe_negative[:, 0::2] = torch.sin(-1 * position * div_term) pe_negative[:, 1::2] = torch.cos(-1 * position * div_term) # Reverse the order of positive indices and concat both positive and # negative indices. This is used to support the shifting trick # as in https://arxiv.org/abs/1901.02860 pe_positive = torch.flip(pe_positive, [0]).unsqueeze(0) pe_negative = pe_negative[1:].unsqueeze(0) pe = torch.cat([pe_positive, pe_negative], dim=1) self.pe = pe.to(device=x.device, dtype=x.dtype) def forward(self, hidden_states: torch.Tensor): self.extend_pe(hidden_states) start_idx = self.pe.size(1) // 2 - hidden_states.size(1) + 1 end_idx = self.pe.size(1) // 2 + hidden_states.size(1) relative_position_embeddings = self.pe[:, start_idx:end_idx] return relative_position_embeddings
class_definition
11,955
14,348
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/wav2vec2_bert/modeling_wav2vec2_bert.py
null
8,731
class Wav2Vec2BertFeatureProjection(nn.Module): def __init__(self, config): super().__init__() self.layer_norm = nn.LayerNorm(config.feature_projection_input_dim, eps=config.layer_norm_eps) self.projection = nn.Linear(config.feature_projection_input_dim, config.hidden_size) self.dropout = nn.Dropout(config.feat_proj_dropout) def forward(self, hidden_states): # non-projected hidden states are needed for quantization norm_hidden_states = self.layer_norm(hidden_states) hidden_states = self.projection(norm_hidden_states) hidden_states = self.dropout(hidden_states) return hidden_states, norm_hidden_states
class_definition
14,351
15,039
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/wav2vec2_bert/modeling_wav2vec2_bert.py
null
8,732
class Wav2Vec2BertFeedForward(nn.Module): def __init__(self, config, act_fn=None, hidden_size=None): super().__init__() act_fn = act_fn if act_fn is not None else config.hidden_act hidden_size = hidden_size if hidden_size is not None else config.hidden_size self.intermediate_dropout = nn.Dropout(config.activation_dropout) self.intermediate_dense = nn.Linear(hidden_size, config.intermediate_size) self.intermediate_act_fn = ACT2FN[act_fn] if isinstance(act_fn, str) else act_fn self.output_dense = nn.Linear(config.intermediate_size, hidden_size) self.output_dropout = nn.Dropout(config.hidden_dropout) # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2FeedForward.forward def forward(self, hidden_states): hidden_states = self.intermediate_dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) hidden_states = self.intermediate_dropout(hidden_states) hidden_states = self.output_dense(hidden_states) hidden_states = self.output_dropout(hidden_states) return hidden_states
class_definition
15,042
16,186
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/wav2vec2_bert/modeling_wav2vec2_bert.py
null
8,733
class Wav2Vec2BertConvolutionModule(nn.Module): """Convolution block used in the conformer block""" def __init__(self, config): super().__init__() if (config.conv_depthwise_kernel_size - 1) % 2 == 1: raise ValueError("`config.conv_depthwise_kernel_size` should be a odd number for 'SAME' padding") self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.pointwise_conv1 = nn.Conv1d( config.hidden_size, 2 * config.hidden_size, kernel_size=1, stride=1, padding=0, bias=False, ) self.glu = nn.GLU(dim=1) self.depthwise_conv = nn.Conv1d( config.hidden_size, config.hidden_size, config.conv_depthwise_kernel_size, stride=1, padding=0, groups=config.hidden_size, bias=False, ) self.depthwise_layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.activation = ACT2FN[config.hidden_act] self.pointwise_conv2 = nn.Conv1d( config.hidden_size, config.hidden_size, kernel_size=1, stride=1, padding=0, bias=False, ) self.dropout = nn.Dropout(config.conformer_conv_dropout) def forward(self, hidden_states, attention_mask=None): hidden_states = self.layer_norm(hidden_states) # Ensure that we do not leak padded positions in depthwise convolution if attention mask is passed. # Put 0 where necessary if attention_mask is not None: hidden_states = hidden_states.masked_fill(~attention_mask.bool().unsqueeze(-1), 0.0) # exchange the temporal dimension and the feature dimension hidden_states = hidden_states.transpose(1, 2) # GLU mechanism # => (batch, 2*channel, dim) hidden_states = self.pointwise_conv1(hidden_states) # => (batch, channel, dim) hidden_states = self.glu(hidden_states) # Pad the sequence entirely on the left because of causal convolution. hidden_states = torch.nn.functional.pad(hidden_states, (self.depthwise_conv.kernel_size[0] - 1, 0)) # 1D Depthwise Conv hidden_states = self.depthwise_conv(hidden_states) hidden_states = self.depthwise_layer_norm(hidden_states.transpose(1, 2)).transpose(1, 2) hidden_states = self.activation(hidden_states) hidden_states = self.pointwise_conv2(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = hidden_states.transpose(1, 2) return hidden_states
class_definition
16,189
18,900
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/wav2vec2_bert/modeling_wav2vec2_bert.py
null
8,734
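Note the causal left-padding before the depthwise convolution above: padding by `kernel_size - 1` on the left keeps the output sequence length equal to the input length. A standalone sketch with assumed tensor sizes:

```python
import torch
import torch.nn as nn

# Sketch of the causal left-padding used before the depthwise convolution above
# (kernel size 31 matches the default `conv_depthwise_kernel_size`).
kernel_size, channels, seq_len = 31, 4, 100
depthwise = nn.Conv1d(channels, channels, kernel_size, groups=channels, bias=False)

x = torch.randn(1, channels, seq_len)
x_padded = nn.functional.pad(x, (kernel_size - 1, 0))  # pad only on the left
print(depthwise(x_padded).shape)  # (1, 4, 100): the sequence length is preserved
```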
class Wav2Vec2BertSelfAttention(nn.Module): """Construct an Wav2Vec2BertSelfAttention object. Can be enhanced with rotary or relative position embeddings. """ def __init__(self, config, is_adapter_attention=False): super().__init__() hidden_size = config.hidden_size if not is_adapter_attention else config.output_hidden_size self.head_size = hidden_size // config.num_attention_heads self.num_heads = config.num_attention_heads self.position_embeddings_type = config.position_embeddings_type if not is_adapter_attention else None self.linear_q = nn.Linear(hidden_size, hidden_size) self.linear_k = nn.Linear(hidden_size, hidden_size) self.linear_v = nn.Linear(hidden_size, hidden_size) self.linear_out = nn.Linear(hidden_size, hidden_size) self.dropout = nn.Dropout(p=config.attention_dropout) if self.position_embeddings_type == "relative": # linear transformation for positional encoding self.linear_pos = nn.Linear(hidden_size, hidden_size, bias=False) # these two learnable bias are used in matrix c and matrix d # as described in https://arxiv.org/abs/1901.02860 Section 3.3 self.pos_bias_u = nn.Parameter(torch.zeros(self.num_heads, self.head_size)) self.pos_bias_v = nn.Parameter(torch.zeros(self.num_heads, self.head_size)) if self.position_embeddings_type == "relative_key": self.left_max_position_embeddings = config.left_max_position_embeddings self.right_max_position_embeddings = config.right_max_position_embeddings num_positions = self.left_max_position_embeddings + self.right_max_position_embeddings + 1 self.distance_embedding = nn.Embedding(num_positions, self.head_size) def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, relative_position_embeddings: Optional[torch.Tensor] = None, output_attentions: bool = False, ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: # self-attention mechanism batch_size, sequence_length, hidden_size = hidden_states.size() # make sure query/key states can be != value states query_key_states = hidden_states value_states = hidden_states if self.position_embeddings_type == "rotary": if relative_position_embeddings is None: raise ValueError( "`relative_position_embeddings` has to be defined when `self.position_embeddings_type == 'rotary'" ) query_key_states = self._apply_rotary_embedding(query_key_states, relative_position_embeddings) # project query_key_states and value_states query = self.linear_q(query_key_states).view(batch_size, -1, self.num_heads, self.head_size) key = self.linear_k(query_key_states).view(batch_size, -1, self.num_heads, self.head_size) value = self.linear_v(value_states).view(batch_size, -1, self.num_heads, self.head_size) # => (batch, head, time1, d_k) query = query.transpose(1, 2) key = key.transpose(1, 2) value = value.transpose(1, 2) if self.position_embeddings_type == "relative": if relative_position_embeddings is None: raise ValueError( "`relative_position_embeddings` has to be defined when `self.position_embeddings_type ==" " 'relative'" ) # apply relative_position_embeddings to qk scores # as proposed in Transformer_XL: https://arxiv.org/abs/1901.02860 scores = self._apply_relative_embeddings( query=query, key=key, relative_position_embeddings=relative_position_embeddings ) else: scores = torch.matmul(query, key.transpose(-2, -1)) / math.sqrt(self.head_size) if self.position_embeddings_type == "relative_key": query_length, key_length = query.shape[2], key.shape[2] position_ids_l = torch.arange(query_length, dtype=torch.long, device=hidden_states.device).view(-1, 1) 
position_ids_r = torch.arange(key_length, dtype=torch.long, device=hidden_states.device).view(1, -1) distance = position_ids_r - position_ids_l distance = torch.clamp(distance, -self.left_max_position_embeddings, self.right_max_position_embeddings) positional_embedding = self.distance_embedding(distance + self.left_max_position_embeddings) positional_embedding = positional_embedding.to(dtype=query.dtype) # fp16 compatibility relative_position_attn_weights = torch.einsum("bhld,lrd->bhlr", query, positional_embedding) scores = scores + (relative_position_attn_weights / math.sqrt(self.head_size)) # apply attention_mask if necessary if attention_mask is not None: scores = scores + attention_mask # => (batch, head, time1, time2) probs = torch.softmax(scores, dim=-1) probs = self.dropout(probs) # => (batch, head, time1, d_k) hidden_states = torch.matmul(probs, value) # => (batch, time1, hidden_size) hidden_states = hidden_states.transpose(1, 2).reshape(batch_size, -1, self.num_heads * self.head_size) hidden_states = self.linear_out(hidden_states) return hidden_states, probs # Copied from transformers.models.wav2vec2_conformer.modeling_wav2vec2_conformer.Wav2Vec2ConformerSelfAttention._apply_rotary_embedding def _apply_rotary_embedding(self, hidden_states, relative_position_embeddings): batch_size, sequence_length, hidden_size = hidden_states.size() hidden_states = hidden_states.view(batch_size, sequence_length, self.num_heads, self.head_size) cos = relative_position_embeddings[0, :sequence_length, ...] sin = relative_position_embeddings[1, :sequence_length, ...] # rotate hidden_states with rotary embeddings hidden_states = hidden_states.transpose(0, 1) rotated_states_begin = hidden_states[..., : self.head_size // 2] rotated_states_end = hidden_states[..., self.head_size // 2 :] rotated_states = torch.cat((-rotated_states_end, rotated_states_begin), dim=rotated_states_begin.ndim - 1) hidden_states = (hidden_states * cos) + (rotated_states * sin) hidden_states = hidden_states.transpose(0, 1) hidden_states = hidden_states.view(batch_size, sequence_length, self.num_heads * self.head_size) return hidden_states # Copied from transformers.models.wav2vec2_conformer.modeling_wav2vec2_conformer.Wav2Vec2ConformerSelfAttention._apply_relative_embeddings def _apply_relative_embeddings(self, query, key, relative_position_embeddings): # 1. project positional embeddings # => (batch, head, 2*time1-1, d_k) proj_relative_position_embeddings = self.linear_pos(relative_position_embeddings) proj_relative_position_embeddings = proj_relative_position_embeddings.view( relative_position_embeddings.size(0), -1, self.num_heads, self.head_size ) proj_relative_position_embeddings = proj_relative_position_embeddings.transpose(1, 2) proj_relative_position_embeddings = proj_relative_position_embeddings.transpose(2, 3) # 2. Add bias to query # => (batch, head, time1, d_k) query = query.transpose(1, 2) q_with_bias_u = (query + self.pos_bias_u).transpose(1, 2) q_with_bias_v = (query + self.pos_bias_v).transpose(1, 2) # 3. attention score: first compute matrix a and matrix c # as described in https://arxiv.org/abs/1901.02860 Section 3.3 # => (batch, head, time1, time2) scores_ac = torch.matmul(q_with_bias_u, key.transpose(-2, -1)) # 4. then compute matrix b and matrix d # => (batch, head, time1, 2*time1-1) scores_bd = torch.matmul(q_with_bias_v, proj_relative_position_embeddings) # 5. 
shift matrix b and matrix d zero_pad = torch.zeros((*scores_bd.size()[:3], 1), device=scores_bd.device, dtype=scores_bd.dtype) scores_bd_padded = torch.cat([zero_pad, scores_bd], dim=-1) scores_bd_padded_shape = scores_bd.size()[:2] + (scores_bd.shape[3] + 1, scores_bd.shape[2]) scores_bd_padded = scores_bd_padded.view(*scores_bd_padded_shape) scores_bd = scores_bd_padded[:, :, 1:].view_as(scores_bd) scores_bd = scores_bd[:, :, :, : scores_bd.size(-1) // 2 + 1] # 6. sum matrices # => (batch, head, time1, time2) scores = (scores_ac + scores_bd) / math.sqrt(self.head_size) return scores
class_definition
18,903
27,718
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/wav2vec2_bert/modeling_wav2vec2_bert.py
null
8,735
class Wav2Vec2BertEncoderLayer(nn.Module): """Conformer block based on https://arxiv.org/abs/2005.08100.""" def __init__(self, config): super().__init__() embed_dim = config.hidden_size dropout = config.attention_dropout # Feed-forward 1 self.ffn1_layer_norm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps) self.ffn1 = Wav2Vec2BertFeedForward(config) # Self-Attention self.self_attn_layer_norm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps) self.self_attn_dropout = nn.Dropout(dropout) self.self_attn = Wav2Vec2BertSelfAttention(config) # Conformer Convolution self.conv_module = Wav2Vec2BertConvolutionModule(config) # Feed-forward 2 self.ffn2_layer_norm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps) self.ffn2 = Wav2Vec2BertFeedForward(config) self.final_layer_norm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps) def forward( self, hidden_states, attention_mask: Optional[torch.Tensor] = None, relative_position_embeddings: Optional[torch.Tensor] = None, output_attentions: bool = False, conv_attention_mask: Optional[torch.Tensor] = None, ): hidden_states = hidden_states # 1. Feed-Forward 1 layer residual = hidden_states hidden_states = self.ffn1_layer_norm(hidden_states) hidden_states = self.ffn1(hidden_states) hidden_states = hidden_states * 0.5 + residual residual = hidden_states # 2. Self-Attention layer hidden_states = self.self_attn_layer_norm(hidden_states) hidden_states, attn_weigts = self.self_attn( hidden_states=hidden_states, attention_mask=attention_mask, relative_position_embeddings=relative_position_embeddings, output_attentions=output_attentions, ) hidden_states = self.self_attn_dropout(hidden_states) hidden_states = hidden_states + residual # 3. Convolutional Layer residual = hidden_states hidden_states = self.conv_module(hidden_states, attention_mask=conv_attention_mask) hidden_states = residual + hidden_states # 4. Feed-Forward 2 Layer residual = hidden_states hidden_states = self.ffn2_layer_norm(hidden_states) hidden_states = self.ffn2(hidden_states) hidden_states = hidden_states * 0.5 + residual hidden_states = self.final_layer_norm(hidden_states) return hidden_states, attn_weigts
class_definition
27,721
30,312
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/wav2vec2_bert/modeling_wav2vec2_bert.py
null
8,736
class Wav2Vec2BertEncoder(nn.Module): def __init__(self, config): super().__init__() self.config = config if config.position_embeddings_type == "relative": self.embed_positions = Wav2Vec2BertRelPositionalEmbedding(config) elif config.position_embeddings_type == "rotary": self.embed_positions = Wav2Vec2BertRotaryPositionalEmbedding(config) else: self.embed_positions = None self.dropout = nn.Dropout(config.hidden_dropout) self.layers = nn.ModuleList([Wav2Vec2BertEncoderLayer(config) for _ in range(config.num_hidden_layers)]) self.gradient_checkpointing = False def forward( self, hidden_states, attention_mask=None, output_attentions=False, output_hidden_states=False, return_dict=True, ): all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None conv_attention_mask = attention_mask if attention_mask is not None: # make sure padded tokens output 0 hidden_states = hidden_states.masked_fill(~attention_mask.bool().unsqueeze(-1), 0.0) # extend attention_mask attention_mask = 1.0 - attention_mask[:, None, None, :].to(dtype=hidden_states.dtype) attention_mask = attention_mask * torch.finfo(hidden_states.dtype).min attention_mask = attention_mask.expand( attention_mask.shape[0], 1, attention_mask.shape[-1], attention_mask.shape[-1] ) hidden_states = self.dropout(hidden_states) if self.embed_positions is not None: relative_position_embeddings = self.embed_positions(hidden_states) else: relative_position_embeddings = None synced_gpus = is_deepspeed_zero3_enabled() or is_fsdp_managed_module(self) for i, layer in enumerate(self.layers): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description) dropout_probability = torch.rand([]) skip_the_layer = True if self.training and (dropout_probability < self.config.layerdrop) else False if not skip_the_layer or synced_gpus: # under fsdp or deepspeed zero3 all gpus must run in sync if self.gradient_checkpointing and self.training: layer_outputs = self._gradient_checkpointing_func( layer.__call__, hidden_states, attention_mask, relative_position_embeddings, output_attentions, conv_attention_mask, ) else: layer_outputs = layer( hidden_states, attention_mask=attention_mask, relative_position_embeddings=relative_position_embeddings, output_attentions=output_attentions, conv_attention_mask=conv_attention_mask, ) hidden_states = layer_outputs[0] if skip_the_layer: layer_outputs = (None, None) if output_attentions: all_self_attentions = all_self_attentions + (layer_outputs[1],) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None) return BaseModelOutput( last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attentions, )
class_definition
30,315
34,230
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/wav2vec2_bert/modeling_wav2vec2_bert.py
null
8,737
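The encoder above converts a binary padding mask into an additive attention bias before the layer loop. A toy reproduction of that step, with assumed values:

```python
import torch

# Sketch of how the encoder above turns a padding mask into an additive attention bias.
attention_mask = torch.tensor([[1, 1, 1, 0, 0]])         # (batch, seq_len), 0 = padding
dtype = torch.float32

bias = 1.0 - attention_mask[:, None, None, :].to(dtype)  # 1 where padded
bias = bias * torch.finfo(dtype).min                      # large negative at padded keys
bias = bias.expand(bias.shape[0], 1, bias.shape[-1], bias.shape[-1])
print(bias.shape)  # (1, 1, 5, 5), added to the attention scores before softmax
```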
class Wav2Vec2BertAdapter(nn.Module): def __init__(self, config): super().__init__() # feature dim might need to be down-projected if config.output_hidden_size != config.hidden_size: self.proj = nn.Linear(config.hidden_size, config.output_hidden_size) self.proj_layer_norm = nn.LayerNorm(config.output_hidden_size, eps=config.layer_norm_eps) else: self.proj = self.proj_layer_norm = None self.layers = nn.ModuleList(Wav2Vec2BertAdapterLayer(config) for _ in range(config.num_adapter_layers)) self.layerdrop = config.layerdrop self.kernel_size = config.adapter_kernel_size self.stride = config.adapter_stride def _compute_sub_sample_lengths_from_attention_mask(self, seq_lens): if seq_lens is None: return seq_lens pad = self.kernel_size // 2 seq_lens = ((seq_lens + 2 * pad - self.kernel_size) / self.stride) + 1 return seq_lens.floor() def forward(self, hidden_states, attention_mask=None): # down project hidden_states if necessary if self.proj is not None and self.proj_layer_norm is not None: hidden_states = self.proj(hidden_states) hidden_states = self.proj_layer_norm(hidden_states) sub_sampled_lengths = None if attention_mask is not None: sub_sampled_lengths = (attention_mask.size(1) - (1 - attention_mask.int()).sum(1)).to(hidden_states.device) for layer in self.layers: layerdrop_prob = torch.rand([]) sub_sampled_lengths = self._compute_sub_sample_lengths_from_attention_mask(sub_sampled_lengths) if not self.training or (layerdrop_prob > self.layerdrop): hidden_states = layer( hidden_states, attention_mask=attention_mask, sub_sampled_lengths=sub_sampled_lengths ) return hidden_states
class_definition
34,233
36,167
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/wav2vec2_bert/modeling_wav2vec2_bert.py
null
8,738
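`_compute_sub_sample_lengths_from_attention_mask` above applies the usual 1D-conv output-length formula once per adapter layer. A sketch with the default kernel size and stride and hypothetical input lengths:

```python
import torch

# Sketch of the sub-sampled length bookkeeping used by the adapter above
# (kernel_size=3 and stride=2 are the config defaults).
kernel_size, stride = 3, 2
pad = kernel_size // 2

seq_lens = torch.tensor([200.0, 137.0])          # valid (non-padded) frames per example
seq_lens = ((seq_lens + 2 * pad - kernel_size) / stride) + 1
print(seq_lens.floor())                           # tensor([100., 69.]): lengths after one adapter layer
```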
class Wav2Vec2BertAdapterLayer(nn.Module): def __init__(self, config): super().__init__() embed_dim = config.output_hidden_size dropout = config.conformer_conv_dropout self.kernel_size = config.adapter_kernel_size self.stride = config.adapter_stride # 1. residual convolution self.residual_layer_norm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps) self.residual_conv = nn.Conv1d( embed_dim, 2 * embed_dim, self.kernel_size, stride=self.stride, padding=self.stride // 2, ) self.activation = nn.GLU(dim=1) # Self-Attention self.self_attn_layer_norm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps) self.self_attn_conv = nn.Conv1d( embed_dim, 2 * embed_dim, self.kernel_size, stride=self.stride, padding=self.stride // 2, ) self.self_attn = Wav2Vec2BertSelfAttention(config, is_adapter_attention=True) self.self_attn_dropout = nn.Dropout(dropout) # Feed-forward self.ffn_layer_norm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps) self.ffn = Wav2Vec2BertFeedForward(config, act_fn=config.adapter_act, hidden_size=embed_dim) def forward( self, hidden_states, attention_mask: Optional[torch.Tensor] = None, output_attentions: bool = False, sub_sampled_lengths: Optional[torch.Tensor] = None, ): residual = self.residual_layer_norm(hidden_states) # Apply pooling to the residual to match the sequence length of the # multi-head attention output. # (batch, seq_len, feature_dim) -> (batch, feature_dim, seq_len) residual = residual.transpose(1, 2) residual = self.residual_conv(residual) residual = self.activation(residual) # (batch, feature_dim, seq_len) -> (batch, seq_len, feature_dim) residual = residual.transpose(1, 2) hidden_states = self.self_attn_layer_norm(hidden_states) # Apply pooling before feeding to the multihead-attention layer. # (batch, seq_len, feature_dim) -> (batch, feature_dim, seq_len) hidden_states = hidden_states.transpose(1, 2) hidden_states = self.self_attn_conv(hidden_states) hidden_states = self.activation(hidden_states) # (batch, feature_dim, seq_len) -> (batch, seq_len, feature_dim) hidden_states = hidden_states.transpose(1, 2) if attention_mask is not None: attention_mask = _compute_new_attention_mask(hidden_states=hidden_states, seq_lens=sub_sampled_lengths) attention_mask = _prepare_4d_attention_mask( attention_mask, hidden_states.dtype, ) # The rest of the computation is identical to a vanilla Transformer # encoder layer. hidden_states, attn_weigths = self.self_attn( hidden_states, attention_mask=attention_mask, output_attentions=output_attentions, ) hidden_states = self.self_attn_dropout(hidden_states) hidden_states = hidden_states + residual residual = hidden_states hidden_states = self.ffn_layer_norm(hidden_states) hidden_states = self.ffn(hidden_states) + residual return hidden_states
class_definition
36,170
39,586
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/wav2vec2_bert/modeling_wav2vec2_bert.py
null
8,739
class Wav2Vec2BertPreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = Wav2Vec2BertConfig base_model_prefix = "wav2vec2_bert" main_input_name = "input_features" supports_gradient_checkpointing = True # Ignore copy def _init_weights(self, module): """Initialize the weights""" if isinstance(module, Wav2Vec2BertSelfAttention): if hasattr(module, "pos_bias_u"): nn.init.xavier_uniform_(module.pos_bias_u) if hasattr(module, "pos_bias_v"): nn.init.xavier_uniform_(module.pos_bias_v) elif isinstance(module, Wav2Vec2BertFeatureProjection): k = math.sqrt(1 / module.projection.in_features) nn.init.uniform_(module.projection.weight, a=-k, b=k) nn.init.uniform_(module.projection.bias, a=-k, b=k) elif isinstance(module, nn.Linear): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, (nn.LayerNorm, nn.GroupNorm)): module.bias.data.zero_() module.weight.data.fill_(1.0) elif isinstance(module, nn.Conv1d): nn.init.kaiming_normal_(module.weight) if module.bias is not None: k = math.sqrt(module.groups / (module.in_channels * module.kernel_size[0])) nn.init.uniform_(module.bias, a=-k, b=k) # Ignore copy def _get_feat_extract_output_lengths( self, input_lengths: Union[torch.LongTensor, int], add_adapter: Optional[bool] = None ): """ Computes the output length of the convolutional layers """ add_adapter = self.config.add_adapter if add_adapter is None else add_adapter def _conv_out_length(input_length, kernel_size, stride, padding): # 1D convolutional layer output length formula taken # from https://pytorch.org/docs/stable/generated/torch.nn.Conv1d.html return torch.div(input_length + 2 * padding - kernel_size, stride, rounding_mode="floor") + 1 if add_adapter: padding = self.config.adapter_kernel_size // 2 for _ in range(self.config.num_adapter_layers): input_lengths = _conv_out_length( input_lengths, self.config.adapter_kernel_size, self.config.adapter_stride, padding ) return input_lengths def _get_feature_vector_attention_mask( self, feature_vector_length: int, attention_mask: torch.LongTensor, add_adapter=None ): # Effectively attention_mask.sum(-1), but not inplace to be able to run # on inference mode. non_padded_lengths = attention_mask.cumsum(dim=-1)[:, -1] output_lengths = self._get_feat_extract_output_lengths(non_padded_lengths, add_adapter=add_adapter) output_lengths = output_lengths.to(torch.long) batch_size = attention_mask.shape[0] attention_mask = torch.zeros( (batch_size, feature_vector_length), dtype=attention_mask.dtype, device=attention_mask.device ) # these two operations makes sure that all values before the output lengths idxs are attended to attention_mask[(torch.arange(attention_mask.shape[0], device=attention_mask.device), output_lengths - 1)] = 1 attention_mask = attention_mask.flip([-1]).cumsum(-1).flip([-1]).bool() return attention_mask
class_definition
39,804
43,442
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/wav2vec2_bert/modeling_wav2vec2_bert.py
null
8,740
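The flip/cumsum trick in `_get_feature_vector_attention_mask` above converts per-example output lengths into a boolean attention mask without an explicit loop. A toy reproduction with assumed lengths:

```python
import torch

# Toy reproduction of the flip/cumsum trick used in `_get_feature_vector_attention_mask`.
feature_vector_length = 6
output_lengths = torch.tensor([4, 2])             # hypothetical valid lengths per example
batch_size = output_lengths.shape[0]

mask = torch.zeros((batch_size, feature_vector_length), dtype=torch.long)
mask[(torch.arange(batch_size), output_lengths - 1)] = 1  # mark the last valid position
mask = mask.flip([-1]).cumsum(-1).flip([-1]).bool()       # everything before it becomes True
print(mask)
# tensor([[ True,  True,  True,  True, False, False],
#         [ True,  True, False, False, False, False]])
```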
class Wav2Vec2BertModel(Wav2Vec2BertPreTrainedModel): def __init__(self, config: Wav2Vec2BertConfig): super().__init__(config) self.config = config self.feature_projection = Wav2Vec2BertFeatureProjection(config) # model only needs masking vector if mask prob is > 0.0 if config.mask_time_prob > 0.0 or config.mask_feature_prob > 0.0: self.masked_spec_embed = nn.Parameter(torch.Tensor(config.hidden_size).uniform_()) self.encoder = Wav2Vec2BertEncoder(config) self.adapter = Wav2Vec2BertAdapter(config) if config.add_adapter else None self.intermediate_ffn = None if config.use_intermediate_ffn_before_adapter: self.intermediate_ffn = Wav2Vec2BertFeedForward(config, act_fn="relu") # Initialize weights and apply final processing self.post_init() # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2Model._mask_hidden_states def _mask_hidden_states( self, hidden_states: torch.FloatTensor, mask_time_indices: Optional[torch.FloatTensor] = None, attention_mask: Optional[torch.LongTensor] = None, ): """ Masks extracted features along time axis and/or along feature axis according to [SpecAugment](https://arxiv.org/abs/1904.08779). """ # `config.apply_spec_augment` can set masking to False if not getattr(self.config, "apply_spec_augment", True): return hidden_states # generate indices & apply SpecAugment along time axis batch_size, sequence_length, hidden_size = hidden_states.size() if mask_time_indices is not None: # apply SpecAugment along time axis with given mask_time_indices hidden_states[mask_time_indices] = self.masked_spec_embed.to(hidden_states.dtype) elif self.config.mask_time_prob > 0 and self.training: mask_time_indices = _compute_mask_indices( (batch_size, sequence_length), mask_prob=self.config.mask_time_prob, mask_length=self.config.mask_time_length, attention_mask=attention_mask, min_masks=self.config.mask_time_min_masks, ) mask_time_indices = torch.tensor(mask_time_indices, device=hidden_states.device, dtype=torch.bool) hidden_states[mask_time_indices] = self.masked_spec_embed.to(hidden_states.dtype) if self.config.mask_feature_prob > 0 and self.training: # generate indices & apply SpecAugment along feature axis mask_feature_indices = _compute_mask_indices( (batch_size, hidden_size), mask_prob=self.config.mask_feature_prob, mask_length=self.config.mask_feature_length, min_masks=self.config.mask_feature_min_masks, ) mask_feature_indices = torch.tensor(mask_feature_indices, device=hidden_states.device, dtype=torch.bool) mask_feature_indices = mask_feature_indices[:, None].expand(-1, sequence_length, -1) hidden_states[mask_feature_indices] = 0 return hidden_states @add_start_docstrings_to_model_forward(WAV2VEC2_BERT_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint=_PRETRAINED_CHECKPOINT_FOR_DOC, output_type=Wav2Vec2BaseModelOutput, config_class=_CONFIG_FOR_DOC, modality="audio", expected_output=_EXPECTED_OUTPUT_SHAPE, ) def forward( self, input_features: Optional[torch.Tensor], attention_mask: Optional[torch.Tensor] = None, mask_time_indices: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, Wav2Vec2BaseModelOutput]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict hidden_states, 
extract_features = self.feature_projection(input_features) hidden_states = self._mask_hidden_states( hidden_states, mask_time_indices=mask_time_indices, attention_mask=attention_mask ) encoder_outputs = self.encoder( hidden_states, attention_mask=attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) hidden_states = encoder_outputs[0] if self.intermediate_ffn: expanded_hidden_states = self.intermediate_ffn(hidden_states) hidden_states = hidden_states + 0.5 * expanded_hidden_states if self.adapter is not None: hidden_states = self.adapter(hidden_states, attention_mask=attention_mask) if not return_dict: return (hidden_states, extract_features) + encoder_outputs[1:] return Wav2Vec2BaseModelOutput( last_hidden_state=hidden_states, extract_features=extract_features, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, )
class_definition
46,195
51,663
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/wav2vec2_bert/modeling_wav2vec2_bert.py
null
8,741
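A forward-pass sketch for the model above, using a small randomly initialized configuration; all sizes are assumptions chosen to keep the example fast.

```python
import torch
from transformers import Wav2Vec2BertConfig, Wav2Vec2BertModel

# Small random model for illustration (sizes are assumptions, not trained values).
config = Wav2Vec2BertConfig(hidden_size=64, num_hidden_layers=2, num_attention_heads=4, intermediate_size=128)
model = Wav2Vec2BertModel(config).eval()

# `input_features` are filterbank frames of width `feature_projection_input_dim` (160 by default).
input_features = torch.randn(1, 200, config.feature_projection_input_dim)
with torch.no_grad():
    outputs = model(input_features)
print(outputs.last_hidden_state.shape)  # (1, 200, 64); no downsampling without an adapter
```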
class Wav2Vec2BertForCTC(Wav2Vec2BertPreTrainedModel): # Copied from transformers.models.wav2vec2_conformer.modeling_wav2vec2_conformer.Wav2Vec2ConformerForCTC.__init__ with Wav2Vec2Conformer->Wav2Vec2Bert,WAV2VEC2_CONFORMER->WAV2VEC2_BERT,wav2vec2_conformer->wav2vec2_bert def __init__(self, config, target_lang: Optional[str] = None): super().__init__(config) self.wav2vec2_bert = Wav2Vec2BertModel(config) self.dropout = nn.Dropout(config.final_dropout) self.target_lang = target_lang if config.vocab_size is None: raise ValueError( f"You are trying to instantiate {self.__class__} with a configuration that " "does not define the vocabulary size of the language model head. Please " "instantiate the model as follows: `Wav2Vec2BertForCTC.from_pretrained(..., vocab_size=vocab_size)`. " "or define `vocab_size` of your model's configuration." ) output_hidden_size = ( config.output_hidden_size if hasattr(config, "add_adapter") and config.add_adapter else config.hidden_size ) self.lm_head = nn.Linear(output_hidden_size, config.vocab_size) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(WAV2VEC2_BERT_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint=_PRETRAINED_CHECKPOINT_FOR_DOC, output_type=CausalLMOutput, config_class=_CONFIG_FOR_DOC, expected_output=_CTC_EXPECTED_OUTPUT, expected_loss=_CTC_EXPECTED_LOSS, ) def forward( self, input_features: Optional[torch.Tensor], attention_mask: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, labels: Optional[torch.Tensor] = None, ) -> Union[Tuple, CausalLMOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size, target_length)`, *optional*): Labels for connectionist temporal classification. Note that `target_length` has to be smaller or equal to the sequence length of the output logits. Indices are selected in `[-100, 0, ..., config.vocab_size - 1]`. All labels set to `-100` are ignored (masked), the loss is only computed for labels in `[0, ..., config.vocab_size - 1]`. 
""" if labels is not None and labels.max() >= self.config.vocab_size: raise ValueError(f"Label values must be <= vocab_size: {self.config.vocab_size}") return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.wav2vec2_bert( input_features, attention_mask=attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) hidden_states = outputs[0] hidden_states = self.dropout(hidden_states) logits = self.lm_head(hidden_states) loss = None if labels is not None: # retrieve loss input_lengths from attention_mask attention_mask = ( attention_mask if attention_mask is not None else torch.ones(input_features.shape[:2], device=input_features.device, dtype=torch.long) ) input_lengths = self._get_feat_extract_output_lengths(attention_mask.sum([-1])).to(torch.long) # assuming that padded tokens are filled with -100 # when not being attended to labels_mask = labels >= 0 target_lengths = labels_mask.sum(-1) flattened_targets = labels.masked_select(labels_mask) # ctc_loss doesn't support fp16 log_probs = nn.functional.log_softmax(logits, dim=-1, dtype=torch.float32).transpose(0, 1) with torch.backends.cudnn.flags(enabled=False): loss = nn.functional.ctc_loss( log_probs, flattened_targets, input_lengths, target_lengths, blank=self.config.pad_token_id, reduction=self.config.ctc_loss_reduction, zero_infinity=self.config.ctc_zero_infinity, ) if not return_dict: output = (logits,) + outputs[_HIDDEN_STATES_START_POSITION:] return ((loss,) + output) if loss is not None else output return CausalLMOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions )
class_definition
51,844
56,613
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/wav2vec2_bert/modeling_wav2vec2_bert.py
null
8,742
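The CTC head above expects padded label positions to be filled with `-100`; they are masked out before `ctc_loss` is called. A toy sketch of that label handling:

```python
import torch

# Sketch of the label handling in the CTC forward above: padded label positions are
# filled with -100 and dropped before computing the CTC loss (toy values).
labels = torch.tensor([[5, 2, 9, -100, -100],
                       [3, 3, -100, -100, -100]])

labels_mask = labels >= 0
target_lengths = labels_mask.sum(-1)               # tensor([3, 2])
flattened_targets = labels.masked_select(labels_mask)
print(target_lengths, flattened_targets)           # targets are concatenated for nn.functional.ctc_loss
```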
class Wav2Vec2BertForSequenceClassification(Wav2Vec2BertPreTrainedModel): # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2ForSequenceClassification.__init__ with Wav2Vec2->Wav2Vec2Bert,wav2vec2->wav2vec2_bert def __init__(self, config): super().__init__(config) if hasattr(config, "add_adapter") and config.add_adapter: raise ValueError( "Sequence classification does not support the use of Wav2Vec2Bert adapters (config.add_adapter=True)" ) self.wav2vec2_bert = Wav2Vec2BertModel(config) num_layers = config.num_hidden_layers + 1 # transformer layers + input embeddings if config.use_weighted_layer_sum: self.layer_weights = nn.Parameter(torch.ones(num_layers) / num_layers) self.projector = nn.Linear(config.hidden_size, config.classifier_proj_size) self.classifier = nn.Linear(config.classifier_proj_size, config.num_labels) # Initialize weights and apply final processing self.post_init() def freeze_base_model(self): """ Calling this function will disable the gradient computation for the base model so that its parameters will not be updated during training. Only the classification head will be updated. """ for param in self.wav2vec2_bert.parameters(): param.requires_grad = False @add_start_docstrings_to_model_forward(WAV2VEC2_BERT_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint=_BASE_CHECKPOINT_FOR_DOC, output_type=SequenceClassifierOutput, config_class=_CONFIG_FOR_DOC, modality="audio", ) # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2ForSequenceClassification.forward with Wav2Vec2->Wav2Vec2Bert,wav2vec2->wav2vec2_bert,WAV_2_VEC_2->WAV2VEC2_BERT, input_values->input_features def forward( self, input_features: Optional[torch.Tensor], attention_mask: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, labels: Optional[torch.Tensor] = None, ) -> Union[Tuple, SequenceClassifierOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). 
""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict output_hidden_states = True if self.config.use_weighted_layer_sum else output_hidden_states outputs = self.wav2vec2_bert( input_features, attention_mask=attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) if self.config.use_weighted_layer_sum: hidden_states = outputs[_HIDDEN_STATES_START_POSITION] hidden_states = torch.stack(hidden_states, dim=1) norm_weights = nn.functional.softmax(self.layer_weights, dim=-1) hidden_states = (hidden_states * norm_weights.view(-1, 1, 1)).sum(dim=1) else: hidden_states = outputs[0] hidden_states = self.projector(hidden_states) if attention_mask is None: pooled_output = hidden_states.mean(dim=1) else: padding_mask = self._get_feature_vector_attention_mask(hidden_states.shape[1], attention_mask) expand_padding_mask = padding_mask.unsqueeze(-1).repeat(1, 1, hidden_states.shape[2]) hidden_states[~expand_padding_mask] = 0.0 pooled_output = hidden_states.sum(dim=1) / padding_mask.sum(dim=1).view(-1, 1) logits = self.classifier(pooled_output) loss = None if labels is not None: loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.config.num_labels), labels.view(-1)) if not return_dict: output = (logits,) + outputs[_HIDDEN_STATES_START_POSITION:] return ((loss,) + output) if loss is not None else output return SequenceClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, )
class_definition
56,843
61,471
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/wav2vec2_bert/modeling_wav2vec2_bert.py
null
8,743
class Wav2Vec2BertForAudioFrameClassification(Wav2Vec2BertPreTrainedModel): # Copied from transformers.models.wav2vec2_conformer.modeling_wav2vec2_conformer.Wav2Vec2ConformerForAudioFrameClassification.__init__ with Wav2Vec2Conformer->Wav2Vec2Bert,WAV2VEC2_CONFORMER->WAV2VEC2_BERT,wav2vec2_conformer->wav2vec2_bert def __init__(self, config): super().__init__(config) if hasattr(config, "add_adapter") and config.add_adapter: raise ValueError( "Audio frame classification does not support the use of Wav2Vec2Bert adapters (config.add_adapter=True)" ) self.wav2vec2_bert = Wav2Vec2BertModel(config) num_layers = config.num_hidden_layers + 1 # transformer layers + input embeddings if config.use_weighted_layer_sum: self.layer_weights = nn.Parameter(torch.ones(num_layers) / num_layers) self.classifier = nn.Linear(config.hidden_size, config.num_labels) self.num_labels = config.num_labels self.init_weights() # Copied from transformers.models.wav2vec2_conformer.modeling_wav2vec2_conformer.Wav2Vec2ConformerForAudioFrameClassification.freeze_base_model with wav2vec2_conformer->wav2vec2_bert def freeze_base_model(self): """ Calling this function will disable the gradient computation for the base model so that its parameters will not be updated during training. Only the classification head will be updated. """ for param in self.wav2vec2_bert.parameters(): param.requires_grad = False @add_start_docstrings_to_model_forward(WAV2VEC2_BERT_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint=_BASE_CHECKPOINT_FOR_DOC, output_type=TokenClassifierOutput, config_class=_CONFIG_FOR_DOC, modality="audio", ) # Copied from transformers.models.wav2vec2_conformer.modeling_wav2vec2_conformer.Wav2Vec2ConformerForAudioFrameClassification.forward with wav2vec2_conformer->wav2vec2_bert, input_values->input_features def forward( self, input_features: Optional[torch.Tensor], attention_mask: Optional[torch.Tensor] = None, labels: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, TokenClassifierOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). 
""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict output_hidden_states = True if self.config.use_weighted_layer_sum else output_hidden_states outputs = self.wav2vec2_bert( input_features, attention_mask=attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) if self.config.use_weighted_layer_sum: hidden_states = outputs[_HIDDEN_STATES_START_POSITION] hidden_states = torch.stack(hidden_states, dim=1) norm_weights = nn.functional.softmax(self.layer_weights, dim=-1) hidden_states = (hidden_states * norm_weights.view(-1, 1, 1)).sum(dim=1) else: hidden_states = outputs[0] logits = self.classifier(hidden_states) loss = None if labels is not None: loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), torch.argmax(labels.view(-1, self.num_labels), axis=1)) if not return_dict: output = (logits,) + outputs[_HIDDEN_STATES_START_POSITION:] return output return TokenClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, )
class_definition
61,650
65,914
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/wav2vec2_bert/modeling_wav2vec2_bert.py
null
8,744
class AMSoftmaxLoss(nn.Module): def __init__(self, input_dim, num_labels, scale=30.0, margin=0.4): super(AMSoftmaxLoss, self).__init__() self.scale = scale self.margin = margin self.num_labels = num_labels self.weight = nn.Parameter(torch.randn(input_dim, num_labels), requires_grad=True) self.loss = nn.CrossEntropyLoss() def forward(self, hidden_states, labels): labels = labels.flatten() weight = nn.functional.normalize(self.weight, dim=0) hidden_states = nn.functional.normalize(hidden_states, dim=1) cos_theta = torch.mm(hidden_states, weight) psi = cos_theta - self.margin onehot = nn.functional.one_hot(labels, self.num_labels) logits = self.scale * torch.where(onehot.bool(), psi, cos_theta) loss = self.loss(logits, labels) return loss
class_definition
65,992
66,868
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/wav2vec2_bert/modeling_wav2vec2_bert.py
null
8,745
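A small sanity check of the additive-margin softmax defined above, run on random embeddings; the dimensions and seed are arbitrary choices for illustration, and the class is assumed to be importable from the module listed in the entry:

import torch

torch.manual_seed(0)
input_dim, num_labels, batch = 16, 4, 8

loss_fn = AMSoftmaxLoss(input_dim, num_labels, scale=30.0, margin=0.4)
hidden_states = torch.randn(batch, input_dim)
labels = torch.randint(0, num_labels, (batch,))

loss = loss_fn(hidden_states, labels)
loss.backward()  # the learned class-weight matrix receives gradients
print(loss.item())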
class TDNNLayer(nn.Module):
    def __init__(self, config, layer_id=0):
        super().__init__()
        self.in_conv_dim = config.tdnn_dim[layer_id - 1] if layer_id > 0 else config.tdnn_dim[layer_id]
        self.out_conv_dim = config.tdnn_dim[layer_id]
        self.kernel_size = config.tdnn_kernel[layer_id]
        self.dilation = config.tdnn_dilation[layer_id]

        self.kernel = nn.Linear(self.in_conv_dim * self.kernel_size, self.out_conv_dim)
        self.activation = nn.ReLU()

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        if is_peft_available():
            from peft.tuners.lora import LoraLayer

            if isinstance(self.kernel, LoraLayer):
                warnings.warn(
                    "Detected LoRA on TDNNLayer. LoRA weights won't be applied due to optimization. "
                    "You should exclude TDNNLayer from LoRA's target modules.",
                )

        # for backward compatibility, we keep nn.Linear but call F.conv1d for speed up
        hidden_states = hidden_states.transpose(1, 2)
        weight = self.kernel.weight.view(self.out_conv_dim, self.kernel_size, self.in_conv_dim).transpose(1, 2)
        hidden_states = nn.functional.conv1d(hidden_states, weight, self.kernel.bias, dilation=self.dilation)
        hidden_states = hidden_states.transpose(1, 2)

        hidden_states = self.activation(hidden_states)
        return hidden_states
class_definition
66,942
68,372
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/wav2vec2_bert/modeling_wav2vec2_bert.py
null
8,746
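Because the layer above runs `nn.functional.conv1d` with stride 1 and a dilation factor, its time dimension shrinks by `dilation * (kernel_size - 1)` frames. A quick standalone check of that relationship; all sizes are made-up placeholders:

import torch

seq_len, in_dim, out_dim, kernel_size, dilation = 100, 8, 8, 5, 3
x = torch.randn(1, in_dim, seq_len)
weight = torch.randn(out_dim, in_dim, kernel_size)

y = torch.nn.functional.conv1d(x, weight, dilation=dilation)
expected = seq_len - dilation * (kernel_size - 1)
print(y.shape[-1], expected)  # both 88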
class Wav2Vec2BertForXVector(Wav2Vec2BertPreTrainedModel): # Copied from transformers.models.wav2vec2_conformer.modeling_wav2vec2_conformer.Wav2Vec2ConformerForXVector.__init__ with Wav2Vec2Conformer->Wav2Vec2Bert,WAV2VEC2_CONFORMER->WAV2VEC2_BERT,wav2vec2_conformer->wav2vec2_bert def __init__(self, config): super().__init__(config) self.wav2vec2_bert = Wav2Vec2BertModel(config) num_layers = config.num_hidden_layers + 1 # transformer layers + input embeddings if config.use_weighted_layer_sum: self.layer_weights = nn.Parameter(torch.ones(num_layers) / num_layers) self.projector = nn.Linear(config.hidden_size, config.tdnn_dim[0]) tdnn_layers = [TDNNLayer(config, i) for i in range(len(config.tdnn_dim))] self.tdnn = nn.ModuleList(tdnn_layers) self.feature_extractor = nn.Linear(config.tdnn_dim[-1] * 2, config.xvector_output_dim) self.classifier = nn.Linear(config.xvector_output_dim, config.xvector_output_dim) self.objective = AMSoftmaxLoss(config.xvector_output_dim, config.num_labels) self.init_weights() # Copied from transformers.models.wav2vec2_conformer.modeling_wav2vec2_conformer.Wav2Vec2ConformerForXVector.freeze_base_model with wav2vec2_conformer->wav2vec2_bert def freeze_base_model(self): """ Calling this function will disable the gradient computation for the base model so that its parameters will not be updated during training. Only the classification head will be updated. """ for param in self.wav2vec2_bert.parameters(): param.requires_grad = False # Copied from transformers.models.wav2vec2_conformer.modeling_wav2vec2_conformer.Wav2Vec2ConformerForXVector._get_tdnn_output_lengths def _get_tdnn_output_lengths(self, input_lengths: Union[torch.LongTensor, int]): """ Computes the output length of the TDNN layers """ def _conv_out_length(input_length, kernel_size, stride): # 1D convolutional layer output length formula taken # from https://pytorch.org/docs/stable/generated/torch.nn.Conv1d.html return (input_length - kernel_size) // stride + 1 for kernel_size in self.config.tdnn_kernel: input_lengths = _conv_out_length(input_lengths, kernel_size, 1) return input_lengths @add_start_docstrings_to_model_forward(WAV2VEC2_BERT_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint=_BASE_CHECKPOINT_FOR_DOC, output_type=XVectorOutput, config_class=_CONFIG_FOR_DOC, modality="audio", ) # Copied from transformers.models.wav2vec2_conformer.modeling_wav2vec2_conformer.Wav2Vec2ConformerForXVector.forward with wav2vec2_conformer->wav2vec2_bert, input_values->input_features def forward( self, input_features: Optional[torch.Tensor], attention_mask: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, labels: Optional[torch.Tensor] = None, ) -> Union[Tuple, XVectorOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). 
""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict output_hidden_states = True if self.config.use_weighted_layer_sum else output_hidden_states outputs = self.wav2vec2_bert( input_features, attention_mask=attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) if self.config.use_weighted_layer_sum: hidden_states = outputs[_HIDDEN_STATES_START_POSITION] hidden_states = torch.stack(hidden_states, dim=1) norm_weights = nn.functional.softmax(self.layer_weights, dim=-1) hidden_states = (hidden_states * norm_weights.view(-1, 1, 1)).sum(dim=1) else: hidden_states = outputs[0] hidden_states = self.projector(hidden_states) for tdnn_layer in self.tdnn: hidden_states = tdnn_layer(hidden_states) # Statistic Pooling if attention_mask is None: mean_features = hidden_states.mean(dim=1) std_features = hidden_states.std(dim=1) else: feat_extract_output_lengths = self._get_feat_extract_output_lengths(attention_mask.sum(dim=1)) tdnn_output_lengths = self._get_tdnn_output_lengths(feat_extract_output_lengths) mean_features = [] std_features = [] for i, length in enumerate(tdnn_output_lengths): mean_features.append(hidden_states[i, :length].mean(dim=0)) std_features.append(hidden_states[i, :length].std(dim=0)) mean_features = torch.stack(mean_features) std_features = torch.stack(std_features) statistic_pooling = torch.cat([mean_features, std_features], dim=-1) output_embeddings = self.feature_extractor(statistic_pooling) logits = self.classifier(output_embeddings) loss = None if labels is not None: loss = self.objective(logits, labels) if not return_dict: output = (logits, output_embeddings) + outputs[_HIDDEN_STATES_START_POSITION:] return ((loss,) + output) if loss is not None else output return XVectorOutput( loss=loss, logits=logits, embeddings=output_embeddings, hidden_states=outputs.hidden_states, attentions=outputs.attentions, )
class_definition
68,559
74,650
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/wav2vec2_bert/modeling_wav2vec2_bert.py
null
8,747
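Statistic pooling in the forward pass above reduces the variable-length TDNN features to one fixed vector per utterance by concatenating the per-dimension mean and standard deviation over time. A minimal sketch of the unmasked branch; the shapes are illustrative assumptions:

import torch

batch, frames, dim = 2, 120, 10
hidden_states = torch.randn(batch, frames, dim)

mean_features = hidden_states.mean(dim=1)  # (batch, dim)
std_features = hidden_states.std(dim=1)    # (batch, dim)
statistic_pooling = torch.cat([mean_features, std_features], dim=-1)
print(statistic_pooling.shape)  # torch.Size([2, 20])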
class LevitForImageClassificationWithTeacherOutput(ModelOutput):
    """
    Output type of [`LevitForImageClassificationWithTeacher`].

    Args:
        logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels)`):
            Prediction scores as the average of the `cls_logits` and `distillation_logits`.
        cls_logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels)`):
            Prediction scores of the classification head (i.e. the linear layer on top of the final hidden state of
            the class token).
        distillation_logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels)`):
            Prediction scores of the distillation head (i.e. the linear layer on top of the final hidden state of the
            distillation token).
        hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
            shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer
            plus the initial embedding outputs.
    """

    logits: torch.FloatTensor = None
    cls_logits: torch.FloatTensor = None
    distillation_logits: torch.FloatTensor = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
class_definition
1,628
3,066
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/levit/modeling_levit.py
null
8,748
class LevitConvEmbeddings(nn.Module):
    """
    LeViT Conv Embeddings with Batch Norm, used in the initial patch embedding layer.
    """

    def __init__(
        self, in_channels, out_channels, kernel_size, stride, padding, dilation=1, groups=1, bn_weight_init=1
    ):
        super().__init__()
        self.convolution = nn.Conv2d(
            in_channels, out_channels, kernel_size, stride, padding, dilation=dilation, groups=groups, bias=False
        )
        self.batch_norm = nn.BatchNorm2d(out_channels)

    def forward(self, embeddings):
        embeddings = self.convolution(embeddings)
        embeddings = self.batch_norm(embeddings)
        return embeddings
class_definition
3,069
3,749
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/levit/modeling_levit.py
null
8,749
class LevitPatchEmbeddings(nn.Module):
    """
    LeViT patch embeddings, for final embeddings to be passed to transformer blocks. It consists of multiple
    `LevitConvEmbeddings`.
    """

    def __init__(self, config):
        super().__init__()
        self.embedding_layer_1 = LevitConvEmbeddings(
            config.num_channels, config.hidden_sizes[0] // 8, config.kernel_size, config.stride, config.padding
        )
        self.activation_layer_1 = nn.Hardswish()

        self.embedding_layer_2 = LevitConvEmbeddings(
            config.hidden_sizes[0] // 8, config.hidden_sizes[0] // 4, config.kernel_size, config.stride, config.padding
        )
        self.activation_layer_2 = nn.Hardswish()

        self.embedding_layer_3 = LevitConvEmbeddings(
            config.hidden_sizes[0] // 4, config.hidden_sizes[0] // 2, config.kernel_size, config.stride, config.padding
        )
        self.activation_layer_3 = nn.Hardswish()

        self.embedding_layer_4 = LevitConvEmbeddings(
            config.hidden_sizes[0] // 2, config.hidden_sizes[0], config.kernel_size, config.stride, config.padding
        )
        self.num_channels = config.num_channels

    def forward(self, pixel_values):
        num_channels = pixel_values.shape[1]
        if num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
            )
        embeddings = self.embedding_layer_1(pixel_values)
        embeddings = self.activation_layer_1(embeddings)
        embeddings = self.embedding_layer_2(embeddings)
        embeddings = self.activation_layer_2(embeddings)
        embeddings = self.embedding_layer_3(embeddings)
        embeddings = self.activation_layer_3(embeddings)
        embeddings = self.embedding_layer_4(embeddings)
        return embeddings.flatten(2).transpose(1, 2)
class_definition
3,752
5,666
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/levit/modeling_levit.py
null
8,750
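The four stride-2 convolutions above reduce the spatial resolution by a factor of 16 while growing the channel count, so a default 224x224 input yields a 14x14 grid, i.e. 196 tokens of width hidden_sizes[0]. A small shape check under default-configuration assumptions; it presumes the module path from this entry is importable alongside transformers' LevitConfig:

import torch
from transformers import LevitConfig
from transformers.models.levit.modeling_levit import LevitPatchEmbeddings

config = LevitConfig()  # LeViT-128S style defaults: kernel 3, stride 2, padding 1, hidden_sizes[0] = 128
embeddings = LevitPatchEmbeddings(config)

pixel_values = torch.randn(1, 3, 224, 224)
out = embeddings(pixel_values)
print(out.shape)  # expected torch.Size([1, 196, 128]) -> (batch, 14 * 14, hidden_sizes[0])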
class MLPLayerWithBN(nn.Module):
    def __init__(self, input_dim, output_dim, bn_weight_init=1):
        super().__init__()
        self.linear = nn.Linear(in_features=input_dim, out_features=output_dim, bias=False)
        self.batch_norm = nn.BatchNorm1d(output_dim)

    def forward(self, hidden_state):
        hidden_state = self.linear(hidden_state)
        hidden_state = self.batch_norm(hidden_state.flatten(0, 1)).reshape_as(hidden_state)
        return hidden_state
class_definition
5,669
6,145
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/levit/modeling_levit.py
null
8,751
class LevitSubsample(nn.Module):
    def __init__(self, stride, resolution):
        super().__init__()
        self.stride = stride
        self.resolution = resolution

    def forward(self, hidden_state):
        batch_size, _, channels = hidden_state.shape
        hidden_state = hidden_state.view(batch_size, self.resolution, self.resolution, channels)[
            :, :: self.stride, :: self.stride
        ].reshape(batch_size, -1, channels)
        return hidden_state
class_definition
6,148
6,624
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/levit/modeling_levit.py
null
8,752
class LevitAttention(nn.Module): def __init__(self, hidden_sizes, key_dim, num_attention_heads, attention_ratio, resolution): super().__init__() self.num_attention_heads = num_attention_heads self.scale = key_dim**-0.5 self.key_dim = key_dim self.attention_ratio = attention_ratio self.out_dim_keys_values = attention_ratio * key_dim * num_attention_heads + key_dim * num_attention_heads * 2 self.out_dim_projection = attention_ratio * key_dim * num_attention_heads self.queries_keys_values = MLPLayerWithBN(hidden_sizes, self.out_dim_keys_values) self.activation = nn.Hardswish() self.projection = MLPLayerWithBN(self.out_dim_projection, hidden_sizes, bn_weight_init=0) points = list(itertools.product(range(resolution), range(resolution))) len_points = len(points) attention_offsets, indices = {}, [] for p1 in points: for p2 in points: offset = (abs(p1[0] - p2[0]), abs(p1[1] - p2[1])) if offset not in attention_offsets: attention_offsets[offset] = len(attention_offsets) indices.append(attention_offsets[offset]) self.attention_bias_cache = {} self.attention_biases = torch.nn.Parameter(torch.zeros(num_attention_heads, len(attention_offsets))) self.register_buffer( "attention_bias_idxs", torch.LongTensor(indices).view(len_points, len_points), persistent=False ) @torch.no_grad() def train(self, mode=True): super().train(mode) if mode and self.attention_bias_cache: self.attention_bias_cache = {} # clear ab cache def get_attention_biases(self, device): if self.training: return self.attention_biases[:, self.attention_bias_idxs] else: device_key = str(device) if device_key not in self.attention_bias_cache: self.attention_bias_cache[device_key] = self.attention_biases[:, self.attention_bias_idxs] return self.attention_bias_cache[device_key] def forward(self, hidden_state): batch_size, seq_length, _ = hidden_state.shape queries_keys_values = self.queries_keys_values(hidden_state) query, key, value = queries_keys_values.view(batch_size, seq_length, self.num_attention_heads, -1).split( [self.key_dim, self.key_dim, self.attention_ratio * self.key_dim], dim=3 ) query = query.permute(0, 2, 1, 3) key = key.permute(0, 2, 1, 3) value = value.permute(0, 2, 1, 3) attention = query @ key.transpose(-2, -1) * self.scale + self.get_attention_biases(hidden_state.device) attention = attention.softmax(dim=-1) hidden_state = (attention @ value).transpose(1, 2).reshape(batch_size, seq_length, self.out_dim_projection) hidden_state = self.projection(self.activation(hidden_state)) return hidden_state
class_definition
6,627
9,611
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/levit/modeling_levit.py
null
8,753
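The relative attention biases above are indexed through a table of unique (|dx|, |dy|) offsets between grid points, so the number of learned bias entries per head is resolution squared (one per distinct offset pair) rather than one per query/key pair. A small standalone sketch of that bookkeeping for a hypothetical 4x4 grid:

import itertools

resolution = 4
points = list(itertools.product(range(resolution), range(resolution)))

attention_offsets, indices = {}, []
for p1 in points:
    for p2 in points:
        offset = (abs(p1[0] - p2[0]), abs(p1[1] - p2[1]))
        if offset not in attention_offsets:
            attention_offsets[offset] = len(attention_offsets)
        indices.append(attention_offsets[offset])

print(len(points) ** 2)        # 256 query/key pairs
print(len(attention_offsets))  # 16 unique absolute offsets -> 16 learned biases per head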
class LevitAttentionSubsample(nn.Module):
    def __init__(
        self,
        input_dim,
        output_dim,
        key_dim,
        num_attention_heads,
        attention_ratio,
        stride,
        resolution_in,
        resolution_out,
    ):
        super().__init__()
        self.num_attention_heads = num_attention_heads
        self.scale = key_dim**-0.5
        self.key_dim = key_dim
        self.attention_ratio = attention_ratio
        self.out_dim_keys_values = attention_ratio * key_dim * num_attention_heads + key_dim * num_attention_heads
        self.out_dim_projection = attention_ratio * key_dim * num_attention_heads
        self.resolution_out = resolution_out
        # resolution_in is the initial resolution, resolution_out is the final resolution after downsampling
        self.keys_values = MLPLayerWithBN(input_dim, self.out_dim_keys_values)
        self.queries_subsample = LevitSubsample(stride, resolution_in)
        self.queries = MLPLayerWithBN(input_dim, key_dim * num_attention_heads)
        self.activation = nn.Hardswish()
        self.projection = MLPLayerWithBN(self.out_dim_projection, output_dim)

        self.attention_bias_cache = {}

        points = list(itertools.product(range(resolution_in), range(resolution_in)))
        points_ = list(itertools.product(range(resolution_out), range(resolution_out)))
        len_points, len_points_ = len(points), len(points_)
        attention_offsets, indices = {}, []
        for p1 in points_:
            for p2 in points:
                size = 1
                offset = (abs(p1[0] * stride - p2[0] + (size - 1) / 2), abs(p1[1] * stride - p2[1] + (size - 1) / 2))
                if offset not in attention_offsets:
                    attention_offsets[offset] = len(attention_offsets)
                indices.append(attention_offsets[offset])

        self.attention_biases = torch.nn.Parameter(torch.zeros(num_attention_heads, len(attention_offsets)))
        self.register_buffer(
            "attention_bias_idxs", torch.LongTensor(indices).view(len_points_, len_points), persistent=False
        )

    @torch.no_grad()
    def train(self, mode=True):
        super().train(mode)
        if mode and self.attention_bias_cache:
            self.attention_bias_cache = {}  # clear ab cache

    def get_attention_biases(self, device):
        if self.training:
            return self.attention_biases[:, self.attention_bias_idxs]
        else:
            device_key = str(device)
            if device_key not in self.attention_bias_cache:
                self.attention_bias_cache[device_key] = self.attention_biases[:, self.attention_bias_idxs]
            return self.attention_bias_cache[device_key]

    def forward(self, hidden_state):
        batch_size, seq_length, _ = hidden_state.shape
        key, value = (
            self.keys_values(hidden_state)
            .view(batch_size, seq_length, self.num_attention_heads, -1)
            .split([self.key_dim, self.attention_ratio * self.key_dim], dim=3)
        )
        key = key.permute(0, 2, 1, 3)
        value = value.permute(0, 2, 1, 3)

        query = self.queries(self.queries_subsample(hidden_state))
        query = query.view(batch_size, self.resolution_out**2, self.num_attention_heads, self.key_dim).permute(
            0, 2, 1, 3
        )

        attention = query @ key.transpose(-2, -1) * self.scale + self.get_attention_biases(hidden_state.device)
        attention = attention.softmax(dim=-1)
        hidden_state = (attention @ value).transpose(1, 2).reshape(batch_size, -1, self.out_dim_projection)
        hidden_state = self.projection(self.activation(hidden_state))
        return hidden_state
class_definition
9,614
13,300
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/levit/modeling_levit.py
null
8,754
class LevitMLPLayer(nn.Module):
    """
    MLP Layer with `2X` expansion in contrast to ViT with `4X`.
    """

    def __init__(self, input_dim, hidden_dim):
        super().__init__()
        self.linear_up = MLPLayerWithBN(input_dim, hidden_dim)
        self.activation = nn.Hardswish()
        self.linear_down = MLPLayerWithBN(hidden_dim, input_dim)

    def forward(self, hidden_state):
        hidden_state = self.linear_up(hidden_state)
        hidden_state = self.activation(hidden_state)
        hidden_state = self.linear_down(hidden_state)
        return hidden_state
class_definition
13,303
13,883
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/levit/modeling_levit.py
null
8,755
class LevitResidualLayer(nn.Module):
    """
    Residual Block for LeViT
    """

    def __init__(self, module, drop_rate):
        super().__init__()
        self.module = module
        self.drop_rate = drop_rate

    def forward(self, hidden_state):
        if self.training and self.drop_rate > 0:
            rnd = torch.rand(hidden_state.size(0), 1, 1, device=hidden_state.device)
            rnd = rnd.ge_(self.drop_rate).div(1 - self.drop_rate).detach()
            hidden_state = hidden_state + self.module(hidden_state) * rnd
            return hidden_state
        else:
            hidden_state = hidden_state + self.module(hidden_state)
            return hidden_state
class_definition
13,886
14,569
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/levit/modeling_levit.py
null
8,756
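During training the residual block above drops the module branch per sample with probability `drop_rate` and rescales the surviving samples by 1/(1 - drop_rate), which keeps the branch's expected value unchanged (stochastic depth). A standalone sketch of that per-sample mask; the drop rate and sizes are arbitrary choices:

import torch

drop_rate, batch = 0.25, 8
branch_output = torch.ones(batch, 4, 16)  # stand-in for self.module(hidden_state)

rnd = torch.rand(batch, 1, 1)
mask = rnd.ge_(drop_rate).div(1 - drop_rate)  # 0 for dropped samples, 1/(1 - p) otherwise

print(mask.squeeze())      # entries are either 0.0 or ~1.333
print(mask.mean().item())  # close to 1.0 in expectation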
class LevitStage(nn.Module):
    """
    LeViT Stage consisting of `LevitMLPLayer` and `LevitAttention` layers.
    """

    def __init__(
        self,
        config,
        idx,
        hidden_sizes,
        key_dim,
        depths,
        num_attention_heads,
        attention_ratio,
        mlp_ratio,
        down_ops,
        resolution_in,
    ):
        super().__init__()
        self.layers = []
        self.config = config
        self.resolution_in = resolution_in
        # resolution_in is the initial resolution, resolution_out is the final resolution after downsampling
        for _ in range(depths):
            self.layers.append(
                LevitResidualLayer(
                    LevitAttention(hidden_sizes, key_dim, num_attention_heads, attention_ratio, resolution_in),
                    self.config.drop_path_rate,
                )
            )
            if mlp_ratio > 0:
                hidden_dim = hidden_sizes * mlp_ratio
                self.layers.append(
                    LevitResidualLayer(LevitMLPLayer(hidden_sizes, hidden_dim), self.config.drop_path_rate)
                )

        if down_ops[0] == "Subsample":
            self.resolution_out = (self.resolution_in - 1) // down_ops[5] + 1
            self.layers.append(
                LevitAttentionSubsample(
                    *self.config.hidden_sizes[idx : idx + 2],
                    key_dim=down_ops[1],
                    num_attention_heads=down_ops[2],
                    attention_ratio=down_ops[3],
                    stride=down_ops[5],
                    resolution_in=resolution_in,
                    resolution_out=self.resolution_out,
                )
            )
            self.resolution_in = self.resolution_out
            if down_ops[4] > 0:
                hidden_dim = self.config.hidden_sizes[idx + 1] * down_ops[4]
                self.layers.append(
                    LevitResidualLayer(
                        LevitMLPLayer(self.config.hidden_sizes[idx + 1], hidden_dim), self.config.drop_path_rate
                    )
                )

        self.layers = nn.ModuleList(self.layers)

    def get_resolution(self):
        return self.resolution_in

    def forward(self, hidden_state):
        for layer in self.layers:
            hidden_state = layer(hidden_state)
        return hidden_state
class_definition
14,572
16,921
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/levit/modeling_levit.py
null
8,757
class LevitEncoder(nn.Module): """ LeViT Encoder consisting of multiple `LevitStage` stages. """ def __init__(self, config): super().__init__() self.config = config resolution = self.config.image_size // self.config.patch_size self.stages = [] self.config.down_ops.append([""]) for stage_idx in range(len(config.depths)): stage = LevitStage( config, stage_idx, config.hidden_sizes[stage_idx], config.key_dim[stage_idx], config.depths[stage_idx], config.num_attention_heads[stage_idx], config.attention_ratio[stage_idx], config.mlp_ratio[stage_idx], config.down_ops[stage_idx], resolution, ) resolution = stage.get_resolution() self.stages.append(stage) self.stages = nn.ModuleList(self.stages) def forward(self, hidden_state, output_hidden_states=False, return_dict=True): all_hidden_states = () if output_hidden_states else None for stage in self.stages: if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_state,) hidden_state = stage(hidden_state) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_state,) if not return_dict: return tuple(v for v in [hidden_state, all_hidden_states] if v is not None) return BaseModelOutputWithNoAttention(last_hidden_state=hidden_state, hidden_states=all_hidden_states)
class_definition
16,924
18,570
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/levit/modeling_levit.py
null
8,758
class LevitClassificationLayer(nn.Module):
    """
    LeViT Classification Layer
    """

    def __init__(self, input_dim, output_dim):
        super().__init__()
        self.batch_norm = nn.BatchNorm1d(input_dim)
        self.linear = nn.Linear(input_dim, output_dim)

    def forward(self, hidden_state):
        hidden_state = self.batch_norm(hidden_state)
        logits = self.linear(hidden_state)
        return logits
class_definition
18,573
19,000
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/levit/modeling_levit.py
null
8,759
class LevitPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = LevitConfig
    base_model_prefix = "levit"
    main_input_name = "pixel_values"
    _no_split_modules = ["LevitResidualLayer"]

    def _init_weights(self, module):
        """Initialize the weights"""
        if isinstance(module, (nn.Linear, nn.Conv2d)):
            # Slightly different from the TF version which uses truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, (nn.BatchNorm1d, nn.BatchNorm2d)):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
class_definition
19,003
19,943
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/levit/modeling_levit.py
null
8,760
class LevitModel(LevitPreTrainedModel): def __init__(self, config): super().__init__(config) self.config = config self.patch_embeddings = LevitPatchEmbeddings(config) self.encoder = LevitEncoder(config) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(LEVIT_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=BaseModelOutputWithPoolingAndNoAttention, config_class=_CONFIG_FOR_DOC, modality="vision", expected_output=_EXPECTED_OUTPUT_SHAPE, ) def forward( self, pixel_values: torch.FloatTensor = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, BaseModelOutputWithPoolingAndNoAttention]: output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if pixel_values is None: raise ValueError("You have to specify pixel_values") embeddings = self.patch_embeddings(pixel_values) encoder_outputs = self.encoder( embeddings, output_hidden_states=output_hidden_states, return_dict=return_dict, ) last_hidden_state = encoder_outputs[0] # global average pooling, (batch_size, seq_length, hidden_sizes) -> (batch_size, hidden_sizes) pooled_output = last_hidden_state.mean(dim=1) if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPoolingAndNoAttention( last_hidden_state=last_hidden_state, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, )
class_definition
21,314
23,289
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/levit/modeling_levit.py
null
8,761
class LevitForImageClassification(LevitPreTrainedModel): def __init__(self, config): super().__init__(config) self.config = config self.num_labels = config.num_labels self.levit = LevitModel(config) # Classifier head self.classifier = ( LevitClassificationLayer(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else torch.nn.Identity() ) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(LEVIT_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT, output_type=ImageClassifierOutputWithNoAttention, config_class=_CONFIG_FOR_DOC, expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT, ) def forward( self, pixel_values: torch.FloatTensor = None, labels: Optional[torch.LongTensor] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, ImageClassifierOutputWithNoAttention]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the image classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.levit(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict) sequence_output = outputs[0] sequence_output = sequence_output.mean(1) logits = self.classifier(sequence_output) loss = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = "regression" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = "single_label_classification" else: self.config.problem_type = "multi_label_classification" if self.config.problem_type == "regression": loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(logits.squeeze(), labels.squeeze()) else: loss = loss_fct(logits, labels) elif self.config.problem_type == "single_label_classification": loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) elif self.config.problem_type == "multi_label_classification": loss_fct = BCEWithLogitsLoss() loss = loss_fct(logits, labels) if not return_dict: output = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return ImageClassifierOutputWithNoAttention( loss=loss, logits=logits, hidden_states=outputs.hidden_states, )
class_definition
23,489
26,812
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/levit/modeling_levit.py
null
8,762
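An end-to-end classification call with the model above typically runs the image processor first and then takes the argmax over the logits. A hedged usage sketch against the public facebook/levit-128S checkpoint referenced elsewhere in this file; network access and the local image path are assumptions:

import torch
from PIL import Image
from transformers import LevitImageProcessor, LevitForImageClassification

processor = LevitImageProcessor.from_pretrained("facebook/levit-128S")
model = LevitForImageClassification.from_pretrained("facebook/levit-128S")

image = Image.open("cat.jpg")  # any RGB image; the path is a placeholder
inputs = processor(images=image, return_tensors="pt")

with torch.no_grad():
    logits = model(**inputs).logits

predicted_class = logits.argmax(-1).item()
print(model.config.id2label[predicted_class])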
class LevitForImageClassificationWithTeacher(LevitPreTrainedModel): def __init__(self, config): super().__init__(config) self.config = config self.num_labels = config.num_labels self.levit = LevitModel(config) # Classifier head self.classifier = ( LevitClassificationLayer(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else torch.nn.Identity() ) self.classifier_distill = ( LevitClassificationLayer(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else torch.nn.Identity() ) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(LEVIT_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT, output_type=LevitForImageClassificationWithTeacherOutput, config_class=_CONFIG_FOR_DOC, expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT, ) def forward( self, pixel_values: torch.FloatTensor = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, LevitForImageClassificationWithTeacherOutput]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.levit(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict) sequence_output = outputs[0] sequence_output = sequence_output.mean(1) cls_logits, distill_logits = self.classifier(sequence_output), self.classifier_distill(sequence_output) logits = (cls_logits + distill_logits) / 2 if not return_dict: output = (logits, cls_logits, distill_logits) + outputs[2:] return output return LevitForImageClassificationWithTeacherOutput( logits=logits, cls_logits=cls_logits, distillation_logits=distill_logits, hidden_states=outputs.hidden_states, )
class_definition
27,247
29,364
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/levit/modeling_levit.py
null
8,763
class LevitImageProcessor(BaseImageProcessor):
    r"""
    Constructs a LeViT image processor.

    Args:
        do_resize (`bool`, *optional*, defaults to `True`):
            Whether to resize the shortest edge of the input to int(256/224 * `size`). Can be overridden by the
            `do_resize` parameter in the `preprocess` method.
        size (`Dict[str, int]`, *optional*, defaults to `{"shortest_edge": 224}`):
            Size of the output image after resizing. If size is a dict with keys "width" and "height", the image will
            be resized to `(size["height"], size["width"])`. If size is a dict with key "shortest_edge", the shortest
            edge value `c` is rescaled to `int(c * (256/224))`. The smaller edge of the image will be matched to this
            value i.e, if height > width, then image will be rescaled to
            `(size["shortest_edge"] * height / width, size["shortest_edge"])`. Can be overridden by the `size`
            parameter in the `preprocess` method.
        resample (`PILImageResampling`, *optional*, defaults to `Resampling.BICUBIC`):
            Resampling filter to use if resizing the image. Can be overridden by the `resample` parameter in the
            `preprocess` method.
        do_center_crop (`bool`, *optional*, defaults to `True`):
            Whether or not to center crop the input to `(crop_size["height"], crop_size["width"])`. Can be overridden
            by the `do_center_crop` parameter in the `preprocess` method.
        crop_size (`Dict`, *optional*, defaults to `{"height": 224, "width": 224}`):
            Desired image size after `center_crop`. Can be overridden by the `crop_size` parameter in the `preprocess`
            method.
        do_rescale (`bool`, *optional*, defaults to `True`):
            Controls whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the
            `do_rescale` parameter in the `preprocess` method.
        rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
            Scale factor to use if rescaling the image. Can be overridden by the `rescale_factor` parameter in the
            `preprocess` method.
        do_normalize (`bool`, *optional*, defaults to `True`):
            Controls whether to normalize the image. Can be overridden by the `do_normalize` parameter in the
            `preprocess` method.
        image_mean (`List[int]`, *optional*, defaults to `[0.485, 0.456, 0.406]`):
            Mean to use if normalizing the image. This is a float or list of floats the length of the number of
            channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method.
        image_std (`List[int]`, *optional*, defaults to `[0.229, 0.224, 0.225]`):
            Standard deviation to use if normalizing the image. This is a float or list of floats the length of the
            number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess`
            method.
""" model_input_names = ["pixel_values"] def __init__( self, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BICUBIC, do_center_crop: bool = True, crop_size: Dict[str, int] = None, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_MEAN, image_std: Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_STD, **kwargs, ) -> None: super().__init__(**kwargs) size = size if size is not None else {"shortest_edge": 224} size = get_size_dict(size, default_to_square=False) crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224} crop_size = get_size_dict(crop_size, param_name="crop_size") self.do_resize = do_resize self.size = size self.resample = resample self.do_center_crop = do_center_crop self.crop_size = crop_size self.do_rescale = do_rescale self.rescale_factor = rescale_factor self.do_normalize = do_normalize self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD def resize( self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, input_data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray: """ Resize an image. If size is a dict with keys "width" and "height", the image will be resized to `(size["height"], size["width"])`. If size is a dict with key "shortest_edge", the shortest edge value `c` is rescaled to `int(c * (256/224))`. The smaller edge of the image will be matched to this value i.e, if height > width, then image will be rescaled to `(size["shortest_egde"] * height / width, size["shortest_egde"])`. Args: image (`np.ndarray`): Image to resize. size (`Dict[str, int]`): Size of the output image after resizing. If size is a dict with keys "width" and "height", the image will be resized to (height, width). If size is a dict with key "shortest_edge", the shortest edge value `c` is rescaled to int(`c` * (256/224)). The smaller edge of the image will be matched to this value i.e, if height > width, then image will be rescaled to (size * height / width, size). resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`): Resampling filter to use when resiizing the image. data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format of the image. If not provided, it will be the same as the input image. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format of the input image. If not provided, it will be inferred. """ size_dict = get_size_dict(size, default_to_square=False) # size_dict is a dict with either keys "height" and "width" or "shortest_edge" if "shortest_edge" in size: shortest_edge = int((256 / 224) * size["shortest_edge"]) output_size = get_resize_output_image_size( image, size=shortest_edge, default_to_square=False, input_data_format=input_data_format ) size_dict = {"height": output_size[0], "width": output_size[1]} if "height" not in size_dict or "width" not in size_dict: raise ValueError( f"Size dict must have keys 'height' and 'width' or 'shortest_edge'. 
Got {size_dict.keys()}" ) return resize( image, size=(size_dict["height"], size_dict["width"]), resample=resample, data_format=data_format, input_data_format=input_data_format, **kwargs, ) @filter_out_non_signature_kwargs() def preprocess( self, images: ImageInput, do_resize: Optional[bool] = None, size: Optional[Dict[str, int]] = None, resample: PILImageResampling = None, do_center_crop: Optional[bool] = None, crop_size: Optional[Dict[str, int]] = None, do_rescale: Optional[bool] = None, rescale_factor: Optional[float] = None, do_normalize: Optional[bool] = None, image_mean: Optional[Union[float, Iterable[float]]] = None, image_std: Optional[Union[float, Iterable[float]]] = None, return_tensors: Optional[TensorType] = None, data_format: ChannelDimension = ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]] = None, ) -> BatchFeature: """ Preprocess an image or batch of images to be used as input to a LeViT model. Args: images (`ImageInput`): Image or batch of images to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If passing in images with pixel values between 0 and 1, set `do_rescale=False`. do_resize (`bool`, *optional*, defaults to `self.do_resize`): Whether to resize the image. size (`Dict[str, int]`, *optional*, defaults to `self.size`): Size of the output image after resizing. If size is a dict with keys "width" and "height", the image will be resized to (height, width). If size is a dict with key "shortest_edge", the shortest edge value `c` is rescaled to int(`c` * (256/224)). The smaller edge of the image will be matched to this value i.e, if height > width, then image will be rescaled to (size * height / width, size). resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`): Resampling filter to use when resiizing the image. do_center_crop (`bool`, *optional*, defaults to `self.do_center_crop`): Whether to center crop the image. crop_size (`Dict[str, int]`, *optional*, defaults to `self.crop_size`): Size of the output image after center cropping. Crops images to (crop_size["height"], crop_size["width"]). do_rescale (`bool`, *optional*, defaults to `self.do_rescale`): Whether to rescale the image pixel values by `rescaling_factor` - typical to values between 0 and 1. rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`): Factor to rescale the image pixel values by. do_normalize (`bool`, *optional*, defaults to `self.do_normalize`): Whether to normalize the image pixel values by `image_mean` and `image_std`. image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`): Mean to normalize the image pixel values by. image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`): Standard deviation to normalize the image pixel values by. return_tensors (`str` or `TensorType`, *optional*): The type of tensors to return. Can be one of: - Unset: Return a list of `np.ndarray`. - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`. - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`. - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`. - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`. data_format (`str` or `ChannelDimension`, *optional*, defaults to `ChannelDimension.FIRST`): The channel dimension format for the output image. If unset, the channel dimension format of the input image is used. 
Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format for the input image. If unset, the channel dimension format is inferred from the input image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - `"none"` or `ChannelDimension.NONE`: image in (height, width) format. """ do_resize = do_resize if do_resize is not None else self.do_resize resample = resample if resample is not None else self.resample do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop do_rescale = do_rescale if do_rescale is not None else self.do_rescale rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor do_normalize = do_normalize if do_normalize is not None else self.do_normalize image_mean = image_mean if image_mean is not None else self.image_mean image_std = image_std if image_std is not None else self.image_std size = size if size is not None else self.size size = get_size_dict(size, default_to_square=False) crop_size = crop_size if crop_size is not None else self.crop_size crop_size = get_size_dict(crop_size, param_name="crop_size") images = make_list_of_images(images) if not valid_images(images): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." ) validate_preprocess_arguments( do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, do_center_crop=do_center_crop, crop_size=crop_size, do_resize=do_resize, size=size, resample=resample, ) # All transformations expect numpy arrays. images = [to_numpy_array(image) for image in images] if do_rescale and is_scaled_image(images[0]): logger.warning_once( "It looks like you are trying to rescale already rescaled images. If the input" " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again." ) if input_data_format is None: # We assume that all images have the same channel dimension format. input_data_format = infer_channel_dimension_format(images[0]) if do_resize: images = [self.resize(image, size, resample, input_data_format=input_data_format) for image in images] if do_center_crop: images = [self.center_crop(image, crop_size, input_data_format=input_data_format) for image in images] if do_rescale: images = [self.rescale(image, rescale_factor, input_data_format=input_data_format) for image in images] if do_normalize: images = [ self.normalize(image, image_mean, image_std, input_data_format=input_data_format) for image in images ] images = [ to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images ] data = {"pixel_values": images} return BatchFeature(data=data, tensor_type=return_tensors)
class_definition
1,356
16,565
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/levit/image_processing_levit.py
null
8,764
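With the default size of {"shortest_edge": 224}, the resize step above scales the shortest edge to int(256/224 * 224) = 256 and the center crop then returns 224x224, matching the standard ImageNet evaluation transform. A quick shape check; the random uint8 image is a stand-in for real input:

import numpy as np

processor = LevitImageProcessor()
image = (np.random.rand(300, 500, 3) * 255).astype(np.uint8)  # HWC dummy image

outputs = processor(images=image, return_tensors="np")
print(outputs["pixel_values"].shape)  # (1, 3, 224, 224)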
class LevitConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`LevitModel`]. It is used to instantiate a LeViT
    model according to the specified arguments, defining the model architecture. Instantiating a configuration with
    the defaults will yield a similar configuration to that of the LeViT
    [facebook/levit-128S](https://huggingface.co/facebook/levit-128S) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        image_size (`int`, *optional*, defaults to 224):
            The size of the input image.
        num_channels (`int`, *optional*, defaults to 3):
            Number of channels in the input image.
        kernel_size (`int`, *optional*, defaults to 3):
            The kernel size for the initial convolution layers of patch embedding.
        stride (`int`, *optional*, defaults to 2):
            The stride size for the initial convolution layers of patch embedding.
        padding (`int`, *optional*, defaults to 1):
            The padding size for the initial convolution layers of patch embedding.
        patch_size (`int`, *optional*, defaults to 16):
            The patch size for embeddings.
        hidden_sizes (`List[int]`, *optional*, defaults to `[128, 256, 384]`):
            Dimension of each of the encoder blocks.
        num_attention_heads (`List[int]`, *optional*, defaults to `[4, 8, 12]`):
            Number of attention heads for each attention layer in each block of the Transformer encoder.
        depths (`List[int]`, *optional*, defaults to `[4, 4, 4]`):
            The number of layers in each encoder block.
        key_dim (`List[int]`, *optional*, defaults to `[16, 16, 16]`):
            The size of key in each of the encoder blocks.
        drop_path_rate (`int`, *optional*, defaults to 0):
            The dropout probability for stochastic depths, used in the blocks of the Transformer encoder.
        mlp_ratio (`List[int]`, *optional*, defaults to `[2, 2, 2]`):
            Ratio of the size of the hidden layer compared to the size of the input layer of the Mix FFNs in the
            encoder blocks.
        attention_ratio (`List[int]`, *optional*, defaults to `[2, 2, 2]`):
            Ratio of the size of the output dimension compared to input dimension of attention layers.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
Example: ```python >>> from transformers import LevitConfig, LevitModel >>> # Initializing a LeViT levit-128S style configuration >>> configuration = LevitConfig() >>> # Initializing a model (with random weights) from the levit-128S style configuration >>> model = LevitModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "levit" def __init__( self, image_size=224, num_channels=3, kernel_size=3, stride=2, padding=1, patch_size=16, hidden_sizes=[128, 256, 384], num_attention_heads=[4, 8, 12], depths=[4, 4, 4], key_dim=[16, 16, 16], drop_path_rate=0, mlp_ratio=[2, 2, 2], attention_ratio=[2, 2, 2], initializer_range=0.02, **kwargs, ): super().__init__(**kwargs) self.image_size = image_size self.num_channels = num_channels self.kernel_size = kernel_size self.stride = stride self.padding = padding self.hidden_sizes = hidden_sizes self.num_attention_heads = num_attention_heads self.depths = depths self.key_dim = key_dim self.drop_path_rate = drop_path_rate self.patch_size = patch_size self.attention_ratio = attention_ratio self.mlp_ratio = mlp_ratio self.initializer_range = initializer_range self.down_ops = [ ["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2], ["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2], ]
class_definition
933
5,244
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/levit/configuration_levit.py
null
8,765
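Each of the two `down_ops` entries built in `__init__` above describes one subsampling attention stage, and its last element (stride 2) is what `LevitStage` plugs into the resolution update (resolution_in - 1) // stride + 1. A quick check of how the default 14x14 token grid shrinks across the two stages; the starting value assumes the default image_size of 224 and patch_size of 16:

def downsampled_resolution(resolution_in: int, stride: int) -> int:
    # same formula LevitStage applies for a "Subsample" down_op
    return (resolution_in - 1) // stride + 1

resolution = 224 // 16          # image_size // patch_size = 14
for _ in range(2):              # two Subsample entries in the default down_ops
    resolution = downsampled_resolution(resolution, stride=2)
    print(resolution)           # 7, then 4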
class LevitOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
class_definition
5,317
5,715
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/levit/configuration_levit.py
null
8,766
class LevitFeatureExtractor(LevitImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class LevitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use LevitImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
class_definition
837
1,203
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/levit/feature_extraction_levit.py
null
8,767
class PatchTSMixerGatedAttention(nn.Module):
    """
    Module that applies gated attention to input data.

    Args:
        in_size (`int`): The input size.
        out_size (`int`): The output size.
    """

    def __init__(self, in_size: int, out_size: int):
        super().__init__()
        self.attn_layer = nn.Linear(in_size, out_size)
        self.attn_softmax = nn.Softmax(dim=-1)

    def forward(self, inputs):
        attn_weight = self.attn_softmax(self.attn_layer(inputs))
        inputs = inputs * attn_weight
        return inputs
class_definition
3,172
3,722
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/patchtsmixer/modeling_patchtsmixer.py
null
8,768
class PatchTSMixerBatchNorm(nn.Module):
    """
    Compute batch normalization over the sequence length (time) dimension.
    """

    def __init__(self, config: PatchTSMixerConfig):
        super().__init__()
        self.batchnorm = nn.BatchNorm1d(config.d_model, eps=config.norm_eps)

    def forward(self, inputs: torch.Tensor):
        """
        Parameters:
            inputs (`torch.Tensor` of shape `(batch_size, sequence_length, d_model)`):
                input for Batch norm calculation
        Returns:
            `torch.Tensor` of shape `(batch_size, sequence_length, d_model)`
        """
        output = inputs.transpose(1, 2)  # output: (batch_size, d_model, sequence_length)
        output = self.batchnorm(output)
        return output.transpose(1, 2)
class_definition
3,832
4,607
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/patchtsmixer/modeling_patchtsmixer.py
null
8,769
class PatchTSMixerPositionalEncoding(nn.Module):
    """
    Class for positional encoding
    """

    def __init__(self, config: PatchTSMixerConfig):
        super().__init__()
        # positional encoding: [num_patches x d_model]
        if config.use_positional_encoding:
            self.position_enc = self._init_pe(config)
        else:
            self.position_enc = nn.Parameter(torch.zeros(config.num_patches, config.d_model))

    @staticmethod
    def _init_pe(config: PatchTSMixerConfig) -> nn.Parameter:
        # Positional encoding
        if config.positional_encoding_type == "random":
            position_enc = nn.Parameter(torch.randn(config.num_patches, config.d_model), requires_grad=True)
        elif config.positional_encoding_type == "sincos":
            position_enc = torch.zeros(config.num_patches, config.d_model)
            position = torch.arange(0, config.num_patches).unsqueeze(1)
            div_term = torch.exp(torch.arange(0, config.d_model, 2) * -(math.log(10000.0) / config.d_model))
            position_enc[:, 0::2] = torch.sin(position * div_term)
            position_enc[:, 1::2] = torch.cos(position * div_term)
            position_enc = position_enc - position_enc.mean()
            position_enc = position_enc / (position_enc.std() * 10)
            position_enc = nn.Parameter(position_enc, requires_grad=False)
        else:
            raise ValueError(
                f"{config.positional_encoding_type} is not a valid positional encoder. Available types are 'random' and 'sincos'."
            )
        return position_enc

    def forward(self, patch_input: torch.Tensor):
        # hidden_state: [bs x num_channels x num_patches x d_model]
        hidden_state = patch_input + self.position_enc
        return hidden_state
class_definition
4,610
6,396
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/patchtsmixer/modeling_patchtsmixer.py
null
8,770
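The 'sincos' branch above builds the standard sinusoidal table, then normalizes it to zero mean and a standard deviation of roughly 0.1 before freezing it. A standalone sketch of that construction; the sizes are arbitrary illustration values:

import math
import torch

num_patches, d_model = 8, 16

position_enc = torch.zeros(num_patches, d_model)
position = torch.arange(0, num_patches).unsqueeze(1)
div_term = torch.exp(torch.arange(0, d_model, 2) * -(math.log(10000.0) / d_model))
position_enc[:, 0::2] = torch.sin(position * div_term)
position_enc[:, 1::2] = torch.cos(position * div_term)

position_enc = position_enc - position_enc.mean()
position_enc = position_enc / (position_enc.std() * 10)
print(position_enc.mean().item(), position_enc.std().item())  # ~0.0 and ~0.1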
class PatchTSMixerNormLayer(nn.Module):
    """Normalization block

    Args:
        config (`PatchTSMixerConfig`):
            Configuration.
    """

    def __init__(self, config: PatchTSMixerConfig):
        super().__init__()

        self.norm_mlp = config.norm_mlp

        if "batch" in config.norm_mlp.lower():
            self.norm = PatchTSMixerBatchNorm(config)
        else:
            self.norm = nn.LayerNorm(config.d_model, eps=config.norm_eps)

    def forward(self, inputs: torch.Tensor):
        """
        Args:
            inputs (`torch.Tensor` of shape `((batch_size, num_channels, num_patches, d_model))`):
                Input to the normalization layer.
        Returns:
            `torch.Tensor` of shape `((batch_size, num_channels, num_patches, d_model))`
        """
        if "batch" in self.norm_mlp.lower():
            # reshape the data to [batch_size*num_channels, num_patches, d_model]
            inputs_reshaped = torch.reshape(
                inputs,
                (
                    inputs.shape[0] * inputs.shape[1],
                    inputs.shape[2],
                    inputs.shape[3],
                ),
            )

            inputs_reshaped = self.norm(inputs_reshaped)

            # put back data to the original shape
            inputs = torch.reshape(inputs_reshaped, inputs.shape)
        else:
            inputs = self.norm(inputs)

        return inputs
class_definition
6,399
7,924
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/patchtsmixer/modeling_patchtsmixer.py
null
8,771
class PatchTSMixerMLP(nn.Module):
    def __init__(self, in_features, out_features, config):
        super().__init__()
        num_hidden = in_features * config.expansion_factor
        self.fc1 = nn.Linear(in_features, num_hidden)
        self.dropout1 = nn.Dropout(config.dropout)
        self.fc2 = nn.Linear(num_hidden, out_features)
        self.dropout2 = nn.Dropout(config.dropout)

    def forward(self, inputs: torch.Tensor):
        """
        Args:
            inputs (`torch.Tensor` of shape `((batch_size, num_channels, num_patches, d_model))`):
                Input to the MLP layer.
        Returns:
            `torch.Tensor` of the same shape as `inputs`
        """
        inputs = self.dropout1(nn.functional.gelu(self.fc1(inputs)))
        inputs = self.fc2(inputs)
        inputs = self.dropout2(inputs)
        return inputs
class_definition
7,927
8,777
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/patchtsmixer/modeling_patchtsmixer.py
null
8,772
class PatchTSMixerChannelFeatureMixerBlock(nn.Module):
    """This module mixes the features in the channel dimension.

    Args:
        config (`PatchTSMixerConfig`):
            Configuration.
    """

    def __init__(self, config: PatchTSMixerConfig):
        super().__init__()

        self.norm = PatchTSMixerNormLayer(config)
        self.gated_attn = config.gated_attn
        self.mlp = PatchTSMixerMLP(
            in_features=config.num_input_channels,
            out_features=config.num_input_channels,
            config=config,
        )

        if config.gated_attn:
            self.gating_block = PatchTSMixerGatedAttention(
                in_size=config.num_input_channels, out_size=config.num_input_channels
            )

    def forward(self, inputs: torch.Tensor):
        """
        Args:
            inputs (`torch.Tensor` of shape `((batch_size, num_channels, num_patches, d_model))`):
                input to the MLP layer
        Returns:
            `torch.Tensor` of the same shape as `inputs`
        """
        residual = inputs
        inputs = self.norm(inputs)

        inputs = inputs.permute(0, 3, 2, 1)

        if self.gated_attn:
            inputs = self.gating_block(inputs)

        inputs = self.mlp(inputs)

        inputs = inputs.permute(0, 3, 2, 1)

        out = inputs + residual
        return out
class_definition
8,780
10,135
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/patchtsmixer/modeling_patchtsmixer.py
null
8,773
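Channel mixing in the block above works by permuting the channel axis into the last position, applying the MLP (and optional gating) across channels, and permuting back, so the output keeps the input shape. A shape-only sketch of that round trip; the dimensions are placeholders:

import torch

batch_size, num_channels, num_patches, d_model = 2, 7, 12, 16
inputs = torch.randn(batch_size, num_channels, num_patches, d_model)

mixed = inputs.permute(0, 3, 2, 1)  # (batch, d_model, num_patches, num_channels)
# ... an MLP mapping num_channels -> num_channels would act on the last dimension here ...
mixed = mixed.permute(0, 3, 2, 1)   # back to (batch, num_channels, num_patches, d_model)

print(mixed.shape == inputs.shape)  # True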
class PatchTSMixerAttention(nn.Module):
    """Multi-headed attention from 'Attention Is All You Need' paper"""

    def __init__(
        self,
        embed_dim: int,
        num_heads: int,
        dropout: float = 0.0,
        is_decoder: bool = False,
        bias: bool = True,
        is_causal: bool = False,
        config: Optional[PatchTSMixerConfig] = None,
    ):
        super().__init__()
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.dropout = dropout
        self.head_dim = embed_dim // num_heads
        self.config = config

        if (self.head_dim * num_heads) != self.embed_dim:
            raise ValueError(
                f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}"
                f" and `num_heads`: {num_heads})."
            )
        self.scaling = self.head_dim**-0.5
        self.is_decoder = is_decoder
        self.is_causal = is_causal

        self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
        self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
        self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
        self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)

    def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
        return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()

    def forward(
        self,
        hidden_states: torch.Tensor,
        key_value_states: Optional[torch.Tensor] = None,
        past_key_value: Optional[Tuple[torch.Tensor]] = None,
        attention_mask: Optional[torch.Tensor] = None,
        layer_head_mask: Optional[torch.Tensor] = None,
        output_attentions: bool = False,
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
        """Input shape: Batch x Time x Channel"""

        # if key_value_states are provided this layer is used as a cross-attention layer
        # for the decoder
        is_cross_attention = key_value_states is not None

        bsz, tgt_len, _ = hidden_states.size()

        # get query proj
        query_states = self.q_proj(hidden_states) * self.scaling
        # get key, value proj
        # `past_key_value[0].shape[2] == key_value_states.shape[1]`
        # is checking that the `sequence_length` of the `past_key_value` is the same as
        # the provided `key_value_states` to support prefix tuning
        if (
            is_cross_attention
            and past_key_value is not None
            and past_key_value[0].shape[2] == key_value_states.shape[1]
        ):
            # reuse k,v, cross_attentions
            key_states = past_key_value[0]
            value_states = past_key_value[1]
        elif is_cross_attention:
            # cross_attentions
            key_states = self._shape(self.k_proj(key_value_states), -1, bsz)
            value_states = self._shape(self.v_proj(key_value_states), -1, bsz)
        elif past_key_value is not None:
            # reuse k, v, self_attention
            key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
            value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
            key_states = torch.cat([past_key_value[0], key_states], dim=2)
            value_states = torch.cat([past_key_value[1], value_states], dim=2)
        else:
            # self_attention
            key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
            value_states = self._shape(self.v_proj(hidden_states), -1, bsz)

        if self.is_decoder:
            # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
            # Further calls to cross_attention layer can then reuse all cross-attention
            # key/value_states (first "if" case)
            # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
            # all previous decoder key/value_states. Further calls to uni-directional self-attention
            # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
            # if encoder bi-directional self-attention `past_key_value` is always `None`
            past_key_value = (key_states, value_states)

        proj_shape = (bsz * self.num_heads, -1, self.head_dim)
        query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape)
        key_states = key_states.reshape(*proj_shape)
        value_states = value_states.reshape(*proj_shape)

        src_len = key_states.size(1)
        attn_weights = torch.bmm(query_states, key_states.transpose(1, 2))

        if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len):
            raise ValueError(
                f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is"
                f" {attn_weights.size()}"
            )

        if attention_mask is not None:
            if attention_mask.size() != (bsz, 1, tgt_len, src_len):
                raise ValueError(
                    f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}"
                )
            attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask
            attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)

        attn_weights = nn.functional.softmax(attn_weights, dim=-1)

        if layer_head_mask is not None:
            if layer_head_mask.size() != (self.num_heads,):
                raise ValueError(
                    f"Head mask for a single layer should be of size {(self.num_heads,)}, but is"
                    f" {layer_head_mask.size()}"
                )
            attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
            attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)

        if output_attentions:
            # this operation is a bit awkward, but it's required to
            # make sure that attn_weights keeps its gradient.
            # In order to do so, attn_weights have to be reshaped
            # twice and have to be reused in the following
            attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
            attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len)
        else:
            attn_weights_reshaped = None

        attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)

        attn_output = torch.bmm(attn_probs, value_states)

        if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim):
            raise ValueError(
                f"`attn_output` should be of size {(bsz * self.num_heads, tgt_len, self.head_dim)}, but is"
                f" {attn_output.size()}"
            )

        attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim)
        attn_output = attn_output.transpose(1, 2)

        # Use the `embed_dim` from the config (stored in the class) rather than `hidden_state` because `attn_output` can be
        # partitioned across GPUs when using tensor-parallelism.
        attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim)

        attn_output = self.out_proj(attn_output)

        return attn_output, attn_weights_reshaped, past_key_value
class_definition
10,229
17,635
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/patchtsmixer/modeling_patchtsmixer.py
null
8,774
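PatchTSMixerAttention follows the standard multi-head pattern: project, split `embed_dim` into `num_heads` heads, batch the heads for `torch.bmm`, softmax, and merge the heads back. A self-contained shape-flow sketch of that reshaping; the sizes are illustrative, and the projections, masking, and key/value caching of the module above are omitted:

import torch
import torch.nn.functional as F

# Illustrative sizes; in the module above embed_dim = config.d_model and num_heads = config.self_attn_heads.
bsz, seq_len, embed_dim, num_heads = 2, 12, 16, 4
head_dim = embed_dim // num_heads
scaling = head_dim**-0.5

q = torch.randn(bsz, seq_len, embed_dim) * scaling
k = torch.randn(bsz, seq_len, embed_dim)
v = torch.randn(bsz, seq_len, embed_dim)

def split_heads(x):
    # (bsz, seq, embed) -> (bsz * num_heads, seq, head_dim), mirroring `_shape` plus the proj_shape view
    return x.view(bsz, seq_len, num_heads, head_dim).transpose(1, 2).reshape(bsz * num_heads, seq_len, head_dim)

attn_weights = torch.bmm(split_heads(q), split_heads(k).transpose(1, 2))  # (bsz*heads, seq, seq)
attn_probs = F.softmax(attn_weights, dim=-1)
attn_output = torch.bmm(attn_probs, split_heads(v))                       # (bsz*heads, seq, head_dim)
attn_output = attn_output.view(bsz, num_heads, seq_len, head_dim).transpose(1, 2).reshape(bsz, seq_len, embed_dim)
print(attn_output.shape)  # torch.Size([2, 12, 16])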
class PatchMixerBlock(nn.Module): """This module mixes the patch dimension. Args: config (`PatchTSMixerConfig`): Configuration. """ def __init__(self, config: PatchTSMixerConfig): super().__init__() self.norm = PatchTSMixerNormLayer(config) self.self_attn = config.self_attn self.gated_attn = config.gated_attn self.mlp = PatchTSMixerMLP( in_features=config.num_patches, out_features=config.num_patches, config=config, ) if config.gated_attn: self.gating_block = PatchTSMixerGatedAttention(in_size=config.num_patches, out_size=config.num_patches) if config.self_attn: self.self_attn_layer = PatchTSMixerAttention( embed_dim=config.d_model, num_heads=config.self_attn_heads, dropout=config.dropout, ) self.norm_attn = PatchTSMixerNormLayer(config) def forward(self, hidden_state): """ Args: hidden_state (`torch.Tensor`): Input tensor. Returns: `torch.Tensor`: Transformed tensor. """ residual = hidden_state hidden_state = self.norm(hidden_state) if self.self_attn: batch_size, n_vars, num_patches, d_model = hidden_state.shape hidden_state_reshaped = hidden_state.reshape(batch_size * n_vars, num_patches, d_model) x_attn, _, _ = self.self_attn_layer(hidden_state_reshaped, output_attentions=False) x_attn = x_attn.reshape(batch_size, n_vars, num_patches, d_model) # Transpose so that num_patches is the last dimension hidden_state = hidden_state.transpose(2, 3) hidden_state = self.mlp(hidden_state) if self.gated_attn: hidden_state = self.gating_block(hidden_state) # Transpose back hidden_state = hidden_state.transpose(2, 3) if self.self_attn: hidden_state = self.norm_attn(hidden_state + x_attn) out = hidden_state + residual return out
class_definition
17,638
19,757
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/patchtsmixer/modeling_patchtsmixer.py
null
8,775
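Two reshapes do the work in the patch mixer above: the optional self-attention folds the channel axis into the batch so each channel's patch sequence is attended to independently, and the MLP path transposes so that `num_patches` is the last axis before mixing. A small sketch of both reshapes under assumed shapes:

import torch

# Illustrative shapes; d_model and num_patches come from the configuration in practice.
batch_size, n_vars, num_patches, d_model = 2, 5, 12, 16
hidden_state = torch.randn(batch_size, n_vars, num_patches, d_model)

# Per-channel self-attention: fold channels into the batch, attend, then unfold afterwards.
flat = hidden_state.reshape(batch_size * n_vars, num_patches, d_model)
# ... attention over `flat` would go here ...
restored = flat.reshape(batch_size, n_vars, num_patches, d_model)

# Patch mixing itself happens after moving num_patches to the last axis.
transposed = hidden_state.transpose(2, 3)  # (batch, n_vars, d_model, num_patches)
print(transposed.shape, restored.shape)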
class FeatureMixerBlock(nn.Module): """This module mixes the hidden feature dimension. Args: config (`PatchTSMixerConfig`): Configuration. """ def __init__(self, config: PatchTSMixerConfig): super().__init__() self.norm = PatchTSMixerNormLayer(config) self.gated_attn = config.gated_attn self.mlp = PatchTSMixerMLP( in_features=config.d_model, out_features=config.d_model, config=config, ) if config.gated_attn: self.gating_block = PatchTSMixerGatedAttention(in_size=config.d_model, out_size=config.d_model) def forward(self, hidden: torch.Tensor): """ Args: hidden (`torch.Tensor` of shape `(batch_size, num_patches, d_model)`): Input tensor to the layer. Returns: `torch.Tensor`: Transformed tensor. """ residual = hidden hidden = self.norm(hidden) hidden = self.mlp(hidden) if self.gated_attn: hidden = self.gating_block(hidden) out = hidden + residual return out
class_definition
19,760
20,905
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/patchtsmixer/modeling_patchtsmixer.py
null
8,776
class PatchTSMixerLayer(nn.Module): """ The `PatchTSMixer` layer that does all three kinds of mixing. Args: config (`PatchTSMixerConfig`): Configuration. """ def __init__(self, config: PatchTSMixerConfig): super().__init__() self.patch_mixer = PatchMixerBlock(config=config) self.feature_mixer = FeatureMixerBlock(config=config) self.mode = config.mode if config.mode == "mix_channel": self.channel_feature_mixer = PatchTSMixerChannelFeatureMixerBlock(config=config) def forward(self, hidden: torch.Tensor): """ Args: hidden (`torch.Tensor` of shape `(batch_size, num_patches, d_model)`): Input tensor to the layer. Returns: `torch.Tensor`: Transformed tensor. """ if self.mode == "mix_channel": hidden = self.channel_feature_mixer(hidden) hidden = self.patch_mixer(hidden) hidden = self.feature_mixer(hidden) # hidden: (batch_size x num_patches x d_model) return hidden
class_definition
20,908
22,000
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/patchtsmixer/modeling_patchtsmixer.py
null
8,777
class PatchTSMixerBlock(nn.Module): """The main computing framework of the `PatchTSMixer` model. Args: config (`PatchTSMixerConfig`): Configuration. """ def __init__(self, config: PatchTSMixerConfig): super().__init__() num_layers = config.num_layers self.mixers = nn.ModuleList([PatchTSMixerLayer(config=config) for _ in range(num_layers)]) def forward(self, hidden_state, output_hidden_states: bool = False): """ Args: hidden_state (`torch.Tensor`): The input tensor. output_hidden_states (`bool`, *optional*, defaults to False.): Whether to output the hidden states as well. Returns: `torch.Tensor`: The embedding. `list`: List of all hidden states if `output_hidden_states` is set to `True`. """ all_hidden_states = [] embedding = hidden_state for mod in self.mixers: embedding = mod(embedding) if output_hidden_states: all_hidden_states.append(embedding) if output_hidden_states: return embedding, all_hidden_states else: return embedding, None
class_definition
22,003
23,225
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/patchtsmixer/modeling_patchtsmixer.py
null
8,778
class PatchTSMixerForPredictionHead(nn.Module): """Prediction Head for Forecasting Args: config (`PatchTSMixerConfig`): Configuration. """ def __init__(self, config: PatchTSMixerConfig, distribution_output=None): super().__init__() self.prediction_channel_indices = config.prediction_channel_indices if self.prediction_channel_indices is not None: self.prediction_channel_indices.sort() self.dropout_layer = nn.Dropout(config.head_dropout) if distribution_output is None: self.base_forecast_block = nn.Linear((config.num_patches * config.d_model), config.prediction_length) else: self.base_forecast_block = distribution_output.get_parameter_projection( config.num_patches * config.d_model ) self.flatten = nn.Flatten(start_dim=-2) def forward(self, hidden_features): """ Args: hidden_features (`torch.Tensor` of shape `(batch_size, num_patch, d_model)` in `flatten` mode or `(batch_size, n_vars, num_patch, d_model)` in `common_channel`/`mix_channel` mode.): Input hidden features. Returns: `torch.Tensor` of shape `(batch_size, prediction_length, nvars)`. """ hidden_features = self.flatten(hidden_features) # [batch_size x n_vars x num_patch * d_model] hidden_features = self.dropout_layer(hidden_features) # [batch_size x n_vars x num_patch * d_model] forecast = self.base_forecast_block(hidden_features) # [batch_size x n_vars x prediction_length] if isinstance(forecast, tuple): forecast = tuple(z.transpose(-1, -2) for z in forecast) else: forecast = forecast.transpose(-1, -2) # [batch_size x prediction_length x n_vars] if self.prediction_channel_indices is not None: if isinstance(forecast, tuple): forecast = tuple(z[..., self.prediction_channel_indices] for z in forecast) else: forecast = forecast[..., self.prediction_channel_indices] # [batch_size x prediction_length x n_vars] return forecast
class_definition
23,228
25,437
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/patchtsmixer/modeling_patchtsmixer.py
null
8,779
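The forecasting head above flattens the last two axes (`num_patches`, `d_model`) per channel, maps them to `prediction_length` with a single linear layer, and then transposes so that time is the second axis. A sketch of that shape flow with illustrative sizes:

import torch
import torch.nn as nn

# Illustrative sizes; in the head above the Linear maps num_patches * d_model -> prediction_length.
batch_size, n_vars, num_patches, d_model, prediction_length = 2, 5, 12, 16, 24

hidden = torch.randn(batch_size, n_vars, num_patches, d_model)
flatten = nn.Flatten(start_dim=-2)
proj = nn.Linear(num_patches * d_model, prediction_length)

flat = flatten(hidden)                 # (batch, n_vars, num_patches * d_model)
forecast = proj(flat)                  # (batch, n_vars, prediction_length)
forecast = forecast.transpose(-1, -2)  # (batch, prediction_length, n_vars)
print(forecast.shape)                  # torch.Size([2, 24, 5])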
class PatchTSMixerLinearHead(nn.Module): """Linear head for Classification and Regression. Args: config (`PatchTSMixerConfig`): Configuration. """ def __init__(self, config: PatchTSMixerConfig, distribution_output=None): super().__init__() self.head_aggregation = config.head_aggregation self.output_range = config.output_range if config.head_aggregation is None: mul_factor = config.num_patches else: mul_factor = 1 self.distribution_output = distribution_output if distribution_output is None: self.projection = nn.Linear( config.d_model * config.num_input_channels * mul_factor, config.num_targets, ) else: self.projection = distribution_output.get_parameter_projection( config.d_model * config.num_input_channels * mul_factor ) if config.head_aggregation is None: self.flatten = nn.Flatten(start_dim=-3) else: self.flatten = nn.Flatten(start_dim=-2) self.dropout = nn.Dropout(config.head_dropout) def forward(self, hidden_features): """ Args: hidden_features (`torch.Tensor` of shape `(batch_size x num_patch x d_model)` in `flatten` mode or `(batch_size x n_vars x num_patch x d_model)` in `common_channel`/`mix_channel` mode.): Input hidden features. Returns: `torch.Tensor` of shape `(batch_size x num_targets)`. """ # batch_size x d_model x num_patch or batch_size x n_vars x d_model x num_patch hidden_features = hidden_features.transpose(-1, -2) if self.head_aggregation == "use_last": # batch_size x d_model (flatten) or # batch_size x n_vars x d_model (common_channel) hidden_features = hidden_features[..., -1] elif self.head_aggregation == "max_pool": # batch_size x n_vars x d_model or batch_size x d_model hidden_features = hidden_features.max(dim=-1).values elif self.head_aggregation == "avg_pool": # batch_size x n_vars x d_model or batch_size x d_model hidden_features = hidden_features.mean(dim=-1) if self.flatten: hidden_features = self.flatten(hidden_features) hidden_features = self.dropout(hidden_features) hidden_features = self.projection(hidden_features) # batch_size x num_targets if (self.distribution_output is None) and (self.output_range is not None): hidden_features = ( torch.sigmoid(hidden_features) * (self.output_range[1] - self.output_range[0]) + self.output_range[0] ) return hidden_features
class_definition
25,440
28,248
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/patchtsmixer/modeling_patchtsmixer.py
null
8,780
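The `head_aggregation` options in the linear head reduce the patch axis in one of three ways before flattening and projecting. A standalone sketch of the three reductions on an assumed `common_channel`-mode input:

import torch

# Illustrative input in `common_channel`/`mix_channel` mode: (batch, n_vars, num_patches, d_model).
hidden = torch.randn(2, 5, 12, 16)
features = hidden.transpose(-1, -2)     # (batch, n_vars, d_model, num_patches)

use_last = features[..., -1]            # keep the last patch:  (batch, n_vars, d_model)
max_pool = features.max(dim=-1).values  # max over patches:     (batch, n_vars, d_model)
avg_pool = features.mean(dim=-1)        # mean over patches:    (batch, n_vars, d_model)
print(use_last.shape, max_pool.shape, avg_pool.shape)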
class PatchTSMixerPreTrainedModel(PreTrainedModel): # Weight initialization config_class = PatchTSMixerConfig base_model_prefix = "model" main_input_name = "past_values" supports_gradient_checkpointing = False def _init_weights(self, module): """Initialize weights""" if isinstance(module, PatchTSMixerPositionalEncoding): # initialize positional encoding if self.config.positional_encoding_type == "random": nn.init.normal_(module.position_enc, mean=0.0, std=0.1) elif isinstance(module, (nn.LayerNorm, nn.BatchNorm1d)): module.bias.data.zero_() module.weight.data.fill_(1.0) elif isinstance(module, PatchTSMixerBatchNorm): module.batchnorm.bias.data.zero_() module.batchnorm.weight.data.fill_(1.0) elif isinstance(module, nn.Linear): module.weight.data.normal_(mean=0.0, std=self.config.init_std) if module.bias is not None: module.bias.data.zero_()
class_definition
28,251
29,295
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/patchtsmixer/modeling_patchtsmixer.py
null
8,781
class PatchTSMixerPretrainHead(nn.Module): """Pretraining head. Args: config (`PatchTSMixerConfig`): Configuration. """ def __init__(self, config: PatchTSMixerConfig): super().__init__() self.dropout_layer = nn.Dropout(config.head_dropout) self.base_pt_block = nn.Linear(config.d_model, config.patch_length) def forward(self, hidden_features): """ Args: hidden_features (`torch.Tensor` of shape `(batch_size x num_patch x d_model)` in `flatten` mode or `(batch_size x n_vars x num_patch x d_model)` in `common_channel`/`mix_channel` mode.): Input hidden features. Returns: `torch.Tensor` of shape `(batch_size x n_vars x num_patch x patch_length)`. """ hidden_features = self.dropout_layer(hidden_features) forecast = self.base_pt_block(hidden_features) # [batch_size x n_vars x num_patch x patch_length] return forecast
class_definition
29,298
30,301
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/patchtsmixer/modeling_patchtsmixer.py
null
8,782
class PatchTSMixerPatchify(nn.Module): """ A class to patchify the time series sequence into different patches Returns: `torch.Tensor` of shape `(batch_size, num_channels, num_patches, patch_length)` """ def __init__(self, config: PatchTSMixerConfig): super().__init__() self.sequence_length = config.context_length self.patch_length = config.patch_length self.patch_stride = config.patch_stride if self.sequence_length <= self.patch_length: raise ValueError( f"Sequence length ({self.sequence_length}) has to be greater than the patch length ({self.patch_length})" ) # get the number of patches self.num_patches = (max(self.sequence_length, self.patch_length) - self.patch_length) // self.patch_stride + 1 new_sequence_length = self.patch_length + self.patch_stride * (self.num_patches - 1) self.sequence_start = self.sequence_length - new_sequence_length def forward(self, past_values: torch.Tensor): """ Parameters: past_values (`torch.Tensor` of shape `(batch_size, sequence_length, num_channels)`, *required*): Input for patchification Returns: `torch.Tensor` of shape `(batch_size, num_channels, num_patches, patch_length)` """ sequence_length = past_values.shape[-2] if sequence_length != self.sequence_length: raise ValueError( f"Input sequence length ({sequence_length}) doesn't match model configuration ({self.sequence_length})." ) # output: [bs x new_sequence_length x num_channels] output = past_values[:, self.sequence_start :, :] # output: [bs x num_patches x num_input_channels x patch_length] output = output.unfold(dimension=-2, size=self.patch_length, step=self.patch_stride) # output: [bs x num_input_channels x num_patches x patch_length] output = output.transpose(-2, -3).contiguous() return output
class_definition
35,903
37,958
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/patchtsmixer/modeling_patchtsmixer.py
null
8,783
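Patchification is a strided `unfold` along the time axis, and the number of patches follows directly from the context length, patch length, and stride, as in the module above. A runnable sketch using assumed configuration values:

import torch

# Illustrative configuration values; the module above reads them from PatchTSMixerConfig.
context_length, patch_length, patch_stride = 32, 8, 8
num_patches = (max(context_length, patch_length) - patch_length) // patch_stride + 1  # -> 4

past_values = torch.randn(2, context_length, 3)  # (batch, sequence_length, num_channels)

# Keep only the trailing window that tiles exactly into patches, then unfold along time.
new_len = patch_length + patch_stride * (num_patches - 1)
output = past_values[:, context_length - new_len :, :]
output = output.unfold(dimension=-2, size=patch_length, step=patch_stride)
output = output.transpose(-2, -3).contiguous()   # (batch, num_channels, num_patches, patch_length)
print(output.shape)                              # torch.Size([2, 3, 4, 8])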
class PatchTSMixerMasking(nn.Module): """ Class to perform random or forecast masking. Parameters: config (`PatchTSMixerConfig`): model config Returns: x_mask (`torch.Tensor` of shape `(batch_size, num_channels, num_patches, patch_length)`) Masked patched input mask (`torch.Tensor` of shape `(batch_size, num_channels, num_patches)`) Bool tensor indicating True on masked points """ def __init__(self, config: PatchTSMixerConfig): super().__init__() self.random_mask_ratio = config.random_mask_ratio self.channel_consistent_masking = config.channel_consistent_masking self.mask_type = config.mask_type self.num_forecast_mask_patches = config.num_forecast_mask_patches self.unmasked_channel_indices = config.unmasked_channel_indices self.mask_value = config.mask_value if self.unmasked_channel_indices is not None: self.unmasked_channel_indices = sorted(self.unmasked_channel_indices) def forward(self, patch_input: torch.Tensor): """ Parameters: patch_input (`torch.Tensor` of shape `(batch_size, num_channels, num_patches, patch_length)`, *required*): Patch input Return: masked_input (`torch.Tensor` of shape `(batch_size, num_channels, num_patches, patch_length)`) Masked patched input mask (`torch.Tensor` of shape `(batch_size, num_channels, num_patches)`) Bool tensor indicating True on masked points """ if self.mask_type == "random": masked_input, mask = random_masking( inputs=patch_input, mask_ratio=self.random_mask_ratio, unmasked_channel_indices=self.unmasked_channel_indices, channel_consistent_masking=self.channel_consistent_masking, mask_value=self.mask_value, ) elif self.mask_type == "forecast": masked_input, mask = forecast_masking( inputs=patch_input, num_forecast_mask_patches=self.num_forecast_mask_patches, unmasked_channel_indices=self.unmasked_channel_indices, mask_value=self.mask_value, ) else: raise ValueError(f"Invalid mask type {self.mask_type}.") # mask: [bs x num_input_channels x num_patch] mask = mask.bool() return masked_input, mask
class_definition
38,066
40,564
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/patchtsmixer/modeling_patchtsmixer.py
null
8,784
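For intuition, a simplified ratio-based random masking sketch is shown below. The actual `random_masking` helper referenced above additionally supports channel-consistent masking, protected `unmasked_channel_indices`, and there is a separate forecast-style masking mode, so treat this only as an illustration of the idea:

import torch

# Simplified illustration of ratio-based random patch masking (not the library's exact implementation).
batch_size, num_channels, num_patches, patch_length = 2, 3, 8, 4
mask_ratio, mask_value = 0.5, 0.0

patches = torch.randn(batch_size, num_channels, num_patches, patch_length)

# Rank random noise per (sample, channel) and mask the highest-ranked fraction of patches.
noise = torch.rand(batch_size, num_channels, num_patches)
num_masked = int(mask_ratio * num_patches)
ranks = noise.argsort(dim=-1).argsort(dim=-1)   # rank of each patch within its sequence
mask = ranks >= (num_patches - num_masked)       # True on masked patches

masked_patches = patches.masked_fill(mask.unsqueeze(-1), mask_value)
print(mask.shape, masked_patches.shape)          # (2, 3, 8) (2, 3, 8, 4)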
class PatchTSMixerStdScaler(nn.Module): """ Standardize features by calculating the mean and scaling along the first dimension, and then normalizes it by subtracting from the mean and dividing by the standard deviation. """ def __init__(self, config: PatchTSMixerConfig): super().__init__() self.dim = config.scaling_dim if hasattr(config, "scaling_dim") else 1 self.keepdim = config.keepdim if hasattr(config, "keepdim") else True self.minimum_scale = config.minimum_scale if hasattr(config, "minimum_scale") else 1e-5 def forward( self, data: torch.Tensor, observed_indicator: torch.Tensor ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: """ Parameters: data (`torch.Tensor` of shape `(batch_size, sequence_length, num_input_channels)`): input for Batch norm calculation observed_indicator (`torch.BoolTensor` of shape `(batch_size, sequence_length, num_input_channels)`): Calculating the scale on the observed indicator. Returns: tuple of `torch.Tensor` of shapes (`(batch_size, sequence_length, num_input_channels)`,`(batch_size, 1, num_input_channels)`, `(batch_size, 1, num_input_channels)`) """ denominator = observed_indicator.sum(self.dim, keepdim=self.keepdim) denominator = denominator.clamp_min(1.0) loc = (data * observed_indicator).sum(self.dim, keepdim=self.keepdim) / denominator variance = (((data - loc) * observed_indicator) ** 2).sum(self.dim, keepdim=self.keepdim) / denominator scale = torch.sqrt(variance + self.minimum_scale) return (data - loc) / scale, loc, scale
class_definition
40,674
42,420
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/patchtsmixer/modeling_patchtsmixer.py
null
8,785
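The standard scaler computes a masked per-channel mean and variance over the context window, keeping the reduced dimension so the statistics broadcast back over time. A standalone sketch that mirrors the arithmetic above, with illustrative shapes:

import torch

# `minimum_scale` is the same small constant the scaler adds under the square root for stability.
data = torch.randn(2, 32, 3)                 # (batch, sequence_length, num_input_channels)
observed = torch.ones_like(data)             # 1 = observed, 0 = missing
minimum_scale = 1e-5

denominator = observed.sum(dim=1, keepdim=True).clamp_min(1.0)
loc = (data * observed).sum(dim=1, keepdim=True) / denominator
variance = (((data - loc) * observed) ** 2).sum(dim=1, keepdim=True) / denominator
scale = torch.sqrt(variance + minimum_scale)

scaled = (data - loc) / scale                # per-channel standardized context window
print(scaled.shape, loc.shape, scale.shape)  # (2, 32, 3) (2, 1, 3) (2, 1, 3)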
class PatchTSMixerMeanScaler(nn.Module): """ Computes a scaling factor as the weighted average absolute value along the first dimension, and scales the data accordingly. """ def __init__(self, config: PatchTSMixerConfig): super().__init__() self.dim = config.scaling_dim if hasattr(config, "scaling_dim") else 1 self.keepdim = config.keepdim if hasattr(config, "keepdim") else True self.minimum_scale = config.minimum_scale if hasattr(config, "minimum_scale") else 1e-10 self.default_scale = config.default_scale if hasattr(config, "default_scale") else None def forward( self, data: torch.Tensor, observed_indicator: torch.Tensor ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: """ Parameters: data (`torch.Tensor` of shape `(batch_size, sequence_length, num_input_channels)`): input for Batch norm calculation observed_indicator (`torch.BoolTensor` of shape `(batch_size, sequence_length, num_input_channels)`): Calculating the scale on the observed indicator. Returns: tuple of `torch.Tensor` of shapes (`(batch_size, sequence_length, num_input_channels)`,`(batch_size, 1, num_input_channels)`, `(batch_size, 1, num_input_channels)`) """ ts_sum = (data * observed_indicator).abs().sum(self.dim, keepdim=True) num_observed = observed_indicator.sum(self.dim, keepdim=True) scale = ts_sum / torch.clamp(num_observed, min=1) # If `default_scale` is provided, we use it, otherwise we use the scale # of the batch. if self.default_scale is None: batch_sum = ts_sum.sum(dim=0) batch_observations = torch.clamp(num_observed.sum(0), min=1) default_scale = torch.squeeze(batch_sum / batch_observations) else: default_scale = self.default_scale * torch.ones_like(scale) # apply default scale where there are no observations scale = torch.where(num_observed > 0, scale, default_scale) # ensure the scale is at least `self.minimum_scale` scale = torch.clamp(scale, min=self.minimum_scale) scaled_data = data / scale if not self.keepdim: scale = scale.squeeze(dim=self.dim) return scaled_data, torch.zeros_like(scale), scale
class_definition
42,531
44,938
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/patchtsmixer/modeling_patchtsmixer.py
null
8,786
class PatchTSMixerNOPScaler(nn.Module): """ Assigns a scaling factor equal to 1 along the first dimension, and therefore applies no scaling to the input data. """ def __init__(self, config: PatchTSMixerConfig): super().__init__() self.dim = config.scaling_dim if hasattr(config, "scaling_dim") else 1 self.keepdim = config.keepdim if hasattr(config, "keepdim") else True def forward( self, data: torch.Tensor, observed_indicator: torch.Tensor = None ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: """ Parameters: data (`torch.Tensor` of shape `(batch_size, sequence_length, num_input_channels)`): input for Batch norm calculation Returns: tuple of `torch.Tensor` of shapes (`(batch_size, sequence_length, num_input_channels)`,`(batch_size, 1, num_input_channels)`, `(batch_size, 1, num_input_channels)`) """ scale = torch.ones_like(data, requires_grad=False).mean(dim=self.dim, keepdim=self.keepdim) loc = torch.zeros_like(data, requires_grad=False).mean(dim=self.dim, keepdim=self.keepdim) return data, loc, scale
class_definition
45,048
46,255
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/patchtsmixer/modeling_patchtsmixer.py
null
8,787
class PatchTSMixerEncoderOutput(ModelOutput): """ Base class for `PatchTSMixerEncoderOutput`, with potential hidden states. Args: last_hidden_state (`torch.FloatTensor` of shape `(batch_size, num_channels, num_patches, d_model)`): Hidden-state at the output of the last layer of the model. hidden_states (`tuple(torch.FloatTensor)`, *optional*): Hidden-states of the model at the output of each layer. """ last_hidden_state: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None
class_definition
46,269
46,841
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/patchtsmixer/modeling_patchtsmixer.py
null
8,788
class PatchTSMixerEncoder(PatchTSMixerPreTrainedModel): """ Encoder for PatchTSMixer which inputs patched time-series and outputs patched embeddings. Args: config (`PatchTSMixerConfig`): Configuration. """ def __init__(self, config: PatchTSMixerConfig): super().__init__(config) self.use_return_dict = config.use_return_dict self.patcher = nn.Linear(config.patch_length, config.d_model) if config.use_positional_encoding: self.positional_encoder = PatchTSMixerPositionalEncoding(config=config) else: self.positional_encoder = None self.mlp_mixer_encoder = PatchTSMixerBlock(config=config) # Initialize weights and apply final processing if config.post_init: self.post_init() @replace_return_docstrings(output_type=PatchTSMixerEncoderOutput, config_class=_CONFIG_FOR_DOC) def forward( self, past_values: torch.Tensor, output_hidden_states: Optional[bool] = False, return_dict: Optional[bool] = None, ) -> Union[Tuple, PatchTSMixerEncoderOutput]: r""" Args: past_values (`torch.FloatTensor` of shape `(batch_size, seq_length, num_input_channels)`): Context values of the time series. For a pretraining task, this denotes the input time series to predict the masked portion. For a forecasting task, this denotes the history/past time series values. Similarly, for classification or regression tasks, it denotes the appropriate context values of the time series. For univariate time series, `num_input_channels` dimension should be 1. For multivariate time series, it is greater than 1. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. Returns: `torch.FloatTensor` of shape `(batch_size, n_vars, num_patches, d_model)` """ return_dict = return_dict if return_dict is not None else self.use_return_dict # flatten [bs x num_patch x d_model]. common_channel/mix_channel: [bs x n_vars x num_patch x d_model] patches = self.patcher(past_values) # add positional encoder if self.positional_encoder is not None: patches = self.positional_encoder(patches) last_hidden_state, hidden_states = self.mlp_mixer_encoder(patches, output_hidden_states=output_hidden_states) if not return_dict: return tuple( v for v in [ last_hidden_state, hidden_states, ] ) return PatchTSMixerEncoderOutput(last_hidden_state=last_hidden_state, hidden_states=hidden_states)
class_definition
46,844
49,839
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/patchtsmixer/modeling_patchtsmixer.py
null
8,789
class PatchTSMixerModelOutput(ModelOutput): """ Base class for model's outputs, with potential hidden states. Args: last_hidden_state (`torch.FloatTensor` of shape `(batch_size, num_channels, num_patches, d_model)`): Hidden-state at the output of the last layer of the model. hidden_states (`tuple(torch.FloatTensor)`, *optional*): Hidden-states of the model at the output of each layer. patch_input (`torch.FloatTensor` of shape `(batch_size, num_channels, num_patches, patch_length)`): Patched input data to the model. mask: (`torch.FloatTensor` of shape `(batch_size, num_channels, num_patches)`,*optional*): Bool Tensor indicating True in masked patches and False otherwise. loc: (`torch.FloatTensor` of shape `(batch_size, 1, num_channels)`,*optional*): Gives the mean of the context window per channel. Used for revin denorm outside the model, if revin enabled. scale: (`torch.FloatTensor` of shape `(batch_size, 1, num_channels)`,*optional*): Gives the std dev of the context window per channel. Used for revin denorm outside the model, if revin enabled. """ last_hidden_state: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None patch_input: torch.FloatTensor = None mask: Optional[torch.FloatTensor] = None loc: Optional[torch.FloatTensor] = None scale: Optional[torch.FloatTensor] = None
class_definition
49,853
51,367
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/patchtsmixer/modeling_patchtsmixer.py
null
8,790
class PatchTSMixerModel(PatchTSMixerPreTrainedModel): def __init__(self, config: PatchTSMixerConfig, mask_input: bool = False): super().__init__(config) self.use_return_dict = config.use_return_dict self.encoder = PatchTSMixerEncoder(config) self.patching = PatchTSMixerPatchify(config) if mask_input is True: self.masking = PatchTSMixerMasking(config) else: self.masking = None if config.scaling == "mean": self.scaler = PatchTSMixerMeanScaler(config) elif config.scaling == "std" or config.scaling is True: self.scaler = PatchTSMixerStdScaler(config) else: self.scaler = PatchTSMixerNOPScaler(config) # Initialize weights and apply final processing if config.post_init: self.post_init() @add_start_docstrings_to_model_forward(PATCHTSMIXER_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=PatchTSMixerModelOutput, config_class=_CONFIG_FOR_DOC) def forward( self, past_values: torch.Tensor, observed_mask: Optional[torch.Tensor] = None, output_hidden_states: Optional[bool] = False, return_dict: Optional[bool] = None, ) -> PatchTSMixerModelOutput: r""" observed_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length, num_input_channels)`, *optional*): Boolean mask to indicate which `past_values` were observed and which were missing. Mask values selected in `[0, 1]`: - 1 for values that are **observed**, - 0 for values that are **missing** (i.e. NaNs that were replaced by zeros). Returns: """ return_dict = return_dict if return_dict is not None else self.use_return_dict mask = None if observed_mask is None: observed_mask = torch.ones_like(past_values) scaled_past_values, loc, scale = self.scaler(past_values, observed_mask) patched_x = self.patching(scaled_past_values) # [batch_size x num_input_channels x num_patch x patch_length enc_input = patched_x if self.masking is not None: enc_input, mask = self.masking(patched_x) # enc_input: [batch_size x num_input_channels x num_patch x patch_length] # mask: [batch_size x num_input_channels x num_patch] encoder_output = self.encoder( enc_input, output_hidden_states=output_hidden_states, return_dict=return_dict, ) if isinstance(encoder_output, tuple): encoder_output = PatchTSMixerEncoderOutput(*encoder_output) if not return_dict: return tuple( v for v in [ encoder_output.last_hidden_state, encoder_output.hidden_states, patched_x, mask, loc, scale, ] ) return PatchTSMixerModelOutput( last_hidden_state=encoder_output.last_hidden_state, hidden_states=encoder_output.hidden_states, patch_input=patched_x, mask=mask, loc=loc, scale=scale, )
class_definition
51,488
54,795
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/patchtsmixer/modeling_patchtsmixer.py
null
8,791
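A short usage sketch of the backbone, assuming the configuration values shown below are valid choices for `PatchTSMixerConfig` (they are illustrative, not recommended settings):

import torch
from transformers import PatchTSMixerConfig, PatchTSMixerModel

config = PatchTSMixerConfig(
    context_length=32,
    patch_length=8,
    patch_stride=8,
    num_input_channels=3,
    d_model=16,
    num_layers=2,
)
model = PatchTSMixerModel(config)

past_values = torch.randn(4, config.context_length, config.num_input_channels)
outputs = model(past_values)
# last_hidden_state: (batch_size, num_input_channels, num_patches, d_model)
print(outputs.last_hidden_state.shape)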
class PatchTSMixerForPreTrainingOutput(ModelOutput): """ Output type of [`PatchTSMixerForPreTrainingOutput`]. Args: prediction_outputs (`torch.FloatTensor` of shape `(batch_size, num_input_channels, num_patches, patch_length)`): Prediction output from the pretrain head. hidden_states (`tuple(torch.FloatTensor)`, *optional*): Hidden-states of the model at the output of each layer. last_hidden_state (`torch.FloatTensor` of shape `(batch_size, num_input_channels, num_patches, d_model)`): Backbone embeddings before passing through the head. loss (*optional*, returned when `y` is provided, `torch.FloatTensor` of shape `()`): Total loss """ loss: Optional[torch.FloatTensor] = None prediction_outputs: torch.FloatTensor = None last_hidden_state: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None
class_definition
54,809
55,752
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/patchtsmixer/modeling_patchtsmixer.py
null
8,792
class PatchTSMixerForPretraining(PatchTSMixerPreTrainedModel): r""" `PatchTSMixer` for mask pretraining. Args: config (`PatchTSMixerConfig`): Configuration. Returns: `None`. """ def __init__(self, config: PatchTSMixerConfig): super().__init__(config) self.model = PatchTSMixerModel(config, mask_input=True) self.head = PatchTSMixerPretrainHead(config=config) self.masked_loss = config.masked_loss self.use_return_dict = config.use_return_dict # Initialize weights and apply final processing if config.post_init: self.post_init() @add_start_docstrings_to_model_forward(PATCHTSMIXER_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=PatchTSMixerForPreTrainingOutput, config_class=_CONFIG_FOR_DOC) def forward( self, past_values: torch.Tensor, observed_mask: Optional[torch.Tensor] = None, output_hidden_states: Optional[bool] = False, return_loss: bool = True, return_dict: Optional[bool] = None, ) -> PatchTSMixerForPreTrainingOutput: r""" observed_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length, num_input_channels)`, *optional*): Boolean mask to indicate which `past_values` were observed and which were missing. Mask values selected in `[0, 1]`: - 1 for values that are **observed**, - 0 for values that are **missing** (i.e. NaNs that were replaced by zeros). return_loss (`bool`, *optional*): Whether to return the loss in the `forward` call. Returns: """ return_dict = return_dict if return_dict is not None else self.use_return_dict if self.masked_loss is True: loss = torch.nn.MSELoss(reduction="none") else: loss = torch.nn.MSELoss(reduction="mean") # past_values: tensor [batch_size x context_length x num_input_channels] model_output = self.model( past_values, observed_mask=observed_mask, output_hidden_states=output_hidden_states, return_dict=return_dict, ) # x.last_hidden_state: [batch_size x nvars x num_patch x d_model] if isinstance(model_output, tuple): model_output = PatchTSMixerModelOutput(*model_output) x_hat = self.head(model_output.last_hidden_state) # tensor [batch_size x nvars x num_patch x patch_length] if return_loss is True: loss_val = loss(x_hat, model_output.patch_input) else: loss_val = None # calculate masked_loss if self.masked_loss is True and loss_val is not None: loss_val = (loss_val.mean(dim=-1) * model_output.mask).sum() / (model_output.mask.sum() + 1e-10) if not return_dict: return tuple( v for v in [ loss_val, x_hat, model_output.last_hidden_state, model_output.hidden_states, ] ) return PatchTSMixerForPreTrainingOutput( loss=loss_val, prediction_outputs=x_hat, # tensor [batch_size x nvars x num_patch x patch_length] last_hidden_state=model_output.last_hidden_state, # x: [batch_size x nvars x num_patch x d_model] hidden_states=model_output.hidden_states, )
class_definition
55,755
59,242
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/patchtsmixer/modeling_patchtsmixer.py
null
8,793
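When `masked_loss` is enabled, the pretraining objective above averages an elementwise MSE over `patch_length` and then weights it by the boolean patch mask, so only masked patches contribute. A sketch of that computation with assumed shapes:

import torch

x_hat = torch.randn(2, 3, 8, 4)        # reconstruction: (batch, n_vars, num_patches, patch_length)
patch_input = torch.randn(2, 3, 8, 4)  # original patches
mask = torch.rand(2, 3, 8) > 0.5       # True on masked patches

loss = torch.nn.MSELoss(reduction="none")
loss_val = loss(x_hat, patch_input)    # (batch, n_vars, num_patches, patch_length)
loss_val = (loss_val.mean(dim=-1) * mask).sum() / (mask.sum() + 1e-10)
print(loss_val)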
class PatchTSMixerForPredictionOutput(ModelOutput): """ Output type of [`PatchTSMixerForPredictionOutput`]. Args: prediction_outputs (`torch.FloatTensor` of shape `(batch_size, prediction_length, num_input_channels)`): Prediction output from the forecast head. last_hidden_state (`torch.FloatTensor` of shape `(batch_size, num_input_channels, num_patches, d_model)`): Backbone embeddings before passing through the head. hidden_states (`tuple(torch.FloatTensor)`, *optional*): Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. loss (*optional*, returned when `y` is provided, `torch.FloatTensor` of shape `()`): Total loss. loc (`torch.FloatTensor`, *optional* of shape `(batch_size, 1, num_input_channels)`): Input mean scale (`torch.FloatTensor`, *optional* of shape `(batch_size, 1, num_input_channels)`): Input std dev """ loss: Optional[torch.FloatTensor] = None prediction_outputs: torch.FloatTensor = None last_hidden_state: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None loc: torch.FloatTensor = None scale: torch.FloatTensor = None
class_definition
59,256
60,544
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/patchtsmixer/modeling_patchtsmixer.py
null
8,794
class SamplePatchTSMixerPredictionOutput(ModelOutput): """ Base class for time series model's predictions outputs that contains the sampled values from the chosen distribution. Args: sequences (`torch.FloatTensor` of shape `(batch_size, num_samples, prediction_length, number_channels)`): Sampled values from the chosen distribution. """ sequences: torch.FloatTensor = None
class_definition
60,558
60,977
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/patchtsmixer/modeling_patchtsmixer.py
null
8,795
class SamplePatchTSMixerRegressionOutput(ModelOutput): """ Base class for time series model's predictions outputs that contains the sampled values from the chosen distribution. Args: sequences (`torch.FloatTensor` of shape `(batch_size, num_samples, num_targets)` Sampled values from the chosen distribution. """ sequences: torch.FloatTensor = None
class_definition
60,991
61,389
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/patchtsmixer/modeling_patchtsmixer.py
null
8,796
class PatchTSMixerForPrediction(PatchTSMixerPreTrainedModel):
    r"""
    `PatchTSMixer` for forecasting application.

    Args:
        config (`PatchTSMixerConfig`):
            Configuration.

    Returns:
        `None`.
    """

    def __init__(self, config: PatchTSMixerConfig):
        super().__init__(config)
        self.loss = config.loss
        self.use_return_dict = config.use_return_dict
        self.prediction_channel_indices = config.prediction_channel_indices
        self.num_parallel_samples = config.num_parallel_samples

        if config.loss == "mse":
            self.distribution_output = None
        else:
            dim = config.prediction_length
            distribution_output_map = {
                "student_t": StudentTOutput,
                "normal": NormalOutput,
                "negative_binomial": NegativeBinomialOutput,
            }
            output_class = distribution_output_map.get(config.distribution_output, None)
            if output_class is not None:
                self.distribution_output = output_class(dim=dim)
            else:
                raise ValueError(f"Unknown distribution output {config.distribution_output}")

        self.model = PatchTSMixerModel(config)
        self.head = PatchTSMixerForPredictionHead(
            config=config,
            distribution_output=self.distribution_output,
        )

        # Initialize weights and apply final processing
        if config.post_init:
            self.post_init()

    @add_start_docstrings_to_model_forward(PATCHTSMIXER_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=PatchTSMixerForPredictionOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        past_values: torch.Tensor,
        observed_mask: Optional[torch.Tensor] = None,
        future_values: Optional[torch.Tensor] = None,
        output_hidden_states: Optional[bool] = False,
        return_loss: bool = True,
        return_dict: Optional[bool] = None,
    ) -> PatchTSMixerForPredictionOutput:
        r"""
        observed_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length, num_input_channels)`, *optional*):
            Boolean mask to indicate which `past_values` were observed and which were missing. Mask values selected
            in `[0, 1]`:
                - 1 for values that are **observed**,
                - 0 for values that are **missing** (i.e. NaNs that were replaced by zeros).
        future_values (`torch.FloatTensor` of shape `(batch_size, target_len, num_input_channels)` for forecasting,
            `(batch_size, num_targets)` for regression, or `(batch_size,)` for classification, *optional*):
            Target values of the time series, that serve as labels for the model. The `future_values` is what the
            Transformer needs during training to learn to output, given the `past_values`. Note that, this is NOT
            required for a pretraining task.

            For a forecasting task, the shape should be `(batch_size, target_len, num_input_channels)`. Even if we
            want to forecast only specific channels by setting the indices in the `prediction_channel_indices`
            parameter, pass the target data with all channels, as channel filtering for both prediction and target
            will be manually applied before the loss computation.
        return_loss (`bool`, *optional*):
            Whether to return the loss in the `forward` call.

        Returns:

        """
        if self.loss == "mse":
            loss = nn.MSELoss(reduction="mean")
        elif self.loss == "nll":
            loss = nll
        else:
            raise ValueError("Invalid loss function: Allowed values: mse and nll")

        return_dict = return_dict if return_dict is not None else self.use_return_dict

        # past_values: tensor [batch_size x context_length x num_input_channels]
        model_output = self.model(
            past_values,
            observed_mask=observed_mask,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )  # model_output: [batch_size x nvars x num_patch x d_model]

        if isinstance(model_output, tuple):
            model_output = PatchTSMixerModelOutput(*model_output)

        # tensor [batch_size x prediction_length x num_input_channels]
        y_hat = self.head(model_output.last_hidden_state)

        loss_val = None

        if self.prediction_channel_indices is not None:
            if self.distribution_output:
                distribution = self.distribution_output.distribution(
                    y_hat,
                    loc=model_output.loc[..., self.prediction_channel_indices],
                    scale=model_output.scale[..., self.prediction_channel_indices],
                )
                if future_values is not None and return_loss is True:
                    loss_val = loss(
                        distribution,
                        future_values[..., self.prediction_channel_indices],
                    )
                    # take average of the loss
                    loss_val = weighted_average(loss_val)
            else:
                y_hat = (
                    y_hat * model_output.scale[..., self.prediction_channel_indices]
                    + model_output.loc[..., self.prediction_channel_indices]
                )
                if future_values is not None and return_loss is True:
                    loss_val = loss(y_hat, future_values[..., self.prediction_channel_indices])
        else:
            if self.distribution_output:
                distribution = self.distribution_output.distribution(
                    y_hat, loc=model_output.loc, scale=model_output.scale
                )
                if future_values is not None and return_loss is True:
                    loss_val = loss(distribution, future_values)
                    loss_val = weighted_average(loss_val)
            else:
                y_hat = y_hat * model_output.scale + model_output.loc
                if future_values is not None and return_loss is True:
                    loss_val = loss(y_hat, future_values)

        if self.prediction_channel_indices is not None:
            loc = model_output.loc[..., self.prediction_channel_indices]
            scale = model_output.scale[..., self.prediction_channel_indices]
        else:
            loc = model_output.loc
            scale = model_output.scale

        if not return_dict:
            return tuple(
                v
                for v in [
                    loss_val,
                    y_hat,
                    model_output.last_hidden_state,
                    model_output.hidden_states,
                    loc,
                    scale,
                ]
            )

        return PatchTSMixerForPredictionOutput(
            loss=loss_val,
            prediction_outputs=y_hat,  # tensor [batch_size x prediction_length x num_input_channels]
            last_hidden_state=model_output.last_hidden_state,  # x: [batch_size x nvars x num_patch x d_model]
            hidden_states=model_output.hidden_states,
            loc=loc,
            scale=scale,
        )

    def generate(
        self,
        past_values: torch.Tensor,
        observed_mask: Optional[torch.Tensor] = None,
    ) -> SamplePatchTSMixerPredictionOutput:
        """
        Generate sequences of sample predictions from a model with a probability distribution head.

        Args:
            past_values (`torch.FloatTensor` of shape `(batch_size, sequence_length, num_input_channels)`):
                Past values of the time series that serves as context in order to predict the future.
            observed_mask (`torch.BoolTensor` of shape `(batch_size, sequence_length, num_input_channels)`, *optional*):
                Boolean mask to indicate which `past_values` were observed and which were missing. Mask values
                selected in `[0, 1]`:
                    - 1 for values that are **observed**,
                    - 0 for values that are **missing** (i.e. NaNs that were replaced by zeros).

        Return:
            [`SamplePatchTSMixerPredictionOutput`] where the outputs `sequences` tensor will have shape `(batch_size,
            number of samples, prediction_length, num_input_channels)`.
        """
        # get number of samples
        num_parallel_samples = self.num_parallel_samples

        # get model output
        outputs = self(
            past_values=past_values,
            future_values=None,
            observed_mask=observed_mask,
            output_hidden_states=False,
        )

        # get distribution
        distribution = self.distribution_output.distribution(
            outputs.prediction_outputs, loc=outputs.loc, scale=outputs.scale
        )

        # get samples: list of [batch_size x prediction_length x num_channels]
        samples = [distribution.sample() for _ in range(num_parallel_samples)]

        # stack tensors
        samples = torch.stack(samples, dim=1)  # [batch_size x num_samples x prediction_length x num_channels]
        return SamplePatchTSMixerPredictionOutput(sequences=samples)
class_definition
62,950
72,119
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/patchtsmixer/modeling_patchtsmixer.py
null
8,797
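The `generate` method above draws `num_parallel_samples` trajectories from the distribution head and stacks them along a new samples axis. A sketch of the sampling step, with a `torch.distributions.Normal` standing in for whichever `distribution_output` (student_t, normal, negative_binomial) the model was configured with:

import torch

num_parallel_samples = 5
batch_size, prediction_length, num_channels = 2, 24, 3

distribution = torch.distributions.Normal(
    loc=torch.zeros(batch_size, prediction_length, num_channels),
    scale=torch.ones(batch_size, prediction_length, num_channels),
)

samples = [distribution.sample() for _ in range(num_parallel_samples)]
samples = torch.stack(samples, dim=1)  # (batch_size, num_samples, prediction_length, num_channels)
print(samples.shape)                   # torch.Size([2, 5, 24, 3])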
class PatchTSMixerForTimeSeriesClassificationOutput(ModelOutput):
    """
    Output type of [`PatchTSMixerForTimeSeriesClassificationOutput`].

    Args:
        prediction_outputs (`torch.FloatTensor` of shape `(batch_size, num_labels)`):
            Prediction output from the classification head.
        last_hidden_state (`torch.FloatTensor` of shape `(batch_size, num_input_channels, num_patches, d_model)`):
            Backbone embeddings before passing through the head.
        hidden_states (`tuple(torch.FloatTensor)`, *optional*):
            Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
        loss (*optional*, returned when `y` is provided, `torch.FloatTensor` of shape `()`):
            Total loss.
    """

    loss: Optional[torch.FloatTensor] = None
    prediction_outputs: torch.FloatTensor = None
    last_hidden_state: torch.FloatTensor = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
class_definition
72,133
73,117
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/patchtsmixer/modeling_patchtsmixer.py
null
8,798
class PatchTSMixerForTimeSeriesClassification(PatchTSMixerPreTrainedModel):
    r"""
    `PatchTSMixer` for classification application.

    Args:
        config (`PatchTSMixerConfig`):
            Configuration.

    Returns:
        `None`.
    """

    def __init__(self, config: PatchTSMixerConfig):
        super().__init__(config)

        self.model = PatchTSMixerModel(config)
        self.head = PatchTSMixerLinearHead(
            config=config,
        )
        self.use_return_dict = config.use_return_dict
        if config.scaling in ["std", "mean", True]:
            self.inject_scale = InjectScalerStatistics4D(d_model=config.d_model, num_patches=config.num_patches)
        else:
            self.inject_scale = None

        # Initialize weights and apply final processing
        if config.post_init:
            self.post_init()

    @add_start_docstrings_to_model_forward(PATCHTSMIXER_INPUTS_DOCSTRING)
    @replace_return_docstrings(
        output_type=PatchTSMixerForTimeSeriesClassificationOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        past_values: torch.Tensor,
        target_values: torch.Tensor = None,
        output_hidden_states: Optional[bool] = False,
        return_loss: bool = True,
        return_dict: Optional[bool] = None,
    ) -> PatchTSMixerForTimeSeriesClassificationOutput:
        r"""
        target_values (`torch.FloatTensor` of shape `(batch_size, target_len, num_input_channels)` for forecasting,
            `(batch_size, num_targets)` for regression, or `(batch_size,)` for classification, *optional*):
            Target values of the time series, that serve as labels for the model. The `target_values` is what the
            Transformer needs during training to learn to output, given the `past_values`. Note that, this is NOT
            required for a pretraining task.

            For a forecasting task, the shape should be `(batch_size, target_len, num_input_channels)`. Even if we
            want to forecast only specific channels by setting the indices in the `prediction_channel_indices`
            parameter, pass the target data with all channels, as channel filtering for both prediction and target
            will be manually applied before the loss computation.

            For a classification task, it has a shape of `(batch_size,)`.

            For a regression task, it has a shape of `(batch_size, num_targets)`.
        return_loss (`bool`, *optional*):
            Whether to return the loss in the `forward` call.

        Returns:

        """
        loss = torch.nn.CrossEntropyLoss()

        return_dict = return_dict if return_dict is not None else self.use_return_dict

        model_output = self.model(
            past_values,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )  # x: [batch_size x nvars x num_patch x d_model]

        if isinstance(model_output, tuple):
            model_output = PatchTSMixerModelOutput(*model_output)

        if self.inject_scale is not None:
            model_output.last_hidden_state = self.inject_scale(
                model_output.last_hidden_state,
                loc=model_output.loc,
                scale=model_output.scale,
            )  # x: [batch_size x nvars x num_patch x d_model]

        y_hat = self.head(model_output.last_hidden_state)  # tensor [batch_size x n_labels]

        if target_values is not None and return_loss is True:
            loss_val = loss(y_hat, target_values)
        else:
            loss_val = None

        if not return_dict:
            return tuple(
                v
                for v in [
                    loss_val,
                    y_hat,
                    model_output.last_hidden_state,
                    model_output.hidden_states,
                ]
            )

        return PatchTSMixerForTimeSeriesClassificationOutput(
            loss=loss_val,
            prediction_outputs=y_hat,  # tensor [batch_size x n_labels]
            last_hidden_state=model_output.last_hidden_state,  # x: [batch_size x nvars x num_patch x d_model]
            hidden_states=model_output.hidden_states,
        )
class_definition
73,120
77,315
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/patchtsmixer/modeling_patchtsmixer.py
null
8,799