Dataset columns:
- text: string (lengths 1 to 1.02k)
- class_index: int64 (values 0 to 10.8k)
- source: string (lengths 85 to 188)
# retrieve input_ids and inputs_embeds
if input_ids is not None and inputs_embeds is not None:
    raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time")
elif input_ids is not None:
    input_shape = input_ids.size()
    input_ids = input_ids.view(-1, input_shape[-1])
elif inputs_embeds is not None:
    input_shape = inputs_embeds.size()[:-1]
else:
    raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds")

# past_key_values_length
past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0

if inputs_embeds is None:
    inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale

attention_mask = _prepare_4d_causal_attention_mask(
    attention_mask, input_shape, inputs_embeds, past_key_values_length
)
3,285
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/speech_to_text/modeling_speech_to_text.py
# expand encoder attention mask
if encoder_hidden_states is not None and encoder_attention_mask is not None:
    # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
    encoder_attention_mask = _prepare_4d_attention_mask(
        encoder_attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]
    )

# embed positions
positions = self.embed_positions(input_ids, past_key_values_length=past_key_values_length)

hidden_states = inputs_embeds + positions

hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)

if self.gradient_checkpointing and self.training:
    if use_cache:
        logger.warning_once(
            "`use_cache = True` is incompatible with gradient checkpointing. Setting `use_cache = False`..."
        )
        use_cache = False
3,285
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/speech_to_text/modeling_speech_to_text.py
# decoder layers
all_hidden_states = () if output_hidden_states else None
all_self_attns = () if output_attentions else None
all_cross_attentions = () if (output_attentions and encoder_hidden_states is not None) else None
next_decoder_cache = () if use_cache else None
3,285
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/speech_to_text/modeling_speech_to_text.py
# check if head_mask/cross_attn_head_mask has a correct number of layers specified if desired
for attn_mask, mask_name in zip([head_mask, cross_attn_head_mask], ["head_mask", "cross_attn_head_mask"]):
    if attn_mask is not None:
        assert attn_mask.size()[0] == (len(self.layers)), (
            f"The `{mask_name}` should be specified for {len(self.layers)} layers, but it is for"
            f" {attn_mask.size()[0]}."
        )

for idx, decoder_layer in enumerate(self.layers):
    # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
    if output_hidden_states:
        all_hidden_states += (hidden_states,)
    if self.training:
        dropout_probability = torch.rand([])
        if dropout_probability < self.layerdrop:
            continue

    past_key_value = past_key_values[idx] if past_key_values is not None else None
3,285
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/speech_to_text/modeling_speech_to_text.py
    if self.gradient_checkpointing and self.training:
        layer_outputs = self._gradient_checkpointing_func(
            decoder_layer.__call__,
            hidden_states,
            attention_mask,
            encoder_hidden_states,
            encoder_attention_mask,
            head_mask[idx] if head_mask is not None else None,
            cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None,
            None,
            output_attentions,
            use_cache,
        )
    else:
        layer_outputs = decoder_layer(
            hidden_states,
            attention_mask=attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            layer_head_mask=(head_mask[idx] if head_mask is not None else None),
3,285
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/speech_to_text/modeling_speech_to_text.py
            cross_attn_layer_head_mask=(
                cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None
            ),
            past_key_value=past_key_value,
            output_attentions=output_attentions,
            use_cache=use_cache,
        )
    hidden_states = layer_outputs[0]
3,285
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/speech_to_text/modeling_speech_to_text.py
    if use_cache:
        next_decoder_cache += (layer_outputs[3 if output_attentions else 1],)

    if output_attentions:
        all_self_attns += (layer_outputs[1],)

        if encoder_hidden_states is not None:
            all_cross_attentions += (layer_outputs[2],)

hidden_states = self.layer_norm(hidden_states)

# add hidden states from the last decoder layer
if output_hidden_states:
    all_hidden_states += (hidden_states,)
3,285
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/speech_to_text/modeling_speech_to_text.py
next_cache = next_decoder_cache if use_cache else None
if not return_dict:
    return tuple(
        v
        for v in [hidden_states, next_cache, all_hidden_states, all_self_attns, all_cross_attentions]
        if v is not None
    )
return BaseModelOutputWithPastAndCrossAttentions(
    last_hidden_state=hidden_states,
    past_key_values=next_cache,
    hidden_states=all_hidden_states,
    attentions=all_self_attns,
    cross_attentions=all_cross_attentions,
)
3,285
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/speech_to_text/modeling_speech_to_text.py
class Speech2TextModel(Speech2TextPreTrainedModel):
    def __init__(self, config: Speech2TextConfig):
        super().__init__(config)

        self.encoder = Speech2TextEncoder(config)
        self.decoder = Speech2TextDecoder(config)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.decoder.embed_tokens

    def set_input_embeddings(self, value):
        self.decoder.embed_tokens = value

    def get_encoder(self):
        return self.encoder

    def get_decoder(self):
        return self.decoder
3,286
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/speech_to_text/modeling_speech_to_text.py
@add_start_docstrings_to_model_forward(SPEECH_TO_TEXT_INPUTS_DOCSTRING)
@replace_return_docstrings(output_type=Seq2SeqModelOutput, config_class=_CONFIG_FOR_DOC)
def forward(
    self,
    input_features: Optional[torch.LongTensor] = None,
    attention_mask: Optional[torch.Tensor] = None,
    decoder_input_ids: Optional[torch.LongTensor] = None,
    decoder_attention_mask: Optional[torch.LongTensor] = None,
    head_mask: Optional[torch.Tensor] = None,
    decoder_head_mask: Optional[torch.Tensor] = None,
    cross_attn_head_mask: Optional[torch.Tensor] = None,
    encoder_outputs: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
    past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
    decoder_inputs_embeds: Optional[torch.FloatTensor] = None,
    use_cache: Optional[bool] = None,
    output_attentions: Optional[bool] = None,
    output_hidden_states: Optional[bool] = None,
    return_dict: Optional[bool] = None,
3,286
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/speech_to_text/modeling_speech_to_text.py
) -> Union[Tuple[torch.FloatTensor], Seq2SeqModelOutput]:
    r"""
    Returns:
3,286
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/speech_to_text/modeling_speech_to_text.py
    Example:

    ```python
    >>> import torch
    >>> from transformers import Speech2TextModel, AutoFeatureExtractor
    >>> from datasets import load_dataset

    >>> model = Speech2TextModel.from_pretrained("facebook/s2t-small-librispeech-asr")
    >>> feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/s2t-small-librispeech-asr")
    >>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
    >>> inputs = feature_extractor(
    ...     ds[0]["audio"]["array"], sampling_rate=ds[0]["audio"]["sampling_rate"], return_tensors="pt"
    ... )
    >>> input_features = inputs.input_features
    >>> decoder_input_ids = torch.tensor([[1, 1]]) * model.config.decoder_start_token_id
    >>> last_hidden_state = model(input_features, decoder_input_ids=decoder_input_ids).last_hidden_state
    >>> list(last_hidden_state.shape)
    [1, 2, 256]
    ```"""
3,286
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/speech_to_text/modeling_speech_to_text.py
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
    output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
use_cache = use_cache if use_cache is not None else self.config.use_cache
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
3,286
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/speech_to_text/modeling_speech_to_text.py
if encoder_outputs is None:
    encoder_outputs = self.encoder(
        input_features,
        attention_mask=attention_mask,
        head_mask=head_mask,
        output_attentions=output_attentions,
        output_hidden_states=output_hidden_states,
        return_dict=return_dict,
    )
# If the user passed a tuple for encoder_outputs, we wrap it in a BaseModelOutput when return_dict=True
elif return_dict and not isinstance(encoder_outputs, BaseModelOutput):
    encoder_outputs = BaseModelOutput(
        last_hidden_state=encoder_outputs[0],
        hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None,
        attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None,
    )
3,286
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/speech_to_text/modeling_speech_to_text.py
# downsample encoder attention mask
if attention_mask is not None:
    encoder_attention_mask = self._get_feature_vector_attention_mask(
        encoder_outputs[0].shape[1], attention_mask
    )
else:
    encoder_attention_mask = None

# decoder outputs consists of (dec_features, past_key_value, dec_hidden, dec_attn)
decoder_outputs = self.decoder(
    input_ids=decoder_input_ids,
    attention_mask=decoder_attention_mask,
    encoder_hidden_states=encoder_outputs[0],
    encoder_attention_mask=encoder_attention_mask,
    head_mask=decoder_head_mask,
    cross_attn_head_mask=cross_attn_head_mask,
    past_key_values=past_key_values,
    inputs_embeds=decoder_inputs_embeds,
    use_cache=use_cache,
    output_attentions=output_attentions,
    output_hidden_states=output_hidden_states,
    return_dict=return_dict,
)
3,286
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/speech_to_text/modeling_speech_to_text.py
if not return_dict:
    return decoder_outputs + encoder_outputs

return Seq2SeqModelOutput(
    last_hidden_state=decoder_outputs.last_hidden_state,
    past_key_values=decoder_outputs.past_key_values,
    decoder_hidden_states=decoder_outputs.hidden_states,
    decoder_attentions=decoder_outputs.attentions,
    cross_attentions=decoder_outputs.cross_attentions,
    encoder_last_hidden_state=encoder_outputs.last_hidden_state,
    encoder_hidden_states=encoder_outputs.hidden_states,
    encoder_attentions=encoder_outputs.attentions,
)
3,286
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/speech_to_text/modeling_speech_to_text.py
class Speech2TextForConditionalGeneration(Speech2TextPreTrainedModel, GenerationMixin):
    base_model_prefix = "model"
    _tied_weights_keys = ["lm_head.weight"]

    def __init__(self, config: Speech2TextConfig):
        super().__init__(config)
        self.model = Speech2TextModel(config)
        self.lm_head = nn.Linear(config.d_model, self.config.vocab_size, bias=False)

        # Initialize weights and apply final processing
        self.post_init()

    def get_encoder(self):
        return self.model.get_encoder()

    def get_decoder(self):
        return self.model.get_decoder()

    def get_output_embeddings(self):
        return self.lm_head

    def set_output_embeddings(self, new_embeddings):
        self.lm_head = new_embeddings
3,287
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/speech_to_text/modeling_speech_to_text.py
@add_start_docstrings_to_model_forward(SPEECH_TO_TEXT_INPUTS_DOCSTRING)
@replace_return_docstrings(output_type=Seq2SeqLMOutput, config_class=_CONFIG_FOR_DOC)
def forward(
    self,
    input_features: Optional[torch.LongTensor] = None,
    attention_mask: Optional[torch.Tensor] = None,
    decoder_input_ids: Optional[torch.LongTensor] = None,
    decoder_attention_mask: Optional[torch.LongTensor] = None,
    head_mask: Optional[torch.Tensor] = None,
    decoder_head_mask: Optional[torch.Tensor] = None,
    cross_attn_head_mask: Optional[torch.Tensor] = None,
    encoder_outputs: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
    past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
    decoder_inputs_embeds: Optional[torch.FloatTensor] = None,
    labels: Optional[torch.LongTensor] = None,
    use_cache: Optional[bool] = None,
    output_attentions: Optional[bool] = None,
    output_hidden_states: Optional[bool] = None,
3,287
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/speech_to_text/modeling_speech_to_text.py
    return_dict: Optional[bool] = None,
) -> Union[Tuple[torch.FloatTensor], Seq2SeqLMOutput]:
    r"""
    labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
        Labels for computing the language modeling loss. Indices should either be in `[0, ..., config.vocab_size]`
        or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked), the loss is
        only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
3,287
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/speech_to_text/modeling_speech_to_text.py
    Returns:

    Example:

    ```python
    >>> import torch
    >>> from transformers import Speech2TextProcessor, Speech2TextForConditionalGeneration
    >>> from datasets import load_dataset

    >>> model = Speech2TextForConditionalGeneration.from_pretrained("facebook/s2t-small-librispeech-asr")
    >>> processor = Speech2TextProcessor.from_pretrained("facebook/s2t-small-librispeech-asr")

    >>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")

    >>> inputs = processor(
    ...     ds[0]["audio"]["array"], sampling_rate=ds[0]["audio"]["sampling_rate"], return_tensors="pt"
    ... )
    >>> input_features = inputs.input_features

    >>> generated_ids = model.generate(inputs=input_features)
3,287
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/speech_to_text/modeling_speech_to_text.py
    >>> transcription = processor.batch_decode(generated_ids, skip_special_tokens=True)[0]
    >>> transcription
    'mister quilter is the apostle of the middle classes and we are glad to welcome his gospel'
    ```"""
    return_dict = return_dict if return_dict is not None else self.config.use_return_dict

    if labels is not None:
        if decoder_input_ids is None and decoder_inputs_embeds is None:
            decoder_input_ids = shift_tokens_right(
                labels, self.config.pad_token_id, self.config.decoder_start_token_id
            )
3,287
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/speech_to_text/modeling_speech_to_text.py
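When `labels` are given without `decoder_input_ids`, the snippet above derives the decoder inputs by shifting the labels one step right behind a start token. A minimal standalone sketch of that idea (toy ids; `2` stands in for `config.decoder_start_token_id`, and the `-100`-to-pad replacement the real `shift_tokens_right` also performs is omitted):

```python
import torch

labels = torch.tensor([[5, 6, 7]])  # hypothetical target token ids
decoder_input_ids = torch.cat([torch.tensor([[2]]), labels[:, :-1]], dim=1)
print(decoder_input_ids)  # tensor([[2, 5, 6]]): start token, then labels shifted right
```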
outputs = self.model(
    input_features,
    attention_mask=attention_mask,
    decoder_input_ids=decoder_input_ids,
    encoder_outputs=encoder_outputs,
    decoder_attention_mask=decoder_attention_mask,
    head_mask=head_mask,
    decoder_head_mask=decoder_head_mask,
    cross_attn_head_mask=cross_attn_head_mask,
    past_key_values=past_key_values,
    decoder_inputs_embeds=decoder_inputs_embeds,
    use_cache=use_cache,
    output_attentions=output_attentions,
    output_hidden_states=output_hidden_states,
    return_dict=return_dict,
)
lm_logits = self.lm_head(outputs[0])

loss = None
if labels is not None:
    loss_fct = CrossEntropyLoss()
    loss = loss_fct(lm_logits.view(-1, self.config.vocab_size), labels.view(-1))
3,287
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/speech_to_text/modeling_speech_to_text.py
if not return_dict:
    output = (lm_logits,) + outputs[1:]
    return ((loss,) + output) if loss is not None else output

return Seq2SeqLMOutput(
    loss=loss,
    logits=lm_logits,
    past_key_values=outputs.past_key_values,
    decoder_hidden_states=outputs.decoder_hidden_states,
    decoder_attentions=outputs.decoder_attentions,
    cross_attentions=outputs.cross_attentions,
    encoder_last_hidden_state=outputs.encoder_last_hidden_state,
    encoder_hidden_states=outputs.encoder_hidden_states,
    encoder_attentions=outputs.encoder_attentions,
)

@staticmethod
def _reorder_cache(past_key_values, beam_idx):
    reordered_past = ()
    for layer_past in past_key_values:
        reordered_past += (
            tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past),
        )
    return reordered_past
3,287
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/speech_to_text/modeling_speech_to_text.py
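A minimal sketch of what `_reorder_cache` does during beam search: the batch dimension of every cached key/value tensor is re-indexed so each surviving beam continues from the cache of the old beam it forked from. Shapes here are hypothetical:

```python
import torch

# One layer's cache: (self_k, self_v, cross_k, cross_v), each (num_beams, heads, seq, head_dim)
past_key_values = ((torch.arange(6.0).reshape(3, 1, 2, 1),) * 4,)
beam_idx = torch.tensor([2, 2, 0])  # beams 0 and 1 both continue from old beam 2

reordered = tuple(
    tuple(past_state.index_select(0, beam_idx) for past_state in layer_past)
    for layer_past in past_key_values
)
print(reordered[0][0][:, 0, 0, 0])  # tensor([4., 4., 0.])
```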
class TFConv1dSubsampler(keras.layers.Layer):
    """
    Convolutional subsampler: a stack of 1D convolution (along temporal dimension) followed by non-linear
    activation via gated linear units (https://arxiv.org/abs/1911.08460)
    """

    def __init__(self, config: Speech2TextConfig, **kwargs):
        super().__init__(**kwargs)
        self.config = config
        self.num_layers = config.num_conv_layers
        self.in_channels = config.input_feat_per_channel * config.input_channels
        self.mid_channels = config.conv_channels
        self.out_channels = config.d_model
        self.kernel_sizes = config.conv_kernel_sizes

        self.conv_layers = [
            keras.layers.Conv1D(
                filters=self.mid_channels if i < self.num_layers - 1 else self.out_channels * 2,
                kernel_size=k,
                strides=2,
                name=f"conv_layers.{i}",
            )
            for i, k in enumerate(self.kernel_sizes)
        ]
3,288
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/speech_to_text/modeling_tf_speech_to_text.py
def call(self, input_features: tf.Tensor) -> tf.Tensor:
    # TF Conv1D assumes Batch x Time x Channels, same as the input
    hidden_states = tf.cast(input_features, tf.float32)
    for i, conv in enumerate(self.conv_layers):
        # equivalent to `padding=k // 2` on PT's `nn.Conv1d`
        pad_len = self.kernel_sizes[i] // 2
        hidden_shapes = shape_list(hidden_states)
        hidden_states = tf.concat(
            (
                tf.zeros((hidden_shapes[0], pad_len, hidden_shapes[2])),
                hidden_states,
                tf.zeros((hidden_shapes[0], pad_len, hidden_shapes[2])),
            ),
            axis=1,
        )

        hidden_states = conv(hidden_states)
        hidden_states = glu(hidden_states, axis=2)  # GLU over the Channel dimension
    return hidden_states
3,288
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/speech_to_text/modeling_tf_speech_to_text.py
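The `glu` at the end of each loop iteration halves the channel dimension, which is why the last conv layer above produces `d_model * 2` filters: the gated output lands back at `d_model`. A minimal sketch of what `glu(x, axis=2)` computes, with hypothetical shapes:

```python
import tensorflow as tf

x = tf.random.normal((1, 10, 8))  # (batch, time, channels)
a, b = tf.split(x, 2, axis=2)     # split the channel dimension in half
y = a * tf.sigmoid(b)             # gated linear unit: one half gates the other
print(y.shape)                    # (1, 10, 4)
```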
def build(self, input_shape=None):
    if self.built:
        return
    self.built = True
    if getattr(self, "conv_layers", None) is not None:
        for i, layer in enumerate(self.conv_layers):
            with tf.name_scope(layer.name):
                layer.build([None, None, self.in_channels] if i == 0 else [None, None, self.mid_channels // 2])
3,288
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/speech_to_text/modeling_tf_speech_to_text.py
class TFSpeech2TextSinusoidalPositionalEmbedding(keras.layers.Layer):
    """This module produces sinusoidal positional embeddings of any length."""

    def __init__(self, num_positions: int, embedding_dim: int, padding_idx: Optional[int] = None, **kwargs):
        super().__init__(**kwargs)
        self.offset = 2
        self.embedding_dim = embedding_dim
        self.padding_idx = padding_idx
        self.embedding_weights = self._get_embedding(num_positions + self.offset, embedding_dim, padding_idx)
3,289
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/speech_to_text/modeling_tf_speech_to_text.py
@staticmethod
def _get_embedding(num_embeddings: int, embedding_dim: int, padding_idx: Optional[int] = None) -> tf.Tensor:
    """
    Build sinusoidal embeddings. This matches the implementation in tensor2tensor, but differs slightly from the
    description in Section 3.5 of "Attention Is All You Need".
    """
    half_dim = embedding_dim // 2
    emb = tf.math.log(10000.0) / (half_dim - 1)
    emb = tf.math.exp(tf.range(half_dim, dtype=tf.float32) * -emb)
    emb = tf.expand_dims(tf.range(num_embeddings, dtype=tf.float32), axis=1) * tf.expand_dims(emb, axis=0)
    emb = tf.reshape(tf.concat([tf.math.sin(emb), tf.math.cos(emb)], axis=1), shape=[num_embeddings, -1])
    if embedding_dim % 2 == 1:
        # zero pad
        emb = tf.concat([emb, tf.zeros((num_embeddings, 1))], axis=1)
    if padding_idx is not None:
        emb = tf.concat([emb[:padding_idx, :], tf.zeros((1, tf.shape(emb)[1])), emb[padding_idx + 1 :, :]], axis=0)
3,289
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/speech_to_text/modeling_tf_speech_to_text.py
return emb
3,289
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/speech_to_text/modeling_tf_speech_to_text.py
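For intuition, here is a hedged NumPy re-derivation of the table `_get_embedding` builds (tensor2tensor layout: all sines in the first `half_dim` columns, all cosines in the second), leaving out the `offset` and `padding_idx` handling:

```python
import numpy as np

num_embeddings, embedding_dim = 4, 6
half_dim = embedding_dim // 2
inv_freq = np.exp(np.arange(half_dim) * -(np.log(10000.0) / (half_dim - 1)))
angles = np.arange(num_embeddings)[:, None] * inv_freq[None, :]
emb = np.concatenate([np.sin(angles), np.cos(angles)], axis=1)
print(emb[0])  # position 0: all sines are 0, all cosines are 1 -> [0. 0. 0. 1. 1. 1.]
```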
def call(self, input_ids: tf.Tensor, past_key_values_length: int = 0) -> tf.Tensor:
    bsz, seq_len = shape_list(input_ids)
    # Create the position ids from the input token ids. Any padded tokens remain padded.
    position_ids = self.create_position_ids_from_input_ids(input_ids, self.padding_idx, past_key_values_length)

    # Matt: The PyTorch code does a lot of work to cache the embeddings, setting the cached values as a
    # model attribute in the forward pass. This is extremely forbidden in TF, which wants forward calls to be
    # idempotent. TF doesn't need that caching anyway, since it can just store constants during compilation,
    # so we just remove all of that code.
    embeddings = self._get_embedding(
        self.padding_idx + 1 + seq_len + self.offset + past_key_values_length, self.embedding_dim, self.padding_idx
    )
    return tf.reshape(tf.gather(embeddings, tf.reshape(position_ids, (-1,)), axis=0), (bsz, seq_len, -1))
3,289
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/speech_to_text/modeling_tf_speech_to_text.py
@staticmethod
def create_position_ids_from_input_ids(
    input_ids: tf.Tensor, padding_idx: int, past_key_values_length: Optional[int] = 0
) -> tf.Tensor:
    """
    Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding
    symbols are ignored. This is modified from fairseq's `utils.make_positions`.

    Args:
        input_ids: tf.Tensor
        padding_idx: int
        past_key_values_length: Optional[int]

    Returns:
        tf.Tensor
    """
    mask = tf.cast(tf.math.not_equal(input_ids, padding_idx), dtype=tf.int32)
    incremental_indices = (tf.math.cumsum(mask, axis=1) + past_key_values_length) * mask
    return tf.cast(incremental_indices, dtype=tf.int64) + padding_idx
3,289
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/speech_to_text/modeling_tf_speech_to_text.py
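A quick standalone illustration of the cumsum trick above (toy ids, `padding_idx = 1`, no cached length): padded slots stay at `padding_idx`, real tokens count up from `padding_idx + 1`:

```python
import tensorflow as tf

input_ids = tf.constant([[5, 8, 9, 1, 1]])          # 1 is the padding idx
mask = tf.cast(tf.math.not_equal(input_ids, 1), tf.int32)
incremental = tf.math.cumsum(mask, axis=1) * mask   # past_key_values_length = 0
print((tf.cast(incremental, tf.int64) + 1).numpy())  # [[2 3 4 1 1]]
```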
class TFSpeech2TextAttention(keras.layers.Layer):
    """Multi-headed attention from "Attention Is All You Need"""

    def __init__(
        self,
        embed_dim: int,
        num_heads: int,
        dropout: float = 0.0,
        is_decoder: bool = False,
        bias: bool = True,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.embed_dim = embed_dim

        self.num_heads = num_heads
        self.dropout = keras.layers.Dropout(dropout)
        self.head_dim = embed_dim // num_heads
        if (self.head_dim * num_heads) != self.embed_dim:
            raise ValueError(
                f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}"
                f" and `num_heads`: {num_heads})."
            )
        self.scaling = self.head_dim**-0.5
        self.is_decoder = is_decoder
3,290
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/speech_to_text/modeling_tf_speech_to_text.py
        self.k_proj = keras.layers.Dense(embed_dim, use_bias=bias, name="k_proj")
        self.q_proj = keras.layers.Dense(embed_dim, use_bias=bias, name="q_proj")
        self.v_proj = keras.layers.Dense(embed_dim, use_bias=bias, name="v_proj")
        self.out_proj = keras.layers.Dense(embed_dim, use_bias=bias, name="out_proj")

    def _shape(self, tensor: tf.Tensor, seq_len: int, bsz: int):
        return tf.transpose(tf.reshape(tensor, (bsz, seq_len, self.num_heads, self.head_dim)), (0, 2, 1, 3))

    def call(
        self,
        hidden_states: tf.Tensor,
        key_value_states: tf.Tensor | None = None,
        past_key_value: Tuple[Tuple[tf.Tensor]] | None = None,
        attention_mask: tf.Tensor | None = None,
        layer_head_mask: tf.Tensor | None = None,
        training: Optional[bool] = False,
    ) -> Tuple[tf.Tensor, tf.Tensor | None]:
        """Input shape: Batch x Time x Channel"""
3,290
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/speech_to_text/modeling_tf_speech_to_text.py
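`_shape` is just a reshape-and-transpose from `(bsz, seq_len, embed_dim)` to `(bsz, num_heads, seq_len, head_dim)`; a quick shape check with hypothetical sizes:

```python
import tensorflow as tf

bsz, seq_len, num_heads, head_dim = 2, 5, 4, 8
x = tf.zeros((bsz, seq_len, num_heads * head_dim))
y = tf.transpose(tf.reshape(x, (bsz, seq_len, num_heads, head_dim)), (0, 2, 1, 3))
print(y.shape)  # (2, 4, 5, 8)
```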
# if key_value_states are provided this layer is used as a cross-attention layer
# for the decoder
is_cross_attention = key_value_states is not None

bsz, tgt_len, embed_dim = shape_list(hidden_states)
3,290
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/speech_to_text/modeling_tf_speech_to_text.py
# get query proj
query_states = self.q_proj(hidden_states) * self.scaling
# get key, value proj
if is_cross_attention and past_key_value is not None:
    # reuse k,v, cross_attentions
    key_states = past_key_value[0]
    value_states = past_key_value[1]
elif is_cross_attention:
    # cross_attentions
    key_states = self._shape(self.k_proj(key_value_states), -1, bsz)
    value_states = self._shape(self.v_proj(key_value_states), -1, bsz)
elif past_key_value is not None:
    # reuse k, v, self_attention
    key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
    value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
    key_states = tf.concat([past_key_value[0], key_states], axis=2)
    value_states = tf.concat([past_key_value[1], value_states], axis=2)
else:
    # self_attention
3,290
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/speech_to_text/modeling_tf_speech_to_text.py
    key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
    value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
3,290
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/speech_to_text/modeling_tf_speech_to_text.py
if self.is_decoder:
    # if cross_attention save Tuple(tf.Tensor, tf.Tensor) of all cross attention key/value_states.
    # Further calls to cross_attention layer can then reuse all cross-attention
    # key/value_states (first "if" case)
    # if uni-directional self-attention (decoder) save Tuple(tf.Tensor, tf.Tensor) of
    # all previous decoder key/value_states. Further calls to uni-directional self-attention
    # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
    # if encoder bi-directional self-attention `past_key_value` is always `None`
    past_key_value = (key_states, value_states)

proj_shape = (bsz * self.num_heads, -1, self.head_dim)
query_states = tf.reshape(self._shape(query_states, tgt_len, bsz), proj_shape)
key_states = tf.reshape(key_states, proj_shape)
value_states = tf.reshape(value_states, proj_shape)
3,290
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/speech_to_text/modeling_tf_speech_to_text.py
src_len = shape_list(key_states)[1]
attn_weights = tf.matmul(query_states, key_states, transpose_b=True)

tf.debugging.assert_equal(
    shape_list(attn_weights),
    [bsz * self.num_heads, tgt_len, src_len],
    message=(
        f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is"
        f" {shape_list(attn_weights)}"
    ),
)

if attention_mask is not None:
    tf.debugging.assert_equal(
        shape_list(attention_mask),
        [bsz, 1, tgt_len, src_len],
        message=(
            f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is"
            f" {shape_list(attention_mask)}"
        ),
    )
3,290
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/speech_to_text/modeling_tf_speech_to_text.py
    attention_mask = tf.cast(attention_mask, dtype=attn_weights.dtype)
    attn_weights = tf.reshape(attn_weights, (bsz, self.num_heads, tgt_len, src_len)) + attention_mask
    attn_weights = tf.reshape(attn_weights, (bsz * self.num_heads, tgt_len, src_len))

attn_weights = stable_softmax(attn_weights, axis=-1)

if layer_head_mask is not None:
    tf.debugging.assert_equal(
        shape_list(layer_head_mask),
        [self.num_heads],
        message=(
            f"Head mask for a single layer should be of size {(self.num_heads)}, but is"
            f" {shape_list(layer_head_mask)}"
        ),
    )

    attn_weights = tf.reshape(layer_head_mask, (1, -1, 1, 1)) * tf.reshape(
        attn_weights, (bsz, self.num_heads, tgt_len, src_len)
    )
    attn_weights = tf.reshape(attn_weights, (bsz * self.num_heads, tgt_len, src_len))
3,290
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/speech_to_text/modeling_tf_speech_to_text.py
attn_probs = self.dropout(attn_weights, training=training)
attn_output = tf.matmul(attn_probs, value_states)

tf.debugging.assert_equal(
    shape_list(attn_output),
    [bsz * self.num_heads, tgt_len, self.head_dim],
    message=(
        f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is"
        f" {shape_list(attn_output)}"
    ),
)

attn_output = tf.transpose(
    tf.reshape(attn_output, (bsz, self.num_heads, tgt_len, self.head_dim)), (0, 2, 1, 3)
)
attn_output = tf.reshape(attn_output, (bsz, tgt_len, embed_dim))

attn_output = self.out_proj(attn_output)
attn_weights: tf.Tensor = tf.reshape(attn_weights, (bsz, self.num_heads, tgt_len, src_len))

return attn_output, attn_weights, past_key_value
3,290
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/speech_to_text/modeling_tf_speech_to_text.py
def build(self, input_shape=None):
    if self.built:
        return
    self.built = True
    if getattr(self, "k_proj", None) is not None:
        with tf.name_scope(self.k_proj.name):
            self.k_proj.build([None, None, self.embed_dim])
    if getattr(self, "q_proj", None) is not None:
        with tf.name_scope(self.q_proj.name):
            self.q_proj.build([None, None, self.embed_dim])
    if getattr(self, "v_proj", None) is not None:
        with tf.name_scope(self.v_proj.name):
            self.v_proj.build([None, None, self.embed_dim])
    if getattr(self, "out_proj", None) is not None:
        with tf.name_scope(self.out_proj.name):
            self.out_proj.build([None, None, self.embed_dim])
3,290
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/speech_to_text/modeling_tf_speech_to_text.py
class TFSpeech2TextEncoderLayer(keras.layers.Layer):
    def __init__(self, config: Speech2TextConfig, **kwargs):
        super().__init__(**kwargs)
        self.embed_dim = config.d_model
        self.self_attn = TFSpeech2TextAttention(
            self.embed_dim, config.encoder_attention_heads, dropout=config.attention_dropout, name="self_attn"
        )
        self.self_attn_layer_norm = keras.layers.LayerNormalization(epsilon=1e-5, name="self_attn_layer_norm")
        self.dropout = keras.layers.Dropout(config.dropout)
        self.activation_fn = get_tf_activation(config.activation_function)
        self.activation_dropout = keras.layers.Dropout(config.activation_dropout)
        self.fc1 = keras.layers.Dense(config.encoder_ffn_dim, name="fc1")
        self.fc2 = keras.layers.Dense(self.embed_dim, name="fc2")
        self.final_layer_norm = keras.layers.LayerNormalization(epsilon=1e-5, name="final_layer_norm")
        self.config = config
3,291
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/speech_to_text/modeling_tf_speech_to_text.py
def call(
    self, hidden_states: tf.Tensor, attention_mask: tf.Tensor, layer_head_mask: tf.Tensor, training: bool = False
):
    """
    Args:
        hidden_states (`tf.Tensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
        attention_mask (`tf.Tensor`): attention mask of size
            `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
        layer_head_mask (`tf.Tensor`): mask for attention heads in a given layer of size
            `(encoder_attention_heads,)`
    """
    residual = hidden_states
    hidden_states = self.self_attn_layer_norm(hidden_states)
    hidden_states, self_attn_weights, _ = self.self_attn(
        hidden_states=hidden_states,
        attention_mask=attention_mask,
        layer_head_mask=layer_head_mask,
        training=training,
    )
3,291
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/speech_to_text/modeling_tf_speech_to_text.py
    tf.debugging.assert_equal(
        shape_list(hidden_states),
        shape_list(residual),
        message=f"Self attn modified the shape of query {shape_list(residual)} to {shape_list(hidden_states)}",
    )

    hidden_states = self.dropout(hidden_states, training=training)
    hidden_states = residual + hidden_states

    residual = hidden_states
    hidden_states = self.final_layer_norm(hidden_states)
    hidden_states = self.activation_fn(self.fc1(hidden_states))
    hidden_states = self.activation_dropout(hidden_states, training=training)
    hidden_states = self.fc2(hidden_states)
    hidden_states = self.dropout(hidden_states, training=training)
    hidden_states = residual + hidden_states

    return hidden_states, self_attn_weights
3,291
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/speech_to_text/modeling_tf_speech_to_text.py
def build(self, input_shape=None):
    if self.built:
        return
    self.built = True
    if getattr(self, "self_attn", None) is not None:
        with tf.name_scope(self.self_attn.name):
            self.self_attn.build(None)
    if getattr(self, "self_attn_layer_norm", None) is not None:
        with tf.name_scope(self.self_attn_layer_norm.name):
            self.self_attn_layer_norm.build([None, None, self.embed_dim])
    if getattr(self, "fc1", None) is not None:
        with tf.name_scope(self.fc1.name):
            self.fc1.build([None, None, self.embed_dim])
    if getattr(self, "fc2", None) is not None:
        with tf.name_scope(self.fc2.name):
            self.fc2.build([None, None, self.config.encoder_ffn_dim])
    if getattr(self, "final_layer_norm", None) is not None:
        with tf.name_scope(self.final_layer_norm.name):
            self.final_layer_norm.build([None, None, self.embed_dim])
3,291
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/speech_to_text/modeling_tf_speech_to_text.py
class TFSpeech2TextDecoderLayer(keras.layers.Layer):
    def __init__(self, config: Speech2TextConfig, **kwargs):
        super().__init__(**kwargs)
        self.embed_dim = config.d_model

        self.self_attn = TFSpeech2TextAttention(
            embed_dim=self.embed_dim,
            num_heads=config.decoder_attention_heads,
            dropout=config.attention_dropout,
            name="self_attn",
            is_decoder=True,
        )
        self.dropout = keras.layers.Dropout(config.dropout)
        self.activation_fn = get_tf_activation(config.activation_function)
        self.activation_dropout = keras.layers.Dropout(config.activation_dropout)
3,292
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/speech_to_text/modeling_tf_speech_to_text.py
        self.self_attn_layer_norm = keras.layers.LayerNormalization(epsilon=1e-5, name="self_attn_layer_norm")
        self.encoder_attn = TFSpeech2TextAttention(
            self.embed_dim,
            config.decoder_attention_heads,
            dropout=config.attention_dropout,
            name="encoder_attn",
            is_decoder=True,
        )
        self.encoder_attn_layer_norm = keras.layers.LayerNormalization(epsilon=1e-5, name="encoder_attn_layer_norm")
        self.fc1 = keras.layers.Dense(config.decoder_ffn_dim, name="fc1")
        self.fc2 = keras.layers.Dense(self.embed_dim, name="fc2")
        self.final_layer_norm = keras.layers.LayerNormalization(epsilon=1e-5, name="final_layer_norm")
        self.config = config
3,292
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/speech_to_text/modeling_tf_speech_to_text.py
def call(
    self,
    hidden_states,
    attention_mask: tf.Tensor | None = None,
    encoder_hidden_states: tf.Tensor | None = None,
    encoder_attention_mask: tf.Tensor | None = None,
    layer_head_mask: tf.Tensor | None = None,
    cross_attn_layer_head_mask: tf.Tensor | None = None,
    past_key_value: Tuple[tf.Tensor] | None = None,
    training=False,
) -> Tuple[tf.Tensor, tf.Tensor, Tuple[Tuple[tf.Tensor]]]:
    """
    Args:
        hidden_states (`tf.Tensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
        attention_mask (`tf.Tensor`): attention mask of size
            `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
        encoder_hidden_states (`tf.Tensor`):
            cross attention input to the layer of shape `(batch, seq_len, embed_dim)`
        encoder_attention_mask (`tf.Tensor`): encoder attention mask of size
3,292
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/speech_to_text/modeling_tf_speech_to_text.py
            `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
        layer_head_mask (`tf.Tensor`): mask for attention heads in a given layer of size
            `(decoder_attention_heads,)`
        cross_attn_layer_head_mask (`tf.Tensor`): mask for heads of the cross-attention module.
            `(decoder_attention_heads,)`
        past_key_value (`Tuple(tf.Tensor)`): cached past key and value projection states
    """
    residual = hidden_states
    hidden_states = self.self_attn_layer_norm(hidden_states)
3,292
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/speech_to_text/modeling_tf_speech_to_text.py
    # Self Attention
    # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
    self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
    # add present self-attn cache to positions 1,2 of present_key_value tuple
    hidden_states, self_attn_weights, present_key_value = self.self_attn(
        hidden_states=hidden_states,
        past_key_value=self_attn_past_key_value,
        attention_mask=attention_mask,
        layer_head_mask=layer_head_mask,
        training=training,
    )
    hidden_states = self.dropout(hidden_states, training=training)
    hidden_states = residual + hidden_states

    # Cross-Attention Block
    cross_attn_present_key_value = None
    cross_attn_weights = None
    if encoder_hidden_states is not None:
        residual = hidden_states
        hidden_states = self.encoder_attn_layer_norm(hidden_states)
3,292
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/speech_to_text/modeling_tf_speech_to_text.py
        # cross_attn cached key/values tuple is at positions 3,4 of present_key_value tuple
        cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
        hidden_states, cross_attn_weights, cross_attn_present_key_value = self.encoder_attn(
            hidden_states=hidden_states,
            key_value_states=encoder_hidden_states,
            attention_mask=encoder_attention_mask,
            layer_head_mask=cross_attn_layer_head_mask,
            past_key_value=cross_attn_past_key_value,
            training=training,
        )
        hidden_states = self.dropout(hidden_states, training=training)
        hidden_states = residual + hidden_states

        # add cross-attn to positions 3,4 of present_key_value tuple
        present_key_value = present_key_value + cross_attn_present_key_value
3,292
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/speech_to_text/modeling_tf_speech_to_text.py
    # Fully Connected
    residual = hidden_states
    hidden_states = self.final_layer_norm(hidden_states)
    hidden_states = self.activation_fn(self.fc1(hidden_states))
    hidden_states = self.activation_dropout(hidden_states, training=training)
    hidden_states = self.fc2(hidden_states)
    hidden_states = self.dropout(hidden_states, training=training)
    hidden_states = residual + hidden_states

    return (
        hidden_states,
        self_attn_weights,
        cross_attn_weights,
        present_key_value,
    )
3,292
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/speech_to_text/modeling_tf_speech_to_text.py
def build(self, input_shape=None):
    if self.built:
        return
    self.built = True
    if getattr(self, "self_attn", None) is not None:
        with tf.name_scope(self.self_attn.name):
            self.self_attn.build(None)
    if getattr(self, "self_attn_layer_norm", None) is not None:
        with tf.name_scope(self.self_attn_layer_norm.name):
            self.self_attn_layer_norm.build([None, None, self.embed_dim])
    if getattr(self, "encoder_attn", None) is not None:
        with tf.name_scope(self.encoder_attn.name):
            self.encoder_attn.build(None)
    if getattr(self, "encoder_attn_layer_norm", None) is not None:
        with tf.name_scope(self.encoder_attn_layer_norm.name):
            self.encoder_attn_layer_norm.build([None, None, self.embed_dim])
    if getattr(self, "fc1", None) is not None:
        with tf.name_scope(self.fc1.name):
            self.fc1.build([None, None, self.embed_dim])
3,292
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/speech_to_text/modeling_tf_speech_to_text.py
if getattr(self, "fc2", None) is not None: with tf.name_scope(self.fc2.name): self.fc2.build([None, None, self.config.decoder_ffn_dim]) if getattr(self, "final_layer_norm", None) is not None: with tf.name_scope(self.final_layer_norm.name): self.final_layer_norm.build([None, None, self.embed_dim])
3,292
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/speech_to_text/modeling_tf_speech_to_text.py
class TFSpeech2TextPreTrainedModel(TFPreTrainedModel):
    config_class = Speech2TextConfig
    base_model_prefix = "model"
    main_input_name = "input_features"
    _keys_to_ignore_on_load_unexpected = [r"encoder.embed_positions.weights"]

    def _get_feat_extract_output_lengths(self, input_lengths: tf.Tensor):
        """
        Computes the output length of the convolutional layers
        """
        for _ in range(self.config.num_conv_layers):
            input_lengths = (input_lengths - 1) // 2 + 1

        return input_lengths
3,293
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/speech_to_text/modeling_tf_speech_to_text.py
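The loop in `_get_feat_extract_output_lengths` matches the stride-2 convolutions in the subsampler: with `padding = k // 2` and odd `k`, each conv maps a length `L` to `(L - 1) // 2 + 1`. A small sanity check, assuming the default two conv layers with kernel size 5:

```python
def conv_out_len(length, k, stride=2):
    pad = k // 2
    return (length + 2 * pad - k) // stride + 1

for length in (100, 101, 7):
    once = conv_out_len(length, k=5)
    assert once == (length - 1) // 2 + 1          # the formula used above
    print(length, "->", once, "->", conv_out_len(once, k=5))
# 100 -> 50 -> 25, 101 -> 51 -> 26, 7 -> 4 -> 2
```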
@property
def input_signature(self):
    return {
        "input_features": tf.TensorSpec(
            (None, None, self.config.input_feat_per_channel * self.config.input_channels),
            tf.float32,
            name="input_features",
        ),
        "attention_mask": tf.TensorSpec((None, None), tf.int32, name="attention_mask"),
        "decoder_input_ids": tf.TensorSpec((None, None), tf.int32, name="decoder_input_ids"),
        "decoder_attention_mask": tf.TensorSpec((None, None), tf.int32, name="decoder_attention_mask"),
    }
3,293
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/speech_to_text/modeling_tf_speech_to_text.py
class TFSpeech2TextEncoder(keras.layers.Layer):
    config_class = Speech2TextConfig
    """
    Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a
    [`TFSpeech2TextEncoderLayer`].

    Args:
        config: Speech2TextConfig
    """

    def __init__(self, config: Speech2TextConfig, **kwargs):
        super().__init__(**kwargs)
        self.config = config
        self.dropout = keras.layers.Dropout(config.dropout)
        self.layerdrop = config.encoder_layerdrop

        embed_dim = config.d_model
        self.padding_idx = config.pad_token_id
        self.max_source_positions = config.max_source_positions
        self.embed_scale = tf.math.sqrt(float(embed_dim)) if config.scale_embedding else 1.0

        self.conv = TFConv1dSubsampler(config, name="conv")
3,294
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/speech_to_text/modeling_tf_speech_to_text.py
        self.embed_positions = TFSpeech2TextSinusoidalPositionalEmbedding(
            num_positions=config.max_source_positions,
            embedding_dim=embed_dim,
            padding_idx=self.padding_idx,
            name="embed_positions",
        )
        self.layers = [TFSpeech2TextEncoderLayer(config, name=f"layers.{i}") for i in range(config.encoder_layers)]
        self.layer_norm = keras.layers.LayerNormalization(epsilon=1e-5, name="layer_norm")

    def _get_feat_extract_output_lengths(self, input_lengths: tf.Tensor):
        """
        Computes the output length of the convolutional layers
        """
        for _ in range(self.config.num_conv_layers):
            input_lengths = (input_lengths - 1) // 2 + 1

        return input_lengths
3,294
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/speech_to_text/modeling_tf_speech_to_text.py
def _get_feature_vector_attention_mask(self, feature_vector_length, attention_mask):
    # generate creates a 3D attention mask, because of the shape of input_features
    # convert it to 2D if that's the case
    if len(attention_mask.shape) > 2:
        attention_mask = attention_mask[:, :, -1]

    subsampled_lengths = self._get_feat_extract_output_lengths(tf.math.reduce_sum(attention_mask, -1))
    bsz = shape_list(attention_mask)[0]
    indices = tf.concat(
        (
            tf.expand_dims(tf.range(bsz, dtype=attention_mask.dtype), -1),
            tf.expand_dims(subsampled_lengths - 1, -1),
        ),
        axis=-1,
    )
    attention_mask = tf.scatter_nd(indices=indices, updates=tf.ones(bsz), shape=[bsz, feature_vector_length])
    attention_mask = tf.cast(tf.reverse(tf.math.cumsum(tf.reverse(attention_mask, [-1]), -1), [-1]), tf.int64)
    return attention_mask
3,294
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/speech_to_text/modeling_tf_speech_to_text.py
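The scatter + reversed-cumsum in `_get_feature_vector_attention_mask` marks the last valid subsampled frame of each row with a 1 and then flood-fills everything before it. A standalone sketch with toy sizes:

```python
import tensorflow as tf

feature_vector_length, bsz = 6, 2
subsampled_lengths = tf.constant([3, 5])
indices = tf.stack([tf.range(bsz), subsampled_lengths - 1], axis=-1)
one_hot = tf.scatter_nd(indices, tf.ones(bsz), shape=[bsz, feature_vector_length])
mask = tf.reverse(tf.math.cumsum(tf.reverse(one_hot, [-1]), -1), [-1])
print(tf.cast(mask, tf.int64).numpy())
# [[1 1 1 0 0 0]
#  [1 1 1 1 1 0]]
```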
@unpack_inputs
def call(
    self,
    input_features=None,
    attention_mask=None,
    head_mask=None,
    output_attentions=None,
    output_hidden_states=None,
    return_dict=None,
    training=False,
):
    """
    Args:
        input_features (`tf.Tensor` of shape `(batch_size, sequence_length, feature_size)`):
            Float values of fbank features extracted from the raw speech waveform. Raw speech waveform can be
            obtained by loading a `.flac` or `.wav` audio file into an array of type `List[float]` or a
            `numpy.ndarray`, *e.g.* via the soundfile library (`pip install soundfile`). To prepare the array
            into `input_features`, the [`AutoFeatureExtractor`] should be used for extracting the fbank features,
            padding and conversion into a tensor of floats. See [`~Speech2TextFeatureExtractor.__call__`]
3,294
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/speech_to_text/modeling_tf_speech_to_text.py
        attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
3,294
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/speech_to_text/modeling_tf_speech_to_text.py
            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.

            [What are attention masks?](../glossary#attention-mask)
        head_mask (`tf.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*):
            Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.
3,294
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/speech_to_text/modeling_tf_speech_to_text.py
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under
            returned tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
            for more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
    """
    if input_features is None:
        raise ValueError("You have to specify input_features")

    inputs_embeds = self.conv(input_features)
    inputs_embeds = self.embed_scale * inputs_embeds
3,294
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/speech_to_text/modeling_tf_speech_to_text.py
    # subsample attention mask if necessary
    if attention_mask is not None:
        attention_mask = self._get_feature_vector_attention_mask(tf.shape(inputs_embeds)[1], attention_mask)
        padding_mask = tf.cast(tf.math.not_equal(attention_mask, 1), tf.int64)
    else:
        padding_mask = tf.zeros(tf.shape(inputs_embeds)[:-1], dtype=tf.int64)

    embed_pos = self.embed_positions(padding_mask)

    hidden_states = inputs_embeds + embed_pos
    hidden_states = self.dropout(hidden_states, training=training)

    # check attention mask and invert
    if attention_mask is not None:
        # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
        attention_mask = _expand_mask(attention_mask)

    encoder_states = () if output_hidden_states else None
    all_attentions = () if output_attentions else None
3,294
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/speech_to_text/modeling_tf_speech_to_text.py
    # check if head_mask has a correct number of layers specified if desired
    if head_mask is not None:
        tf.debugging.assert_equal(
            shape_list(head_mask)[0],
            len(self.layers),
            message=(
                f"The head_mask should be specified for {len(self.layers)} layers, but it is for"
                f" {shape_list(head_mask)[0]}."
            ),
        )

    for idx, encoder_layer in enumerate(self.layers):
        if output_hidden_states:
            encoder_states = encoder_states + (hidden_states,)
        # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
        dropout_probability = random.uniform(0, 1)
        if training and (dropout_probability < self.layerdrop):  # skip the layer
            continue
3,294
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/speech_to_text/modeling_tf_speech_to_text.py
        hidden_states, attn = encoder_layer(
            hidden_states,
            attention_mask,
            head_mask[idx] if head_mask is not None else None,
            training=training,
        )

        if output_attentions:
            all_attentions += (attn,)

    hidden_states = self.layer_norm(hidden_states)

    if output_hidden_states:
        encoder_states = encoder_states + (hidden_states,)

    if not return_dict:
        return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None)
    return TFBaseModelOutput(
        last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions
    )
3,294
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/speech_to_text/modeling_tf_speech_to_text.py
def build(self, input_shape=None):
    if self.built:
        return
    self.built = True
    if getattr(self, "conv", None) is not None:
        with tf.name_scope(self.conv.name):
            self.conv.build(None)
    if getattr(self, "embed_positions", None) is not None:
        with tf.name_scope(self.embed_positions.name):
            self.embed_positions.build(None)
    if getattr(self, "layer_norm", None) is not None:
        with tf.name_scope(self.layer_norm.name):
            self.layer_norm.build([None, None, self.config.d_model])
    if getattr(self, "layers", None) is not None:
        for layer in self.layers:
            with tf.name_scope(layer.name):
                layer.build(None)
3,294
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/speech_to_text/modeling_tf_speech_to_text.py
class TFSpeech2TextDecoder(keras.layers.Layer):
    config_class = Speech2TextConfig
    """
    Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a [`TFSpeech2TextDecoderLayer`]

    Args:
        config: Speech2TextConfig
    """

    def __init__(self, config: Speech2TextConfig, **kwargs):
        super().__init__(**kwargs)
        self.config = config
        self.layerdrop = config.decoder_layerdrop
        self.padding_idx = config.pad_token_id
        self.max_target_positions = config.max_target_positions
        self.embed_scale = tf.math.sqrt(float(config.d_model)) if config.scale_embedding else 1.0

        self.embed_tokens = TFSharedEmbeddings(config.vocab_size, config.d_model, name="embed_tokens")

        self.embed_positions = TFSpeech2TextSinusoidalPositionalEmbedding(
            num_positions=config.max_target_positions,
            embedding_dim=config.d_model,
            padding_idx=self.padding_idx,
            name="embed_positions",
        )
3,295
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/speech_to_text/modeling_tf_speech_to_text.py
        self.layers = [TFSpeech2TextDecoderLayer(config, name=f"layers.{i}") for i in range(config.decoder_layers)]
        self.layer_norm = keras.layers.LayerNormalization(epsilon=1e-5, name="layer_norm")

        self.dropout = keras.layers.Dropout(config.dropout)

    def get_embed_tokens(self):
        return self.embed_tokens

    def set_embed_tokens(self, embed_tokens):
        self.embed_tokens = embed_tokens
3,295
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/speech_to_text/modeling_tf_speech_to_text.py
@unpack_inputs
def call(
    self,
    input_ids=None,
    inputs_embeds=None,
    attention_mask=None,
    encoder_hidden_states=None,
    encoder_attention_mask=None,
    head_mask=None,
    cross_attn_head_mask=None,
    past_key_values=None,
    use_cache=None,
    output_attentions=None,
    output_hidden_states=None,
    return_dict=None,
    training=False,
):
    r"""
    Args:
        input_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`):
            Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
            provide it.

            Indices can be obtained using [`Speech2TextTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.
3,295
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/speech_to_text/modeling_tf_speech_to_text.py
            [What are input IDs?](../glossary#input-ids)
        attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.

            [What are attention masks?](../glossary#attention-mask)
        encoder_hidden_states (`tf.Tensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*):
            Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
            of the decoder.
        encoder_attention_mask (`tf.Tensor` of shape `(batch_size, encoder_sequence_length)`, *optional*):
            Mask to avoid performing cross-attention on padding tokens indices of encoder input_ids. Mask values
            selected in `[0, 1]`:
3,295
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/speech_to_text/modeling_tf_speech_to_text.py
            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.

            [What are attention masks?](../glossary#attention-mask)
        head_mask (`tf.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
            Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.
        cross_attn_head_mask (`tf.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
            Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`:

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.
3,295
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/speech_to_text/modeling_tf_speech_to_text.py
        past_key_values (`Tuple[Tuple[tf.Tensor]]` of length `config.n_layers` with each tuple having 2 tuples each of which has 2 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
            Contains precomputed key and value hidden-states of the attention blocks. Can be used to speed up
            decoding.
3,295
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/speech_to_text/modeling_tf_speech_to_text.py
            If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those
            that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of
            all `decoder_input_ids` of shape `(batch_size, sequence_length)`.
        inputs_embeds (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
            Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
            This is useful if you want more control over how to convert `input_ids` indices into associated
            vectors than the model's internal embedding lookup matrix.
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under
            returned tensors for more detail.
        output_hidden_states (`bool`, *optional*):
3,295
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/speech_to_text/modeling_tf_speech_to_text.py
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
            for more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
    """
3,295
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/speech_to_text/modeling_tf_speech_to_text.py
    if input_ids is not None and inputs_embeds is not None:
        raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time")
    elif input_ids is not None:
        input_shape = shape_list(input_ids)
    elif inputs_embeds is not None:
        input_shape = shape_list(inputs_embeds)[:-1]
    else:
        raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds")

    # past_key_values_length
    past_key_values_length = shape_list(past_key_values[0][0])[2] if past_key_values is not None else 0

    if inputs_embeds is None:
        check_embeddings_within_bounds(input_ids, self.embed_tokens.vocab_size)
        inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale
3,295
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/speech_to_text/modeling_tf_speech_to_text.py
    # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
    if input_shape[-1] > 1:
        combined_attention_mask = _make_causal_mask(input_shape, past_key_values_length=past_key_values_length)
    else:
        combined_attention_mask = _expand_mask(
            tf.ones((input_shape[0], input_shape[1] + past_key_values_length)), tgt_len=input_shape[-1]
        )

    if attention_mask is not None:
        combined_attention_mask = combined_attention_mask + _expand_mask(attention_mask, tgt_len=input_shape[-1])

    # expand encoder attention mask
    if encoder_hidden_states is not None and encoder_attention_mask is not None:
        # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
        encoder_attention_mask = _expand_mask(encoder_attention_mask, tgt_len=input_shape[-1])

    # embed positions
    positions = self.embed_positions(input_ids, past_key_values_length=past_key_values_length)
3,295
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/speech_to_text/modeling_tf_speech_to_text.py
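For intuition about the combination step: both `_make_causal_mask` and `_expand_mask` produce additive masks that are 0 where attention is allowed and a large negative value where it is not, so summing them enforces causality and padding at once. A rough NumPy sketch under that assumption (the helpers' exact signatures are not reproduced here):

```python
import numpy as np

seq_len, neg = 4, -1e9
causal = np.triu(np.full((seq_len, seq_len), neg), k=1)    # block future positions
padding = np.where(np.array([1, 1, 1, 0]) == 1, 0.0, neg)  # last position is padding
combined = causal + padding[None, :]                       # broadcast over query rows
print((combined == 0).astype(int))  # row i attends to j <= i, never to the pad slot
```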
    hidden_states = inputs_embeds + positions
    hidden_states = self.dropout(hidden_states, training=training)

    # decoder layers
    all_hidden_states = () if output_hidden_states else None
    all_self_attns = () if output_attentions else None
    all_cross_attns = () if (output_attentions and encoder_hidden_states is not None) else None
    next_decoder_cache = () if use_cache else None
3,295
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/speech_to_text/modeling_tf_speech_to_text.py
# check that head_mask and cross_attn_head_mask, if given, specify a value for each layer
for attn_mask_name, attn_mask in [("head_mask", head_mask), ("cross_attn_head_mask", cross_attn_head_mask)]:
    if attn_mask is not None:
        tf.debugging.assert_equal(
            shape_list(attn_mask)[0],
            len(self.layers),
            message=(
                f"The {attn_mask_name} should be specified for {len(self.layers)} layers, but it is for"
                f" {shape_list(attn_mask)[0]}."
            ),
        )

for idx, decoder_layer in enumerate(self.layers):
    # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
    if output_hidden_states:
        all_hidden_states += (hidden_states,)
    dropout_probability = random.uniform(0, 1)

    if training and (dropout_probability < self.layerdrop):
        continue
3,295
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/speech_to_text/modeling_tf_speech_to_text.py
    past_key_value = past_key_values[idx] if past_key_values is not None else None
    cross_attn_layer_head_mask = cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None

    hidden_states, layer_self_attn, layer_cross_attn, present_key_value = decoder_layer(
        hidden_states,
        attention_mask=combined_attention_mask,
        encoder_hidden_states=encoder_hidden_states,
        encoder_attention_mask=encoder_attention_mask,
        layer_head_mask=head_mask[idx] if head_mask is not None else None,
        cross_attn_layer_head_mask=cross_attn_layer_head_mask,
        past_key_value=past_key_value,
    )

    if use_cache:
        next_decoder_cache += (present_key_value,)

    if output_attentions:
        all_self_attns += (layer_self_attn,)

        if encoder_hidden_states is not None:
            all_cross_attns += (layer_cross_attn,)
3,295
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/speech_to_text/modeling_tf_speech_to_text.py
hidden_states = self.layer_norm(hidden_states)

if output_hidden_states:
    all_hidden_states += (hidden_states,)

next_cache = next_decoder_cache if use_cache else None
if not return_dict:
    return hidden_states, next_cache, all_hidden_states, all_self_attns, all_cross_attns
else:
    return TFBaseModelOutputWithPastAndCrossAttentions(
        last_hidden_state=hidden_states,
        past_key_values=next_cache,
        hidden_states=all_hidden_states,
        attentions=all_self_attns,
        cross_attentions=all_cross_attns,
    )
3,295
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/speech_to_text/modeling_tf_speech_to_text.py
def build(self, input_shape=None):
    if self.built:
        return
    self.built = True
    if getattr(self, "embed_tokens", None) is not None:
        with tf.name_scope(self.embed_tokens.name):
            self.embed_tokens.build(None)
    if getattr(self, "embed_positions", None) is not None:
        with tf.name_scope(self.embed_positions.name):
            self.embed_positions.build(None)
    if getattr(self, "layer_norm", None) is not None:
        with tf.name_scope(self.layer_norm.name):
            self.layer_norm.build([None, None, self.config.d_model])
    if getattr(self, "layers", None) is not None:
        for layer in self.layers:
            with tf.name_scope(layer.name):
                layer.build(None)
3,295
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/speech_to_text/modeling_tf_speech_to_text.py
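The `build` method above follows the explicit-build idiom used throughout the TF port: mark the layer built, then build each sublayer under its own name scope, binding only the dimensions that must be known up front (here `d_model` for the LayerNorm). A minimal sketch of the same idiom with a hypothetical layer, not from this file:

```python
import tensorflow as tf
from tensorflow import keras

class TinyProjection(keras.layers.Layer):
    """Hypothetical layer illustrating the explicit-build idiom used above."""

    def __init__(self, units, **kwargs):
        super().__init__(**kwargs)
        self.dense = keras.layers.Dense(units, name="proj")

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        with tf.name_scope(self.dense.name):
            self.dense.build([None, None, 16])  # bind the trailing dim explicitly, as layer_norm does above

    def call(self, x):
        return self.dense(x)

layer = TinyProjection(8)
layer.build()
print(layer(tf.random.normal((2, 4, 16))).shape)  # (2, 4, 8)
```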
class TFSpeech2TextMainLayer(keras.layers.Layer):
    config_class = Speech2TextConfig

    def __init__(self, config: Speech2TextConfig, **kwargs):
        super().__init__(**kwargs)
        self.config = config

        self.encoder = TFSpeech2TextEncoder(config, name="encoder")
        self.decoder = TFSpeech2TextDecoder(config, name="decoder")

    def get_input_embeddings(self):
        return self.decoder.embed_tokens

    def set_input_embeddings(self, new_embeddings):
        self.decoder.embed_tokens = new_embeddings
3,296
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/speech_to_text/modeling_tf_speech_to_text.py
@unpack_inputs
def call(
    self,
    input_features=None,
    attention_mask=None,
    decoder_input_ids=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
    encoder_outputs=None,
    past_key_values=None,
    decoder_inputs_embeds=None,
    use_cache=None,
    output_attentions=None,
    output_hidden_states=None,
    return_dict=None,
    training=False,
    **kwargs,
):
    output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
    output_hidden_states = (
        output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
    )
    use_cache = use_cache if use_cache is not None else self.config.use_cache
    return_dict = return_dict if return_dict is not None else self.config.use_return_dict
3,296
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/speech_to_text/modeling_tf_speech_to_text.py
if encoder_outputs is None:
    encoder_outputs = self.encoder(
        input_features=input_features,
        attention_mask=attention_mask,
        head_mask=head_mask,
        output_attentions=output_attentions,
        output_hidden_states=output_hidden_states,
        return_dict=return_dict,
        training=training,
    )
# If the user passed a tuple for encoder_outputs, we wrap it in a TFBaseModelOutput when return_dict=True
elif return_dict and not isinstance(encoder_outputs, TFBaseModelOutput):
    encoder_outputs = TFBaseModelOutput(
        last_hidden_state=encoder_outputs[0],
        hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None,
        attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None,
    )
# If the user passed a TFBaseModelOutput for encoder_outputs, we wrap it in a tuple when return_dict=False
3,296
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/speech_to_text/modeling_tf_speech_to_text.py
elif not return_dict and not isinstance(encoder_outputs, tuple):
    encoder_outputs = encoder_outputs.to_tuple()
3,296
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/speech_to_text/modeling_tf_speech_to_text.py
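One practical consequence of the wrapping logic above: `encoder_outputs` can be computed once and reused across decoder calls, which is what generation loops do. A sketch under assumed names (dummy features, placeholder token ids), not a definitive usage:

```python
import tensorflow as tf
from transformers import TFSpeech2TextModel

model = TFSpeech2TextModel.from_pretrained("facebook/s2t-small-librispeech-asr", from_pt=True)
input_features = tf.random.normal((1, 584, 80))  # dummy log-mel features

# Encode the audio once...
encoder_outputs = model.get_encoder()(input_features=input_features, return_dict=True)

# ...then reuse the result for several decoder prefixes without re-running the encoder.
for prefix in ([[2]], [[2, 5]], [[2, 5, 9]]):  # placeholder token ids
    out = model(
        input_features=None,
        encoder_outputs=encoder_outputs,
        decoder_input_ids=tf.constant(prefix),
    )
```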
# downsample encoder attention mask
if attention_mask is not None:
    encoder_attention_mask = self.encoder._get_feature_vector_attention_mask(
        tf.shape(encoder_outputs[0])[1], attention_mask
    )
else:
    encoder_attention_mask = None
3,296
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/speech_to_text/modeling_tf_speech_to_text.py
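The downsampling exists because the encoder's strided convolutions shrink the time axis, so a mask over input frames no longer lines up with the encoder's outputs. A rough sketch of the length arithmetic (the real helper is `_get_feature_vector_attention_mask`; the two stride-2 conv layers are an assumption matching the default config):

```python
import tensorflow as tf

def downsampled_lengths(frame_lengths, num_conv_layers=2):
    lengths = frame_lengths
    for _ in range(num_conv_layers):
        lengths = (lengths - 1) // 2 + 1  # each stride-2 convolution roughly halves the time axis
    return lengths

def feature_vector_attention_mask(feature_len, attention_mask):
    # attention_mask: [bsz, num_frames] of 1 (real frame) / 0 (padding)
    lengths = downsampled_lengths(tf.reduce_sum(attention_mask, axis=-1))
    return tf.sequence_mask(lengths, maxlen=feature_len, dtype=attention_mask.dtype)

mask = tf.constant([[1] * 100 + [0] * 20])  # 100 real frames, 20 padding frames
print(feature_vector_attention_mask(feature_len=30, attention_mask=mask))  # 25 ones, then zeros
```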
# decoder outputs consist of (dec_features, past_key_value, dec_hidden, dec_attn)
decoder_outputs = self.decoder(
    input_ids=decoder_input_ids,
    attention_mask=decoder_attention_mask,
    encoder_hidden_states=encoder_outputs[0],
    encoder_attention_mask=encoder_attention_mask,
    head_mask=decoder_head_mask,
    cross_attn_head_mask=cross_attn_head_mask,
    past_key_values=past_key_values,
    inputs_embeds=decoder_inputs_embeds,
    use_cache=use_cache,
    output_attentions=output_attentions,
    output_hidden_states=output_hidden_states,
    return_dict=return_dict,
    training=training,
)

if not return_dict:
    return decoder_outputs + encoder_outputs
3,296
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/speech_to_text/modeling_tf_speech_to_text.py
return TFSeq2SeqModelOutput(
    last_hidden_state=decoder_outputs.last_hidden_state,
    past_key_values=decoder_outputs.past_key_values,
    decoder_hidden_states=decoder_outputs.hidden_states,
    decoder_attentions=decoder_outputs.attentions,
    cross_attentions=decoder_outputs.cross_attentions,
    encoder_last_hidden_state=encoder_outputs.last_hidden_state,
    encoder_hidden_states=encoder_outputs.hidden_states,
    encoder_attentions=encoder_outputs.attentions,
)

def build(self, input_shape=None):
    if self.built:
        return
    self.built = True
    if getattr(self, "encoder", None) is not None:
        with tf.name_scope(self.encoder.name):
            self.encoder.build(None)
    if getattr(self, "decoder", None) is not None:
        with tf.name_scope(self.decoder.name):
            self.decoder.build(None)
3,296
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/speech_to_text/modeling_tf_speech_to_text.py
class TFSpeech2TextModel(TFSpeech2TextPreTrainedModel):
    def __init__(self, config: Speech2TextConfig, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)

        self.model = TFSpeech2TextMainLayer(config, name="model")

    def get_encoder(self):
        return self.model.encoder

    def get_decoder(self):
        return self.model.decoder
3,297
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/speech_to_text/modeling_tf_speech_to_text.py
@unpack_inputs
@add_start_docstrings_to_model_forward(SPEECH_TO_TEXT_INPUTS_DOCSTRING)
@add_code_sample_docstrings(
    checkpoint=_CHECKPOINT_FOR_DOC,
    output_type=TFSeq2SeqModelOutput,
    config_class=_CONFIG_FOR_DOC,
)
def call(
    self,
    input_features: TFModelInputType | None = None,
    attention_mask: np.ndarray | tf.Tensor | None = None,
    decoder_input_ids: np.ndarray | tf.Tensor | None = None,
    decoder_attention_mask: np.ndarray | tf.Tensor | None = None,
    head_mask: np.ndarray | tf.Tensor | None = None,
    decoder_head_mask: np.ndarray | tf.Tensor | None = None,
    cross_attn_head_mask: np.ndarray | tf.Tensor | None = None,
    encoder_outputs: np.ndarray | tf.Tensor | None = None,
    past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None,
    decoder_inputs_embeds: np.ndarray | tf.Tensor | None = None,
    use_cache: Optional[bool] = None,
3,297
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/speech_to_text/modeling_tf_speech_to_text.py
    output_attentions: Optional[bool] = None,
    output_hidden_states: Optional[bool] = None,
    return_dict: Optional[bool] = None,
    training: bool = False,
    **kwargs,
) -> Union[Tuple, TFSeq2SeqModelOutput]:
    outputs = self.model(
        input_features=input_features,
        attention_mask=attention_mask,
        decoder_input_ids=decoder_input_ids,
        decoder_attention_mask=decoder_attention_mask,
        head_mask=head_mask,
        decoder_head_mask=decoder_head_mask,
        cross_attn_head_mask=cross_attn_head_mask,
        encoder_outputs=encoder_outputs,
        past_key_values=past_key_values,
        decoder_inputs_embeds=decoder_inputs_embeds,
        use_cache=use_cache,
        output_attentions=output_attentions,
        output_hidden_states=output_hidden_states,
        return_dict=return_dict,
        training=training,
    )
3,297
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/speech_to_text/modeling_tf_speech_to_text.py
    return outputs

def serving_output(self, output):
    pkv = tf.tuple(output.past_key_values)[1] if self.config.use_cache else None
    dec_hs = tf.convert_to_tensor(output.decoder_hidden_states) if self.config.output_hidden_states else None
    dec_attns = tf.convert_to_tensor(output.decoder_attentions) if self.config.output_attentions else None
    cross_attns = tf.convert_to_tensor(output.cross_attentions) if self.config.output_attentions else None
    enc_hs = tf.convert_to_tensor(output.encoder_hidden_states) if self.config.output_hidden_states else None
    enc_attns = tf.convert_to_tensor(output.encoder_attentions) if self.config.output_attentions else None
3,297
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/speech_to_text/modeling_tf_speech_to_text.py
    return TFSeq2SeqModelOutput(
        last_hidden_state=output.last_hidden_state,
        past_key_values=pkv,
        decoder_hidden_states=dec_hs,
        decoder_attentions=dec_attns,
        cross_attentions=cross_attns,
        encoder_last_hidden_state=output.encoder_last_hidden_state,
        encoder_hidden_states=enc_hs,
        encoder_attentions=enc_attns,
    )

def build(self, input_shape=None):
    if self.built:
        return
    self.built = True
    if getattr(self, "model", None) is not None:
        with tf.name_scope(self.model.name):
            self.model.build(None)
3,297
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/speech_to_text/modeling_tf_speech_to_text.py
class TFSpeech2TextForConditionalGeneration(TFSpeech2TextPreTrainedModel, TFCausalLanguageModelingLoss):
    def __init__(self, config: Speech2TextConfig):
        super().__init__(config)
        self.model = TFSpeech2TextMainLayer(config, name="model")
        self.lm_head = keras.layers.Dense(self.config.vocab_size, use_bias=False, name="lm_head")
        # TODO (Joao): investigate why Speech2Text has numerical issues in XLA generate
        self.supports_xla_generation = False
        self.config = config

    def get_encoder(self):
        return self.model.encoder

    def get_decoder(self):
        return self.model.decoder

    def resize_token_embeddings(self, new_num_tokens: int) -> tf.Variable:
        new_embeddings = super().resize_token_embeddings(new_num_tokens)
        return new_embeddings

    def get_output_embeddings(self):
        return self.lm_head

    def set_output_embeddings(self, new_embeddings):
        self.lm_head = new_embeddings
3,298
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/speech_to_text/modeling_tf_speech_to_text.py
@unpack_inputs
@add_start_docstrings_to_model_forward(SPEECH_TO_TEXT_INPUTS_DOCSTRING)
@replace_return_docstrings(output_type=TFSeq2SeqLMOutput, config_class=_CONFIG_FOR_DOC)
def call(
    self,
    input_features: TFModelInputType | None = None,
    attention_mask: np.ndarray | tf.Tensor | None = None,
    decoder_input_ids: np.ndarray | tf.Tensor | None = None,
    decoder_attention_mask: np.ndarray | tf.Tensor | None = None,
    head_mask: np.ndarray | tf.Tensor | None = None,
    decoder_head_mask: np.ndarray | tf.Tensor | None = None,
    cross_attn_head_mask: np.ndarray | tf.Tensor | None = None,
    encoder_outputs: np.ndarray | tf.Tensor | None = None,
    past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None,
    decoder_inputs_embeds: np.ndarray | tf.Tensor | None = None,
    labels: np.ndarray | tf.Tensor | None = None,
    use_cache: Optional[bool] = None,
    output_attentions: Optional[bool] = None,
3,298
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/speech_to_text/modeling_tf_speech_to_text.py
    output_hidden_states: Optional[bool] = None,
    return_dict: Optional[bool] = None,
    training: Optional[bool] = False,
    **kwargs,
) -> Union[Tuple, TFSeq2SeqLMOutput]:
    r"""
    labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
        Labels for computing the language modeling loss. Indices should either be in `[0, ..., config.vocab_size]`
        or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked); the loss is
        only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
3,298
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/speech_to_text/modeling_tf_speech_to_text.py
    Returns:

    Example:

    ```python
    >>> import tensorflow as tf
    >>> from transformers import Speech2TextProcessor, TFSpeech2TextForConditionalGeneration
    >>> from datasets import load_dataset
    >>> import soundfile as sf

    >>> model = TFSpeech2TextForConditionalGeneration.from_pretrained(
    ...     "facebook/s2t-small-librispeech-asr", from_pt=True
    ... )
    >>> processor = Speech2TextProcessor.from_pretrained("facebook/s2t-small-librispeech-asr")


    >>> def map_to_array(batch):
    ...     speech, _ = sf.read(batch["file"])
    ...     batch["speech"] = speech
    ...     return batch


    >>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
    >>> ds = ds.map(map_to_array)
    >>> ds.set_format(type="tf")
3,298
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/speech_to_text/modeling_tf_speech_to_text.py
    >>> input_features = processor(
    ...     ds["speech"][0], sampling_rate=16000, return_tensors="tf"
    ... ).input_features  # Batch size 1
    >>> generated_ids = model.generate(input_features)

    >>> transcription = processor.batch_decode(generated_ids)
    ```"""
    return_dict = return_dict if return_dict is not None else self.config.use_return_dict

    if labels is not None:
        if decoder_input_ids is None and decoder_inputs_embeds is None:
            decoder_input_ids = shift_tokens_right(
                labels, self.config.pad_token_id, self.config.decoder_start_token_id
            )
3,298
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/speech_to_text/modeling_tf_speech_to_text.py
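To round out the docstring's generation example, here is a sketch of the training path the block above enables: passing `labels` makes the model derive `decoder_input_ids` via `shift_tokens_right` and return the token-level cross-entropy loss. The dummy audio and transcript below are assumptions for illustration only.

```python
import tensorflow as tf
from transformers import Speech2TextProcessor, TFSpeech2TextForConditionalGeneration

model = TFSpeech2TextForConditionalGeneration.from_pretrained(
    "facebook/s2t-small-librispeech-asr", from_pt=True
)
processor = Speech2TextProcessor.from_pretrained("facebook/s2t-small-librispeech-asr")

speech = tf.random.normal((16000,)).numpy()  # one second of dummy 16 kHz audio
inputs = processor(speech, sampling_rate=16000, return_tensors="tf")
labels = processor.tokenizer("a test transcript", return_tensors="tf").input_ids

# `decoder_input_ids` is omitted, so the model builds it from `labels` internally.
outputs = model(input_features=inputs.input_features, labels=labels, training=True)
print(outputs.loss)
```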
    outputs = self.model(
        input_features=input_features,
        attention_mask=attention_mask,
        decoder_input_ids=decoder_input_ids,
        encoder_outputs=encoder_outputs,
        decoder_attention_mask=decoder_attention_mask,
        head_mask=head_mask,
        decoder_head_mask=decoder_head_mask,
        cross_attn_head_mask=cross_attn_head_mask,
        past_key_values=past_key_values,
        decoder_inputs_embeds=decoder_inputs_embeds,
        use_cache=use_cache,
        output_attentions=output_attentions,
        output_hidden_states=output_hidden_states,
        return_dict=return_dict,
        training=training,
    )
    lm_logits = self.lm_head(outputs[0])
    masked_lm_loss = None if labels is None else self.hf_compute_loss(labels, lm_logits)
3,298
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/speech_to_text/modeling_tf_speech_to_text.py