Dataset preview columns: text (string, 1–1.02k chars), class_index (int64, 0–10.8k), source (string, 85–188 chars).
return causal_mask
3,273
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/olmoe/modeling_olmoe.py
class OlmoeForCausalLM(OlmoePreTrainedModel, GenerationMixin): _tied_weights_keys = ["lm_head.weight"] def __init__(self, config): super().__init__(config) self.model = OlmoeModel(config) self.vocab_size = config.vocab_size self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False) self.router_aux_loss_coef = config.router_aux_loss_coef self.num_experts = config.num_experts self.num_experts_per_tok = config.num_experts_per_tok # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.model.embed_tokens def set_input_embeddings(self, value): self.model.embed_tokens = value def get_output_embeddings(self): return self.lm_head def set_output_embeddings(self, new_embeddings): self.lm_head = new_embeddings def set_decoder(self, decoder): self.model = decoder
3,274
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/olmoe/modeling_olmoe.py
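To make the wiring above concrete, here is a hedged smoke-test sketch: a deliberately tiny `OlmoeConfig` (all hyperparameter values below are made up for illustration, not the real checkpoint's) instantiated into `OlmoeForCausalLM`, showing how `num_experts`, `num_experts_per_tok` and the untied `lm_head` come straight from the config.

```python
# Illustrative only: a tiny config so the model fits in memory for a quick check.
from transformers import OlmoeConfig, OlmoeForCausalLM

config = OlmoeConfig(
    vocab_size=128,
    hidden_size=32,
    intermediate_size=32,
    num_hidden_layers=2,
    num_attention_heads=4,
    num_experts=8,
    num_experts_per_tok=2,
)
model = OlmoeForCausalLM(config)

print(model.lm_head)                  # Linear(in_features=32, out_features=128, bias=False)
print(model.get_input_embeddings())   # Embedding(128, 32, padding_idx=1)
print(model.num_experts, model.num_experts_per_tok)  # 8 2
```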
def get_decoder(self): return self.model
3,274
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/olmoe/modeling_olmoe.py
@add_start_docstrings_to_model_forward(OLMOE_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=MoeCausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids: torch.LongTensor = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[List[torch.FloatTensor]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, output_router_logits: Optional[bool] = None, return_dict: Optional[bool] = None, cache_position: Optional[torch.LongTensor] = None, num_logits_to_keep: int = 0, **loss_kwargs, ) -> Union[Tuple, MoeCausalLMOutputWithPast]: r""" Args:
3,274
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/olmoe/modeling_olmoe.py
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
3,274
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/olmoe/modeling_olmoe.py
num_logits_to_keep (`int`, *optional*): Calculate logits for the last `num_logits_to_keep` tokens. If `0`, calculate logits for all `input_ids` (special case). Only last token logits are needed for generation, and calculating them only for that token can save memory, which becomes pretty significant for long sequences or large vocabulary size. Returns: Example: ```python >>> from transformers import AutoTokenizer, OlmoeForCausalLM >>> model = OlmoeForCausalLM.from_pretrained("allenai/OLMoE-1B-7B-0924") >>> tokenizer = AutoTokenizer.from_pretrained("allenai/OLMoE-1B-7B-0924") >>> prompt = "Hey, are you conscious? Can you talk to me?" >>> inputs = tokenizer(prompt, return_tensors="pt")
3,274
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/olmoe/modeling_olmoe.py
>>> # Generate >>> generate_ids = model.generate(inputs.input_ids, max_length=30) >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0] 'Hey, are you conscious? Can you talk to me?\nI’m not sure if you’re conscious of this, but I’m' ``` """ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_router_logits = ( output_router_logits if output_router_logits is not None else self.config.output_router_logits ) output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict
3,274
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/olmoe/modeling_olmoe.py
# decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn) outputs = self.model( input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, output_router_logits=output_router_logits, return_dict=return_dict, cache_position=cache_position, ) hidden_states = outputs[0] # Only compute necessary logits, and do not upcast them to float if we are not computing the loss logits = self.lm_head(hidden_states[:, -num_logits_to_keep:, :]) loss = None if labels is not None: loss = self.loss_function(logits, labels, self.vocab_size, **loss_kwargs)
3,274
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/olmoe/modeling_olmoe.py
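The slice `hidden_states[:, -num_logits_to_keep:, :]` above relies on the fact that `-0` is simply `0`, so passing `num_logits_to_keep=0` keeps every position. A minimal, model-free sketch of that behaviour:

```python
# Sketch of the logits-slicing trick (plain PyTorch, no model needed).
import torch

hidden_states = torch.randn(2, 6, 4)   # (batch, seq_len, hidden)

# num_logits_to_keep == 0: the slice keeps the whole sequence.
print(hidden_states[:, -0:, :].shape)   # torch.Size([2, 6, 4])

# num_logits_to_keep == 1: only the last position is projected,
# which is all that incremental generation needs.
print(hidden_states[:, -1:, :].shape)   # torch.Size([2, 1, 4])
```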
aux_loss = None if output_router_logits: aux_loss = load_balancing_loss_func( outputs.router_logits if return_dict else outputs[-1], self.num_experts, self.num_experts_per_tok, attention_mask, ) if labels is not None: loss += self.router_aux_loss_coef * aux_loss.to(loss.device) # make sure to reside in the same device if not return_dict: output = (logits,) + outputs[1:] if output_router_logits: output = (aux_loss,) + output return (loss,) + output if loss is not None else output return MoeCausalLMOutputWithPast( loss=loss, aux_loss=aux_loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, router_logits=outputs.router_logits, )
3,274
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/olmoe/modeling_olmoe.py
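Roughly, the auxiliary loss added above penalizes uneven routing across experts. As an illustration only, here is a simplified, unmasked Switch-Transformers-style sketch; it is not the actual `load_balancing_loss_func`, which also handles `attention_mask` and concatenates router logits across layers.

```python
# Simplified sketch of a load-balancing auxiliary loss (illustrative, not the library code).
import torch
import torch.nn.functional as F

def simple_load_balancing_loss(router_logits, num_experts, top_k):
    # router_logits: (num_tokens, num_experts)
    probs = F.softmax(router_logits, dim=-1)
    _, selected = torch.topk(probs, top_k, dim=-1)
    expert_mask = F.one_hot(selected, num_experts).float()   # (tokens, top_k, experts)
    tokens_per_expert = expert_mask.mean(dim=(0, 1))          # fraction of assignments per expert
    router_prob_per_expert = probs.mean(dim=0)                # mean router probability per expert
    return num_experts * torch.sum(tokens_per_expert * router_prob_per_expert)

loss = simple_load_balancing_loss(torch.randn(16, 8), num_experts=8, top_k=2)
print(loss)
```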
class OlmoeConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`OlmoeModel`]. It is used to instantiate an OLMoE model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the [allenai/OLMoE-1B-7B-0924](https://huggingface.co/allenai/OLMoE-1B-7B-0924). Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.
3,275
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/olmoe/configuration_olmoe.py
Args: vocab_size (`int`, *optional*, defaults to 50304): Vocabulary size of the OLMoE model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`OlmoeModel`] hidden_size (`int`, *optional*, defaults to 2048): Dimension of the hidden representations. intermediate_size (`int`, *optional*, defaults to 2048): Dimension of the MLP representations. num_hidden_layers (`int`, *optional*, defaults to 16): Number of hidden layers in the Transformer decoder. num_attention_heads (`int`, *optional*, defaults to 16): Number of attention heads for each attention layer in the Transformer decoder. num_key_value_heads (`int`, *optional*): This is the number of key_value heads that should be used to implement Grouped Query Attention. If `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
3,275
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/olmoe/configuration_olmoe.py
`num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed by meanpooling all the original heads within that group. For more details checkout [this paper](https://arxiv.org/pdf/2305.13245.pdf). If it is not specified, will default to `num_attention_heads`. hidden_act (`str` or `function`, *optional*, defaults to `"silu"`): The non-linear activation function (function or string) in the decoder. max_position_embeddings (`int`, *optional*, defaults to 4096): The maximum sequence length that this model might ever be used with. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. rms_norm_eps (`float`, *optional*, defaults to 1e-05):
3,275
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/olmoe/configuration_olmoe.py
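As a hedged illustration of the mean-pooling conversion mentioned above (shapes and names here are hypothetical, not the real conversion script): groups of key or value projection heads from a multi-head checkpoint are averaged into a single head per GQA group.

```python
# Illustrative MHA -> GQA key-head conversion by mean-pooling each group of heads.
import torch

num_attention_heads, num_key_value_heads, head_dim, hidden = 16, 4, 128, 2048
group_size = num_attention_heads // num_key_value_heads   # 4 original heads per GQA head

k_proj = torch.randn(num_attention_heads, head_dim, hidden)            # per-head K projections
k_proj_gqa = k_proj.view(num_key_value_heads, group_size, head_dim, hidden).mean(dim=1)
print(k_proj_gqa.shape)   # torch.Size([4, 128, 2048])
```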
The epsilon used by the rms normalization layers. use_cache (`bool`, *optional*, defaults to `True`): Whether or not the model should return the last key/values attentions (not used by all models). Only relevant if `config.is_decoder=True`. pad_token_id (`int`, *optional*, defaults to 1): Padding token id. bos_token_id (`int`, *optional*): Beginning of stream token id. eos_token_id (`int`, *optional*, defaults to 50279): End of stream token id. tie_word_embeddings (`bool`, *optional*, defaults to `False`): Whether to tie weight embeddings rope_theta (`float`, *optional*, defaults to 10000.0): The base period of the RoPE embeddings. rope_scaling (`Dict`, *optional*): Dictionary containing the scaling configuration for the RoPE embeddings. Currently supports two scaling
3,275
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/olmoe/configuration_olmoe.py
strategies: linear and dynamic. Their scaling factor must be a float greater than 1. The expected format is `{"type": strategy name, "factor": scaling factor}`. When using this flag, don't update `max_position_embeddings` to the expected new maximum. See the following thread for more information on how these scaling strategies behave: https://www.reddit.com/r/LocalLLaMA/comments/14mrgpr/dynamically_scaled_rope_further_increases/. This is an experimental feature, subject to breaking API changes in future versions. attention_bias (`bool`, *optional*, defaults to `False`): Whether to use a bias in the query, key, value and output projection layers during self-attention. attention_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for the attention probabilities. clip_qkv (`float`, *optional*):
3,275
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/olmoe/configuration_olmoe.py
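A minimal sketch of passing such a scaling dict, assuming a recent transformers version (the accepted keys and validation rules have changed across releases, so treat this as illustrative rather than definitive):

```python
# Illustrative only: enabling linear RoPE scaling through the config dict described above.
from transformers import OlmoeConfig

config = OlmoeConfig(rope_scaling={"rope_type": "linear", "factor": 2.0})
print(config.rope_scaling)
```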
If not `None`, elements of query, key and value attention states are clipped so that their absolute value does not exceed this value. num_experts_per_tok (`int`, *optional*, defaults to 8): Number of selected experts. num_experts (`int`, *optional*, defaults to 64): Number of routed experts. output_router_logits (`bool`, *optional*, defaults to `False`): Whether or not the router logits should be returned by the model. Enabling this will also allow the model to output the auxiliary loss, including load balancing loss and router z-loss. router_aux_loss_coef (`float`, *optional*, defaults to 0.01): The aux loss factor for the total loss. norm_topk_prob (`bool`, *optional*, defaults to `False`): Whether to normalize the topk probabilities.
3,275
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/olmoe/configuration_olmoe.py
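A simplified sketch of what `num_experts_per_tok` and `norm_topk_prob` control in the router (the actual MoE block lives in modeling_olmoe.py; this is not that code):

```python
# Illustrative per-token top-k expert routing with optional weight renormalization.
import torch
import torch.nn.functional as F

def route(router_logits, top_k=8, norm_topk_prob=False):
    probs = F.softmax(router_logits, dim=-1)               # (tokens, num_experts)
    weights, experts = torch.topk(probs, top_k, dim=-1)    # keep the k best experts per token
    if norm_topk_prob:
        weights = weights / weights.sum(dim=-1, keepdim=True)  # renormalize to sum to 1
    return weights, experts

weights, experts = route(torch.randn(4, 64), top_k=8, norm_topk_prob=True)
print(weights.sum(dim=-1))   # all ones when norm_topk_prob=True
```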
```python >>> from transformers import OlmoeModel, OlmoeConfig >>> # Initializing a OLMoE 7B A1B style configuration >>> configuration = OlmoeConfig() >>> # Initializing a model from the OLMoE 7B A1B style configuration >>> model = OlmoeModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "olmoe" keys_to_ignore_at_inference = ["past_key_values"]
3,275
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/olmoe/configuration_olmoe.py
def __init__( self, vocab_size=50304, hidden_size=2048, intermediate_size=2048, num_hidden_layers=16, num_attention_heads=16, num_key_value_heads=None, hidden_act="silu", max_position_embeddings=4096, initializer_range=0.02, rms_norm_eps=1e-05, use_cache=True, pad_token_id=1, bos_token_id=None, eos_token_id=50279, tie_word_embeddings=False, rope_theta=10000.0, rope_scaling=None, attention_bias=False, attention_dropout=0.0, clip_qkv=None, num_experts_per_tok=8, num_experts=64, output_router_logits=False, router_aux_loss_coef=0.01, norm_topk_prob=False, **kwargs, ): self.vocab_size = vocab_size self.max_position_embeddings = max_position_embeddings self.hidden_size = hidden_size self.intermediate_size = intermediate_size
3,275
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/olmoe/configuration_olmoe.py
self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads
3,275
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/olmoe/configuration_olmoe.py
# for backward compatibility if num_key_value_heads is None: num_key_value_heads = num_attention_heads
3,275
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/olmoe/configuration_olmoe.py
self.num_key_value_heads = num_key_value_heads self.hidden_act = hidden_act self.initializer_range = initializer_range self.rms_norm_eps = rms_norm_eps self.use_cache = use_cache self.rope_theta = rope_theta self.rope_scaling = rope_scaling self.attention_bias = attention_bias self.attention_dropout = attention_dropout self.clip_qkv = clip_qkv self.num_experts_per_tok = num_experts_per_tok self.num_experts = num_experts self.output_router_logits = output_router_logits self.router_aux_loss_coef = router_aux_loss_coef self.norm_topk_prob = norm_topk_prob # Validate the correctness of rotary position embeddings parameters # BC: if there is a 'type' field, move it to 'rope_type'. if self.rope_scaling is not None and "type" in self.rope_scaling: self.rope_scaling["rope_type"] = self.rope_scaling["type"] rope_config_validation(self)
3,275
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/olmoe/configuration_olmoe.py
super().__init__( pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs, )
3,275
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/olmoe/configuration_olmoe.py
class Speech2TextTokenizer(PreTrainedTokenizer): """ Construct a Speech2Text tokenizer. This tokenizer inherits from [`PreTrainedTokenizer`] which contains some of the main methods. Users should refer to the superclass for more information regarding such methods.
3,276
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/speech_to_text/tokenization_speech_to_text.py
Args: vocab_file (`str`): File containing the vocabulary. spm_file (`str`): Path to the [SentencePiece](https://github.com/google/sentencepiece) model file bos_token (`str`, *optional*, defaults to `"<s>"`): The beginning of sentence token. eos_token (`str`, *optional*, defaults to `"</s>"`): The end of sentence token. unk_token (`str`, *optional*, defaults to `"<unk>"`): The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead. pad_token (`str`, *optional*, defaults to `"<pad>"`): The token used for padding, for example when batching sequences of different lengths. do_upper_case (`bool`, *optional*, defaults to `False`): Whether or not to uppercase the output when decoding. do_lower_case (`bool`, *optional*, defaults to `False`):
3,276
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/speech_to_text/tokenization_speech_to_text.py
Whether or not to lowercase the input when tokenizing. tgt_lang (`str`, *optional*): A string representing the target language. sp_model_kwargs (`dict`, *optional*): Will be passed to the `SentencePieceProcessor.__init__()` method. The [Python wrapper for SentencePiece](https://github.com/google/sentencepiece/tree/master/python) can be used, among other things, to set:
3,276
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/speech_to_text/tokenization_speech_to_text.py
- `enable_sampling`: Enable subword regularization. - `nbest_size`: Sampling parameters for unigram. Invalid for BPE-Dropout. - `nbest_size = {0,1}`: No sampling is performed. - `nbest_size > 1`: samples from the nbest_size results. - `nbest_size < 0`: assuming that nbest_size is infinite and samples from the all hypothesis (lattice) using forward-filtering-and-backward-sampling algorithm. - `alpha`: Smoothing parameter for unigram sampling, and dropout probability of merge operations for BPE-dropout. **kwargs Additional keyword arguments passed along to [`PreTrainedTokenizer`] """ vocab_files_names = VOCAB_FILES_NAMES model_input_names = ["input_ids", "attention_mask"] prefix_tokens: List[int] = []
3,276
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/speech_to_text/tokenization_speech_to_text.py
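A hedged usage sketch for `sp_model_kwargs`: the checkpoint name below is one public Speech2Text checkpoint, the sampling values are illustrative, and subword sampling requires a sufficiently recent sentencepiece release.

```python
# Illustrative only: enabling SentencePiece subword regularization via sp_model_kwargs.
from transformers import Speech2TextTokenizer

tokenizer = Speech2TextTokenizer.from_pretrained(
    "facebook/s2t-small-librispeech-asr",
    sp_model_kwargs={"enable_sampling": True, "nbest_size": -1, "alpha": 0.1},
)
print(tokenizer.tokenize("hello world"))  # tokenization may now differ from call to call
```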
def __init__( self, vocab_file, spm_file, bos_token="<s>", eos_token="</s>", pad_token="<pad>", unk_token="<unk>", do_upper_case=False, do_lower_case=False, tgt_lang=None, lang_codes=None, additional_special_tokens=None, sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs, ) -> None: self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs self.do_upper_case = do_upper_case self.do_lower_case = do_lower_case self.encoder = load_json(vocab_file) self.decoder = {v: k for k, v in self.encoder.items()} self.spm_file = spm_file self.sp_model = load_spm(spm_file, self.sp_model_kwargs)
3,276
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/speech_to_text/tokenization_speech_to_text.py
if lang_codes is not None: self.lang_codes = lang_codes self.langs = LANGUAGES[lang_codes] self.lang_tokens = [f"<lang:{lang}>" for lang in self.langs] self.lang_code_to_id = {lang: self.sp_model.PieceToId(f"<lang:{lang}>") for lang in self.langs} if additional_special_tokens is not None: additional_special_tokens = self.lang_tokens + additional_special_tokens else: additional_special_tokens = self.lang_tokens self._tgt_lang = tgt_lang if tgt_lang is not None else self.langs[0] self.set_tgt_lang_special_tokens(self._tgt_lang) else: self.lang_code_to_id = {}
3,276
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/speech_to_text/tokenization_speech_to_text.py
super().__init__( bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, pad_token=pad_token, do_upper_case=do_upper_case, do_lower_case=do_lower_case, tgt_lang=tgt_lang, lang_codes=lang_codes, sp_model_kwargs=self.sp_model_kwargs, additional_special_tokens=additional_special_tokens, **kwargs, ) @property def vocab_size(self) -> int: return len(self.encoder) def get_vocab(self) -> Dict: vocab = self.encoder.copy() vocab.update(self.added_tokens_encoder) return vocab @property def tgt_lang(self) -> str: return self._tgt_lang @tgt_lang.setter def tgt_lang(self, new_tgt_lang) -> None: self._tgt_lang = new_tgt_lang self.set_tgt_lang_special_tokens(new_tgt_lang)
3,276
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/speech_to_text/tokenization_speech_to_text.py
def set_tgt_lang_special_tokens(self, tgt_lang: str) -> None: """Reset the special tokens to the target language setting. prefix=[tgt_lang_code] and suffix=[eos].""" lang_code_id = self.lang_code_to_id[tgt_lang] self.prefix_tokens = [lang_code_id] def _tokenize(self, text: str) -> List[str]: return self.sp_model.encode(text, out_type=str) def _convert_token_to_id(self, token): return self.encoder.get(token, self.encoder[self.unk_token]) def _convert_id_to_token(self, index: int) -> str: """Converts an index (integer) to a token (str) using the decoder.""" return self.decoder.get(index, self.unk_token)
3,276
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/speech_to_text/tokenization_speech_to_text.py
def convert_tokens_to_string(self, tokens: List[str]) -> str: """Converts a sequence of tokens (strings for sub-words) into a single string.""" current_sub_tokens = [] out_string = "" for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: decoded = self.sp_model.decode(current_sub_tokens) out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " " current_sub_tokens = [] else: current_sub_tokens.append(token) decoded = self.sp_model.decode(current_sub_tokens) out_string += decoded.upper() if self.do_upper_case else decoded return out_string.strip()
3,276
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/speech_to_text/tokenization_speech_to_text.py
def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]: """Build model inputs from a sequence by appending eos_token_id.""" if token_ids_1 is None: return self.prefix_tokens + token_ids_0 + [self.eos_token_id] # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_0 + token_ids_1 + [self.eos_token_id] def get_special_tokens_mask( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False ) -> List[int]: """ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer `prepare_for_model` method.
3,276
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/speech_to_text/tokenization_speech_to_text.py
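Conceptually, `build_inputs_with_special_tokens` produces `[tgt_lang_code] + token_ids + [eos]` for multilingual checkpoints (and just `token_ids + [eos]` when no language codes are configured). A tiny sketch with made-up ids:

```python
# Conceptual layout only: every id below is hypothetical.
prefix_tokens = [10003]          # e.g. the id of "<lang:fr>"
token_ids_0 = [51, 862, 17]
eos_token_id = 2

input_ids = prefix_tokens + token_ids_0 + [eos_token_id]
print(input_ids)                 # [10003, 51, 862, 17, 2]
```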
Args: token_ids_0 (`List[int]`): List of IDs. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. already_has_special_tokens (`bool`, *optional*, defaults to `False`): Whether or not the token list is already formatted with special tokens for the model. Returns: `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token. """ if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True ) prefix_ones = [1] * len(self.prefix_tokens) suffix_ones = [1] if token_ids_1 is None: return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones
3,276
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/speech_to_text/tokenization_speech_to_text.py
def __getstate__(self) -> Dict: state = self.__dict__.copy() state["sp_model"] = None return state def __setstate__(self, d: Dict) -> None: self.__dict__ = d # for backward compatibility if not hasattr(self, "sp_model_kwargs"): self.sp_model_kwargs = {} self.sp_model = load_spm(self.spm_file, self.sp_model_kwargs) def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: save_dir = Path(save_directory) assert save_dir.is_dir(), f"{save_directory} should be a directory" vocab_save_path = save_dir / ( (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["vocab_file"] ) spm_save_path = save_dir / ( (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["spm_file"] ) save_json(self.encoder, vocab_save_path)
3,276
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/speech_to_text/tokenization_speech_to_text.py
if os.path.abspath(self.spm_file) != os.path.abspath(spm_save_path) and os.path.isfile(self.spm_file): copyfile(self.spm_file, spm_save_path) elif not os.path.isfile(self.spm_file): with open(spm_save_path, "wb") as fi: content_spiece_model = self.sp_model.serialized_model_proto() fi.write(content_spiece_model) return (str(vocab_save_path), str(spm_save_path))
3,276
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/speech_to_text/tokenization_speech_to_text.py
class Speech2TextFeatureExtractor(SequenceFeatureExtractor): r""" Constructs a Speech2Text feature extractor. This feature extractor inherits from [`SequenceFeatureExtractor`] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. This class extracts mel-filter bank features from raw speech using TorchAudio if installed or using numpy otherwise, and applies utterance-level cepstral mean and variance normalization to the extracted features.
3,277
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/speech_to_text/feature_extraction_speech_to_text.py
Args: feature_size (`int`, *optional*, defaults to 80): The feature dimension of the extracted features. sampling_rate (`int`, *optional*, defaults to 16000): The sampling rate at which the audio files should be digitalized expressed in hertz (Hz). num_mel_bins (`int`, *optional*, defaults to 80): Number of Mel-frequency bins. padding_value (`float`, *optional*, defaults to 0.0): The value that is used to fill the padding vectors. do_ceptral_normalize (`bool`, *optional*, defaults to `True`): Whether or not to apply utterance-level cepstral mean and variance normalization to extracted features. normalize_means (`bool`, *optional*, defaults to `True`): Whether or not to zero-mean normalize the extracted features. normalize_vars (`bool`, *optional*, defaults to `True`): Whether or not to unit-variance normalize the extracted features. """
3,277
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/speech_to_text/feature_extraction_speech_to_text.py
model_input_names = ["input_features", "attention_mask"] def __init__( self, feature_size=80, sampling_rate=16000, num_mel_bins=80, padding_value=0.0, do_ceptral_normalize=True, normalize_means=True, normalize_vars=True, **kwargs, ): super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs) self.num_mel_bins = num_mel_bins self.do_ceptral_normalize = do_ceptral_normalize self.normalize_means = normalize_means self.normalize_vars = normalize_vars self.return_attention_mask = True
3,277
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/speech_to_text/feature_extraction_speech_to_text.py
if not is_speech_available(): mel_filters = mel_filter_bank( num_frequency_bins=256, num_mel_filters=self.num_mel_bins, min_frequency=20, max_frequency=sampling_rate // 2, sampling_rate=sampling_rate, norm=None, mel_scale="kaldi", triangularize_in_mel_space=True, ) self.mel_filters = np.pad(mel_filters, ((0, 1), (0, 0))) self.window = window_function(400, "povey", periodic=False)
3,277
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/speech_to_text/feature_extraction_speech_to_text.py
def _extract_fbank_features( self, waveform: np.ndarray, ) -> np.ndarray: """ Get mel-filter bank features using TorchAudio. Note that TorchAudio requires 16-bit signed integers as inputs and hence the waveform should not be normalized before feature extraction. """ waveform = waveform * (2**15) # Kaldi compliance: 16-bit signed integers if is_speech_available(): waveform = torch.from_numpy(waveform).unsqueeze(0) features = ta_kaldi.fbank(waveform, num_mel_bins=self.num_mel_bins, sample_frequency=self.sampling_rate) features = features.numpy() else: waveform = np.squeeze(waveform) features = spectrogram( waveform, self.window, frame_length=400, hop_length=160, fft_length=512, power=2.0, center=False, preemphasis=0.97,
3,277
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/speech_to_text/feature_extraction_speech_to_text.py
mel_filters=self.mel_filters, log_mel="log", mel_floor=1.192092955078125e-07, remove_dc_offset=True, ).T return features
3,277
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/speech_to_text/feature_extraction_speech_to_text.py
@staticmethod def utterance_cmvn( x: np.ndarray, input_length: int, normalize_means: Optional[bool] = True, normalize_vars: Optional[bool] = True, padding_value: float = 0.0, ) -> np.ndarray: # make sure we normalize float32 arrays if normalize_means: mean = x[:input_length].mean(axis=0) x = np.subtract(x, mean) if normalize_vars: std = x[:input_length].std(axis=0) x = np.divide(x, std) if input_length < x.shape[0]: x[input_length:] = padding_value # make sure array is in float32 x = x.astype(np.float32) return x
3,277
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/speech_to_text/feature_extraction_speech_to_text.py
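A quick numerical check of `utterance_cmvn` using random data: over the valid frames, the normalized features have roughly zero mean and unit variance per mel bin.

```python
# Sanity check of utterance-level CMVN on synthetic features.
import numpy as np
from transformers import Speech2TextFeatureExtractor

x = np.random.randn(100, 80).astype(np.float32) * 3.0 + 5.0   # 100 frames, 80 mel bins
normalized = Speech2TextFeatureExtractor.utterance_cmvn(x, input_length=100)

print(normalized.mean(axis=0).round(4))   # ~0 per feature dimension
print(normalized.std(axis=0).round(4))    # ~1 per feature dimension
```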
def normalize( self, input_features: List[np.ndarray], attention_mask: Optional[np.ndarray] = None ) -> List[np.ndarray]: lengths = attention_mask.sum(-1) if attention_mask is not None else [x.shape[0] for x in input_features] return [ self.utterance_cmvn(x, n, self.normalize_means, self.normalize_vars, self.padding_value) for x, n in zip(input_features, lengths) ]
3,277
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/speech_to_text/feature_extraction_speech_to_text.py
def __call__( self, raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]], padding: Union[bool, str, PaddingStrategy] = False, max_length: Optional[int] = None, truncation: bool = False, pad_to_multiple_of: Optional[int] = None, return_tensors: Optional[Union[str, TensorType]] = None, sampling_rate: Optional[int] = None, return_attention_mask: Optional[bool] = None, **kwargs, ) -> BatchFeature: """ Main method to featurize and prepare for the model one or several sequence(s).
3,277
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/speech_to_text/feature_extraction_speech_to_text.py
Args: raw_speech (`np.ndarray`, `List[float]`, `List[np.ndarray]`, `List[List[float]]`): The sequence or batch of sequences to be padded. Each sequence can be a numpy array, a list of float values, a list of numpy arrays or a list of list of float values. Must be mono channel audio, not stereo, i.e. single float per timestep. padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`): Select a strategy to pad the returned sequences (according to the model's padding side and padding index) among:
3,277
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/speech_to_text/feature_extraction_speech_to_text.py
- `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence is provided). - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different lengths). max_length (`int`, *optional*): Maximum length of the returned list and optionally padding length (see above). truncation (`bool`): Activates truncation to cut input sequences longer than *max_length* to *max_length*. pad_to_multiple_of (`int`, *optional*): If set will pad the sequence to a multiple of the provided value.
3,277
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/speech_to_text/feature_extraction_speech_to_text.py
This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability `>= 7.5` (Volta), or on TPUs which benefit from having sequence lengths be a multiple of 128. return_attention_mask (`bool`, *optional*): Whether to return the attention mask. If left to the default, will return the attention mask according to the specific feature_extractor's default. [What are attention masks?](../glossary#attention-mask) <Tip> For Speech2TextTransformer models, `attention_mask` should always be passed for batched inference, to avoid subtle bugs. </Tip> return_tensors (`str` or [`~utils.TensorType`], *optional*): If set, will return tensors instead of list of python integers. Acceptable values are:
3,277
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/speech_to_text/feature_extraction_speech_to_text.py
- `'tf'`: Return TensorFlow `tf.constant` objects. - `'pt'`: Return PyTorch `torch.Tensor` objects. - `'np'`: Return Numpy `np.ndarray` objects. sampling_rate (`int`, *optional*): The sampling rate at which the `raw_speech` input was sampled. It is strongly recommended to pass `sampling_rate` at the forward call to prevent silent errors. padding_value (`float`, *optional*, defaults to 0.0): The value that is used to fill the padding values / vectors. """
3,277
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/speech_to_text/feature_extraction_speech_to_text.py
if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of" f" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with" f" {self.sampling_rate} and not {sampling_rate}." ) else: logger.warning( "It is strongly recommended to pass the `sampling_rate` argument to this function. " "Failing to do so can result in silent errors that might be hard to debug." )
3,277
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/speech_to_text/feature_extraction_speech_to_text.py
is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1 if is_batched_numpy and len(raw_speech.shape) > 2: raise ValueError(f"Only mono-channel audio is supported for input to {self}") is_batched = is_batched_numpy or ( isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list))) ) if is_batched: raw_speech = [np.asarray(speech, dtype=np.float32) for speech in raw_speech] elif not is_batched and not isinstance(raw_speech, np.ndarray): raw_speech = np.asarray(raw_speech, dtype=np.float32) elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64): raw_speech = raw_speech.astype(np.float32) # always return batch if not is_batched: raw_speech = [raw_speech] # extract fbank features features = [self._extract_fbank_features(waveform) for waveform in raw_speech]
3,277
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/speech_to_text/feature_extraction_speech_to_text.py
# convert into correct format for padding encoded_inputs = BatchFeature({"input_features": features}) padded_inputs = self.pad( encoded_inputs, padding=padding, max_length=max_length, truncation=truncation, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, **kwargs, ) # make sure list is in array format input_features = padded_inputs.get("input_features") if isinstance(input_features[0], list): padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features] attention_mask = padded_inputs.get("attention_mask") if attention_mask is not None: padded_inputs["attention_mask"] = [np.asarray(array, dtype=np.int32) for array in attention_mask]
3,277
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/speech_to_text/feature_extraction_speech_to_text.py
# Utterance-level cepstral mean and variance normalization if self.do_ceptral_normalize: attention_mask = ( np.array(attention_mask, dtype=np.int32) if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD else None ) padded_inputs["input_features"] = self.normalize( padded_inputs["input_features"], attention_mask=attention_mask ) if return_tensors is not None: padded_inputs = padded_inputs.convert_to_tensors(return_tensors) return padded_inputs
3,277
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/speech_to_text/feature_extraction_speech_to_text.py
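Putting the pieces above together, a hedged end-to-end usage sketch of the feature extractor's `__call__`; the checkpoint name and the synthetic waveform are illustrative.

```python
# Illustrative usage of the Speech2Text feature extractor on fake audio.
import numpy as np
from transformers import Speech2TextFeatureExtractor

feature_extractor = Speech2TextFeatureExtractor.from_pretrained("facebook/s2t-small-librispeech-asr")

waveform = np.random.randn(16000).astype(np.float32)   # 1 second of fake 16 kHz audio
inputs = feature_extractor(waveform, sampling_rate=16000, padding=True, return_tensors="pt")

print(inputs["input_features"].shape)   # (batch, num_frames, 80)
print(inputs["attention_mask"].shape)   # (batch, num_frames)
```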
class Conv1dSubsampler(nn.Module): """ Convolutional subsampler: a stack of 1D convolution (along temporal dimension) followed by non-linear activation via gated linear units (https://arxiv.org/abs/1911.08460) """ def __init__(self, config): super(Conv1dSubsampler, self).__init__() self.config = config self.num_layers = config.num_conv_layers self.in_channels = config.input_feat_per_channel * config.input_channels self.mid_channels = config.conv_channels self.out_channels = config.d_model self.kernel_sizes = config.conv_kernel_sizes self.conv_layers = nn.ModuleList( nn.Conv1d( self.in_channels if i == 0 else self.mid_channels // 2, self.mid_channels if i < self.num_layers - 1 else self.out_channels * 2, kernel_size=k, stride=2, padding=k // 2, ) for i, k in enumerate(self.kernel_sizes) )
3,278
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/speech_to_text/modeling_speech_to_text.py
def forward(self, input_features): hidden_states = input_features.transpose(1, 2).contiguous() # -> B x (C x D) x T for conv in self.conv_layers: hidden_states = conv(hidden_states) hidden_states = nn.functional.glu(hidden_states, dim=1) hidden_states = hidden_states.transpose(1, 2).contiguous() # -> T x B x (C x D) return hidden_states
3,278
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/speech_to_text/modeling_speech_to_text.py
class Speech2TextSinusoidalPositionalEmbedding(nn.Module): """This module produces sinusoidal positional embeddings of any length.""" def __init__(self, num_positions: int, embedding_dim: int, padding_idx: Optional[int] = None): super().__init__() self.offset = 2 self.embedding_dim = embedding_dim self.padding_idx = padding_idx self.make_weights(num_positions + self.offset, embedding_dim, padding_idx) def make_weights(self, num_embeddings: int, embedding_dim: int, padding_idx: Optional[int] = None): emb_weights = self.get_embedding(num_embeddings, embedding_dim, padding_idx) if hasattr(self, "weights"): # in forward put the weights on the correct dtype and device of the param emb_weights = emb_weights.to(dtype=self.weights.dtype, device=self.weights.device) self.weights = nn.Parameter(emb_weights) self.weights.requires_grad = False self.weights.detach_()
3,279
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/speech_to_text/modeling_speech_to_text.py
@staticmethod def get_embedding(num_embeddings: int, embedding_dim: int, padding_idx: Optional[int] = None): """ Build sinusoidal embeddings. This matches the implementation in tensor2tensor, but differs slightly from the description in Section 3.5 of "Attention Is All You Need". """ half_dim = embedding_dim // 2 emb = math.log(10000) / (half_dim - 1) emb = torch.exp(torch.arange(half_dim, dtype=torch.int64).float() * -emb) emb = torch.arange(num_embeddings, dtype=torch.int64).float().unsqueeze(1) * emb.unsqueeze(0) emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1).view(num_embeddings, -1) if embedding_dim % 2 == 1: # zero pad emb = torch.cat([emb, torch.zeros(num_embeddings, 1)], dim=1) if padding_idx is not None: emb[padding_idx, :] = 0 return emb.to(torch.get_default_dtype())
3,279
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/speech_to_text/modeling_speech_to_text.py
@torch.no_grad() def forward(self, input_ids: torch.Tensor, past_key_values_length: int = 0): bsz, seq_len = input_ids.size() # Create the position ids from the input token ids. Any padded tokens remain padded. position_ids = self.create_position_ids_from_input_ids(input_ids, self.padding_idx, past_key_values_length).to( input_ids.device ) # expand embeddings if needed max_pos = self.padding_idx + 1 + seq_len if max_pos > self.weights.size(0): self.make_weights(max_pos + self.offset, self.embedding_dim, self.padding_idx) return self.weights.index_select(0, position_ids.view(-1)).view(bsz, seq_len, -1).detach()
3,279
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/speech_to_text/modeling_speech_to_text.py
def create_position_ids_from_input_ids( self, input_ids: torch.Tensor, padding_idx: int, past_key_values_length: Optional[int] = 0 ): """ Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding symbols are ignored. This is modified from fairseq's `utils.make_positions`. Args: x: torch.Tensor x: Returns: torch.Tensor """ # The series of casts and type-conversions here are carefully balanced to both work with ONNX export and XLA. mask = input_ids.ne(padding_idx).int() incremental_indices = (torch.cumsum(mask, dim=1).type_as(mask) + past_key_values_length) * mask return incremental_indices.long() + padding_idx
3,279
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/speech_to_text/modeling_speech_to_text.py
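A worked example of the position-id construction above (with `past_key_values_length=0`): padded positions stay at `padding_idx`, and real tokens count up from `padding_idx + 1`.

```python
# Worked example of create_position_ids_from_input_ids on a padded sequence.
import torch

padding_idx = 1
input_ids = torch.tensor([[5, 7, 9, 1, 1]])               # last two positions are padding
mask = input_ids.ne(padding_idx).int()                     # tensor([[1, 1, 1, 0, 0]])
position_ids = (torch.cumsum(mask, dim=1).type_as(mask) * mask).long() + padding_idx
print(position_ids)                                        # tensor([[2, 3, 4, 1, 1]])
```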
class Speech2TextAttention(nn.Module): """Multi-headed attention from 'Attention Is All You Need' paper""" def __init__( self, embed_dim: int, num_heads: int, dropout: float = 0.0, is_decoder: bool = False, bias: bool = True, is_causal: bool = False, config: Optional[Speech2TextConfig] = None, ): super().__init__() self.embed_dim = embed_dim self.num_heads = num_heads self.dropout = dropout self.head_dim = embed_dim // num_heads self.config = config if (self.head_dim * num_heads) != self.embed_dim: raise ValueError( f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}" f" and `num_heads`: {num_heads})." ) self.scaling = self.head_dim**-0.5 self.is_decoder = is_decoder self.is_causal = is_causal
3,280
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/speech_to_text/modeling_speech_to_text.py
self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias) def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int): return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous() def forward( self, hidden_states: torch.Tensor, key_value_states: Optional[torch.Tensor] = None, past_key_value: Optional[Tuple[torch.Tensor]] = None, attention_mask: Optional[torch.Tensor] = None, layer_head_mask: Optional[torch.Tensor] = None, output_attentions: bool = False, ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: """Input shape: Batch x Time x Channel"""
3,280
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/speech_to_text/modeling_speech_to_text.py
# if key_value_states are provided this layer is used as a cross-attention layer # for the decoder is_cross_attention = key_value_states is not None bsz, tgt_len, _ = hidden_states.size()
3,280
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/speech_to_text/modeling_speech_to_text.py
# get query proj query_states = self.q_proj(hidden_states) * self.scaling # get key, value proj # `past_key_value[0].shape[2] == key_value_states.shape[1]` # is checking that the `sequence_length` of the `past_key_value` is the same as # the provided `key_value_states` to support prefix tuning if ( is_cross_attention and past_key_value is not None and past_key_value[0].shape[2] == key_value_states.shape[1] ): # reuse k,v, cross_attentions key_states = past_key_value[0] value_states = past_key_value[1] elif is_cross_attention: # cross_attentions key_states = self._shape(self.k_proj(key_value_states), -1, bsz) value_states = self._shape(self.v_proj(key_value_states), -1, bsz) elif past_key_value is not None: # reuse k, v, self_attention key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
3,280
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/speech_to_text/modeling_speech_to_text.py
value_states = self._shape(self.v_proj(hidden_states), -1, bsz) key_states = torch.cat([past_key_value[0], key_states], dim=2) value_states = torch.cat([past_key_value[1], value_states], dim=2) else: # self_attention key_states = self._shape(self.k_proj(hidden_states), -1, bsz) value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
3,280
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/speech_to_text/modeling_speech_to_text.py
if self.is_decoder: # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states. # Further calls to cross_attention layer can then reuse all cross-attention # key/value_states (first "if" case) # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of # all previous decoder key/value_states. Further calls to uni-directional self-attention # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case) # if encoder bi-directional self-attention `past_key_value` is always `None` past_key_value = (key_states, value_states) proj_shape = (bsz * self.num_heads, -1, self.head_dim) query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape) key_states = key_states.reshape(*proj_shape) value_states = value_states.reshape(*proj_shape)
3,280
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/speech_to_text/modeling_speech_to_text.py
src_len = key_states.size(1) attn_weights = torch.bmm(query_states, key_states.transpose(1, 2)) if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len): raise ValueError( f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is" f" {attn_weights.size()}" ) if attention_mask is not None: if attention_mask.size() != (bsz, 1, tgt_len, src_len): raise ValueError( f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}" ) attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) attn_weights = nn.functional.softmax(attn_weights, dim=-1)
3,280
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/speech_to_text/modeling_speech_to_text.py
if layer_head_mask is not None: if layer_head_mask.size() != (self.num_heads,): raise ValueError( f"Head mask for a single layer should be of size {(self.num_heads,)}, but is" f" {layer_head_mask.size()}" ) attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view(bsz, self.num_heads, tgt_len, src_len) attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
3,280
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/speech_to_text/modeling_speech_to_text.py
if output_attentions: # this operation is a bit awkward, but it's required to # make sure that attn_weights keeps its gradient. # In order to do so, attn_weights have to be reshaped # twice and have to be reused in the following attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len) else: attn_weights_reshaped = None attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training) attn_output = torch.bmm(attn_probs, value_states) if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim): raise ValueError( f"`attn_output` should be of size {(bsz * self.num_heads, tgt_len, self.head_dim)}, but is" f" {attn_output.size()}" )
3,280
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/speech_to_text/modeling_speech_to_text.py
attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim) attn_output = attn_output.transpose(1, 2) # Use the `embed_dim` from the config (stored in the class) rather than `hidden_state` because `attn_output` can be # partitioned across GPUs when using tensor-parallelism. attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim) attn_output = self.out_proj(attn_output) return attn_output, attn_weights_reshaped, past_key_value
3,280
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/speech_to_text/modeling_speech_to_text.py
class Speech2TextEncoderLayer(nn.Module): def __init__(self, config: Speech2TextConfig): super().__init__() self.embed_dim = config.d_model self.self_attn = SPEECH_TO_TEXT_ATTENTION_CLASSES[config._attn_implementation]( embed_dim=self.embed_dim, num_heads=config.encoder_attention_heads, dropout=config.attention_dropout, config=config, ) self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim) self.dropout = config.dropout self.activation_fn = ACT2FN[config.activation_function] self.activation_dropout = config.activation_dropout self.fc1 = nn.Linear(self.embed_dim, config.encoder_ffn_dim) self.fc2 = nn.Linear(config.encoder_ffn_dim, self.embed_dim) self.final_layer_norm = nn.LayerNorm(self.embed_dim)
3,281
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/speech_to_text/modeling_speech_to_text.py
def forward( self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, layer_head_mask: torch.Tensor, output_attentions: bool = False, ) -> torch.Tensor: """ Args: hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` attention_mask (`torch.FloatTensor`): attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size `(encoder_attention_heads,)`. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. """ residual = hidden_states hidden_states = self.self_attn_layer_norm(hidden_states)
3,281
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/speech_to_text/modeling_speech_to_text.py
hidden_states, attn_weights, _ = self.self_attn( hidden_states=hidden_states, attention_mask=attention_mask, layer_head_mask=layer_head_mask, output_attentions=output_attentions, ) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states
3,281
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/speech_to_text/modeling_speech_to_text.py
residual = hidden_states hidden_states = self.final_layer_norm(hidden_states) hidden_states = self.activation_fn(self.fc1(hidden_states)) hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training) hidden_states = self.fc2(hidden_states) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states if hidden_states.dtype == torch.float16 and ( torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any() ): clamp_value = torch.finfo(hidden_states.dtype).max - 1000 hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value) outputs = (hidden_states,) if output_attentions: outputs += (attn_weights,) return outputs
3,281
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/speech_to_text/modeling_speech_to_text.py
class Speech2TextDecoderLayer(nn.Module): def __init__(self, config: Speech2TextConfig): super().__init__() self.embed_dim = config.d_model self.self_attn = SPEECH_TO_TEXT_ATTENTION_CLASSES[config._attn_implementation]( embed_dim=self.embed_dim, num_heads=config.decoder_attention_heads, dropout=config.attention_dropout, is_decoder=True, is_causal=True, config=config, ) self.dropout = config.dropout self.activation_fn = ACT2FN[config.activation_function] self.activation_dropout = config.activation_dropout
3,282
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/speech_to_text/modeling_speech_to_text.py
self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim) self.encoder_attn = SPEECH_TO_TEXT_ATTENTION_CLASSES[config._attn_implementation]( self.embed_dim, config.decoder_attention_heads, dropout=config.attention_dropout, is_decoder=True, config=config, ) self.encoder_attn_layer_norm = nn.LayerNorm(self.embed_dim) self.fc1 = nn.Linear(self.embed_dim, config.decoder_ffn_dim) self.fc2 = nn.Linear(config.decoder_ffn_dim, self.embed_dim) self.final_layer_norm = nn.LayerNorm(self.embed_dim)
3,282
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/speech_to_text/modeling_speech_to_text.py
def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, encoder_hidden_states: Optional[torch.Tensor] = None, encoder_attention_mask: Optional[torch.Tensor] = None, layer_head_mask: Optional[torch.Tensor] = None, cross_attn_layer_head_mask: Optional[torch.Tensor] = None, past_key_value: Optional[Tuple[torch.Tensor]] = None, output_attentions: Optional[bool] = False, use_cache: Optional[bool] = True, ) -> torch.Tensor: """ Args: hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` attention_mask (`torch.FloatTensor`): attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. encoder_hidden_states (`torch.FloatTensor`): cross attention input to the layer of shape `(batch, seq_len, embed_dim)`
3,282
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/speech_to_text/modeling_speech_to_text.py
encoder_attention_mask (`torch.FloatTensor`): encoder attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size `(encoder_attention_heads,)`. cross_attn_layer_head_mask (`torch.FloatTensor`): mask for cross-attention heads in a given layer of size `(decoder_attention_heads,)`. past_key_value (`Tuple(torch.FloatTensor)`): cached past key and value projection states output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. """ residual = hidden_states hidden_states = self.self_attn_layer_norm(hidden_states)
3,282
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/speech_to_text/modeling_speech_to_text.py
# Self Attention # decoder uni-directional self-attention cached key/values tuple is at positions 1,2 self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None # add present self-attn cache to positions 1,2 of present_key_value tuple hidden_states, self_attn_weights, present_key_value = self.self_attn( hidden_states=hidden_states, past_key_value=self_attn_past_key_value, attention_mask=attention_mask, layer_head_mask=layer_head_mask, output_attentions=output_attentions, ) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states
3,282
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/speech_to_text/modeling_speech_to_text.py
# Cross-Attention Block cross_attn_present_key_value = None cross_attn_weights = None if encoder_hidden_states is not None: residual = hidden_states hidden_states = self.encoder_attn_layer_norm(hidden_states)
3,282
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/speech_to_text/modeling_speech_to_text.py
# cross_attn cached key/values tuple is at positions 3,4 of present_key_value tuple cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None hidden_states, cross_attn_weights, cross_attn_present_key_value = self.encoder_attn( hidden_states=hidden_states, key_value_states=encoder_hidden_states, attention_mask=encoder_attention_mask, layer_head_mask=cross_attn_layer_head_mask, past_key_value=cross_attn_past_key_value, output_attentions=output_attentions, ) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states # add cross-attn to positions 3,4 of present_key_value tuple present_key_value = present_key_value + cross_attn_present_key_value
3,282
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/speech_to_text/modeling_speech_to_text.py
# Fully Connected residual = hidden_states hidden_states = self.final_layer_norm(hidden_states) hidden_states = self.activation_fn(self.fc1(hidden_states)) hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training) hidden_states = self.fc2(hidden_states) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states outputs = (hidden_states,) if output_attentions: outputs += (self_attn_weights, cross_attn_weights) if use_cache: outputs += (present_key_value,) return outputs
3,282
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/speech_to_text/modeling_speech_to_text.py
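Illustrative sketch (not part of the file above): the comments in the decoder layer describe a per-layer cache tuple whose first two entries hold the self-attention key/value states and whose last two hold the cross-attention key/value states. The tensor sizes below are made-up placeholders; only the slicing mirrors the code.

import torch

bsz, num_heads, tgt_len, head_dim = 2, 4, 5, 16
enc_len = 11

# hypothetical per-layer cache: (self_k, self_v, cross_k, cross_v)
past_key_value = (
    torch.zeros(bsz, num_heads, tgt_len, head_dim),  # self-attention keys
    torch.zeros(bsz, num_heads, tgt_len, head_dim),  # self-attention values
    torch.zeros(bsz, num_heads, enc_len, head_dim),  # cross-attention keys
    torch.zeros(bsz, num_heads, enc_len, head_dim),  # cross-attention values
)

self_attn_past_key_value = past_key_value[:2]    # what self_attn receives
cross_attn_past_key_value = past_key_value[-2:]  # what encoder_attn receives
assert len(self_attn_past_key_value) == 2 and len(cross_attn_past_key_value) == 2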
class Speech2TextPreTrainedModel(PreTrainedModel):
    config_class = Speech2TextConfig
    base_model_prefix = "model"
    main_input_name = "input_features"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        std = self.config.init_std
        if isinstance(module, (nn.Linear, nn.Conv1d)):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()

    def _get_feat_extract_output_lengths(self, input_lengths: torch.LongTensor):
        """
        Computes the output length of the convolutional layers
        """
        for i in range(self.config.num_conv_layers):
            input_lengths = (input_lengths - 1) // 2 + 1

        return input_lengths
3,283
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/speech_to_text/modeling_speech_to_text.py
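A standalone sketch of the subsampling arithmetic in `_get_feat_extract_output_lengths`: each stride-2 convolution maps a length L to (L - 1) // 2 + 1. The helper name and the example length are illustrative; the layer count of two is only the config's usual default.

def feat_extract_output_length(input_length: int, num_conv_layers: int = 2) -> int:
    # apply the same length recurrence once per convolutional layer
    for _ in range(num_conv_layers):
        input_length = (input_length - 1) // 2 + 1
    return input_length

# e.g. 584 fbank frames -> 292 -> 146 encoder positions with two conv layers
print(feat_extract_output_length(584))  # 146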
def _get_feature_vector_attention_mask(self, feature_vector_length, attention_mask):
    # generate creates a 3D attention mask because of the shape of input_features;
    # convert it to 2D if that's the case
    if len(attention_mask.shape) > 2:
        attention_mask = attention_mask[:, :, -1]

    subsampled_lengths = self._get_feat_extract_output_lengths(attention_mask.sum(-1))
    bsz = attention_mask.size()[0]
    attention_mask = torch.zeros(
        (bsz, feature_vector_length), dtype=attention_mask.dtype, device=attention_mask.device
    )

    # these two operations make sure that all values
    # before the output length indices are attended to
    attention_mask[(torch.arange(bsz, device=attention_mask.device), subsampled_lengths - 1)] = 1
    attention_mask = attention_mask.flip([-1]).cumsum(-1).flip([-1]).long()
    return attention_mask
3,283
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/speech_to_text/modeling_speech_to_text.py
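A small illustration of the flip/cumsum/flip trick used above: placing a single 1 at the last valid index of each row and accumulating from the right yields a mask that is 1 for every position up to that index. The lengths below are arbitrary example values.

import torch

feature_vector_length = 6
subsampled_lengths = torch.tensor([3, 5])
bsz = subsampled_lengths.shape[0]

mask = torch.zeros((bsz, feature_vector_length), dtype=torch.long)
mask[(torch.arange(bsz), subsampled_lengths - 1)] = 1
mask = mask.flip([-1]).cumsum(-1).flip([-1]).long()
print(mask)
# tensor([[1, 1, 1, 0, 0, 0],
#         [1, 1, 1, 1, 1, 0]])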
class Speech2TextEncoder(Speech2TextPreTrainedModel):
    """
    Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a
    [`Speech2TextEncoderLayer`].

    Args:
        config: Speech2TextConfig
        embed_tokens (nn.Embedding): output embedding
    """

    def __init__(self, config: Speech2TextConfig):
        super().__init__(config)

        self.dropout = config.dropout
        self.layerdrop = config.encoder_layerdrop

        embed_dim = config.d_model
        self.padding_idx = config.pad_token_id
        self.max_source_positions = config.max_source_positions
        self.embed_scale = math.sqrt(embed_dim) if config.scale_embedding else 1.0

        self.conv = Conv1dSubsampler(config)
3,284
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/speech_to_text/modeling_speech_to_text.py
self.embed_positions = Speech2TextSinusoidalPositionalEmbedding(
    self.max_source_positions,
    embed_dim,
    self.padding_idx,
)
self.layers = nn.ModuleList([Speech2TextEncoderLayer(config) for _ in range(config.encoder_layers)])
self.layer_norm = nn.LayerNorm(config.d_model)

self.gradient_checkpointing = False
# Initialize weights and apply final processing
self.post_init()
3,284
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/speech_to_text/modeling_speech_to_text.py
def forward(
    self,
    input_features,
    attention_mask=None,
    head_mask=None,
    output_attentions=None,
    output_hidden_states=None,
    return_dict=None,
):
    r"""
    Args:
        input_features (`torch.FloatTensor` of shape `(batch_size, sequence_length, feature_size)`):
            Float values of fbank features extracted from the raw speech waveform. Raw speech waveform can be
            obtained by loading a `.flac` or `.wav` audio file into an array of type `List[float]` or a
            `numpy.ndarray`, *e.g.* via the soundfile library (`pip install soundfile`). To prepare the array
            into `input_features`, the [`AutoFeatureExtractor`] should be used for extracting the fbank
            features, padding and conversion into a tensor of type `torch.FloatTensor`. See
            [`~Speech2TextFeatureExtractor.__call__`]
3,284
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/speech_to_text/modeling_speech_to_text.py
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
    Mask to avoid performing convolution and attention on padding token indices. Mask values selected in
    `[0, 1]`:
3,284
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/speech_to_text/modeling_speech_to_text.py
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.

[What are attention masks?](../glossary#attention-mask)
head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*):
    Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:

    - 1 indicates the head is **not masked**,
    - 0 indicates the head is **masked**.
3,284
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/speech_to_text/modeling_speech_to_text.py
output_attentions (`bool`, *optional*):
    Whether or not to return the attentions tensors of all attention layers. See `attentions` under
    returned tensors for more detail.
output_hidden_states (`bool`, *optional*):
    Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
    for more detail.
return_dict (`bool`, *optional*):
    Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
    output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict

inputs_embeds = self.conv(input_features)
3,284
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/speech_to_text/modeling_speech_to_text.py
inputs_embeds = self.embed_scale * inputs_embeds
3,284
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/speech_to_text/modeling_speech_to_text.py
# subsample attention mask if necessary
if attention_mask is not None:
    attention_mask = self._get_feature_vector_attention_mask(inputs_embeds.shape[1], attention_mask)
    padding_mask = attention_mask.ne(1).long()
else:
    padding_mask = torch.zeros(inputs_embeds.shape[:2], dtype=torch.long, device=inputs_embeds.device)

embed_pos = self.embed_positions(padding_mask)

hidden_states = inputs_embeds + embed_pos
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)

# expand attention_mask
if attention_mask is not None:
    # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
    attention_mask = _prepare_4d_attention_mask(attention_mask, inputs_embeds.dtype)

encoder_states = () if output_hidden_states else None
all_attentions = () if output_attentions else None
3,284
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/speech_to_text/modeling_speech_to_text.py
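A simplified sketch of what the 4D expansion step is assumed to do (not the helper's actual source): a `[bsz, src_len]` mask of 1s and 0s becomes a `[bsz, 1, tgt_len, src_len]` additive mask whose padded positions hold the most negative value representable in the dtype, so they are suppressed in the attention softmax.

import torch

mask_2d = torch.tensor([[1, 1, 0]])  # last position is padding
dtype = torch.float32

inverted = 1.0 - mask_2d.to(dtype)                                  # 1 where padded
mask_4d = inverted[:, None, None, :].expand(1, 1, 3, 3) * torch.finfo(dtype).min
print(mask_4d[0, 0])  # rows are queries; the padded key column is ~-3.4e38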
# check if head_mask has a correct number of layers specified if desired
if head_mask is not None:
    assert head_mask.size()[0] == (
        len(self.layers)
    ), f"The head_mask should be specified for {len(self.layers)} layers, but it is for {head_mask.size()[0]}."

for idx, encoder_layer in enumerate(self.layers):
    if output_hidden_states:
        encoder_states = encoder_states + (hidden_states,)
    # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
    to_drop = False
    if self.training:
        dropout_probability = torch.rand([])
        if dropout_probability < self.layerdrop:  # skip the layer
            to_drop = True
3,284
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/speech_to_text/modeling_speech_to_text.py
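Toy illustration of the LayerDrop check above: during training, each encoder layer is skipped independently with probability `layerdrop` (typically 0.0 unless the config enables it). The layer count and probability below are made-up example values.

import torch

layerdrop = 0.1
num_layers = 12
skipped = [bool(torch.rand([]) < layerdrop) for _ in range(num_layers)]
print(skipped)  # on average roughly one in ten entries is True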
if to_drop:
    layer_outputs = (None, None)
else:
    if self.gradient_checkpointing and self.training:
        layer_outputs = self._gradient_checkpointing_func(
            encoder_layer.__call__,
            hidden_states,
            attention_mask,
            (head_mask[idx] if head_mask is not None else None),
            output_attentions,
        )
    else:
        layer_outputs = encoder_layer(
            hidden_states,
            attention_mask,
            layer_head_mask=(head_mask[idx] if head_mask is not None else None),
            output_attentions=output_attentions,
        )

    hidden_states = layer_outputs[0]

if output_attentions:
    all_attentions = all_attentions + (layer_outputs[1],)
3,284
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/speech_to_text/modeling_speech_to_text.py
hidden_states = self.layer_norm(hidden_states)
if output_hidden_states:
    encoder_states = encoder_states + (hidden_states,)

if not return_dict:
    return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None)
return BaseModelOutput(
    last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions
)
3,284
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/speech_to_text/modeling_speech_to_text.py
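Hedged usage sketch for the encoder as a whole, assuming a public Speech2Text checkpoint such as "facebook/s2t-small-librispeech-asr" and that the feature extractor returns both `input_features` and `attention_mask` (the fake waveform and shapes are illustrative only).

import torch
from transformers import AutoFeatureExtractor, Speech2TextModel

checkpoint = "facebook/s2t-small-librispeech-asr"
model = Speech2TextModel.from_pretrained(checkpoint)
feature_extractor = AutoFeatureExtractor.from_pretrained(checkpoint)

waveform = torch.randn(16000).numpy()  # one second of fake 16 kHz audio
inputs = feature_extractor(waveform, sampling_rate=16000, return_tensors="pt")

with torch.no_grad():
    encoder_outputs = model.get_encoder()(
        input_features=inputs.input_features,
        attention_mask=inputs.attention_mask,
        return_dict=True,
    )
print(encoder_outputs.last_hidden_state.shape)  # (batch, subsampled_length, d_model)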
class Speech2TextDecoder(Speech2TextPreTrainedModel):
    """
    Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a [`Speech2TextDecoderLayer`]

    Args:
        config: Speech2TextConfig
        embed_tokens (nn.Embedding): output embedding
    """

    def __init__(self, config: Speech2TextConfig):
        super().__init__(config)
        self.dropout = config.dropout
        self.layerdrop = config.decoder_layerdrop
        self.padding_idx = config.pad_token_id
        self.max_target_positions = config.max_target_positions
        self.embed_scale = math.sqrt(config.d_model) if config.scale_embedding else 1.0

        self.embed_tokens = nn.Embedding(config.vocab_size, config.d_model, self.padding_idx)

        self.embed_positions = Speech2TextSinusoidalPositionalEmbedding(
            self.max_target_positions,
            config.d_model,
            self.padding_idx,
        )
3,285
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/speech_to_text/modeling_speech_to_text.py
    self.layers = nn.ModuleList([Speech2TextDecoderLayer(config) for _ in range(config.decoder_layers)])
    self.layer_norm = nn.LayerNorm(config.d_model)

    self.gradient_checkpointing = False
    # Initialize weights and apply final processing
    self.post_init()

def get_input_embeddings(self):
    return self.embed_tokens

def set_input_embeddings(self, value):
    self.embed_tokens = value
3,285
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/speech_to_text/modeling_speech_to_text.py
def forward(
    self,
    input_ids=None,
    attention_mask=None,
    encoder_hidden_states=None,
    encoder_attention_mask=None,
    head_mask=None,
    cross_attn_head_mask=None,
    past_key_values=None,
    inputs_embeds=None,
    use_cache=None,
    output_attentions=None,
    output_hidden_states=None,
    return_dict=None,
):
    r"""
    Args:
        input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
            Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
            provide it.

            Indices can be obtained using [`Speech2TextTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.
3,285
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/speech_to_text/modeling_speech_to_text.py
[What are input IDs?](../glossary#input-ids)
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
    Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

    - 1 for tokens that are **not masked**,
    - 0 for tokens that are **masked**.

    [What are attention masks?](../glossary#attention-mask)
encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*):
    Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
    of the decoder.
encoder_attention_mask (`torch.LongTensor` of shape `(batch_size, encoder_sequence_length)`, *optional*):
    Mask to avoid performing cross-attention on padding tokens indices of encoder input_ids. Mask values
    selected in `[0, 1]`:
3,285
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/speech_to_text/modeling_speech_to_text.py
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.

[What are attention masks?](../glossary#attention-mask)
head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
    Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:

    - 1 indicates the head is **not masked**,
    - 0 indicates the head is **masked**.

cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
    Mask to nullify selected heads of the attention modules in encoder to avoid performing cross-attention
    on hidden heads. Mask values selected in `[0, 1]`:

    - 1 indicates the head is **not masked**,
    - 0 indicates the head is **masked**.
3,285
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/speech_to_text/modeling_speech_to_text.py
past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
    Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of
    shape `(batch_size, num_heads, sequence_length, embed_size_per_head)` and 2 additional tensors of
    shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.

    Contains pre-computed hidden-states (key and values in the self-attention blocks and in the
    cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
3,285
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/speech_to_text/modeling_speech_to_text.py
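Shape sketch for `past_key_values` as documented above: one 4-tuple per decoder layer, with the self-attention states first and the cross-attention states last. All sizes below are illustrative placeholders, not values read from any config.

import torch

num_layers, bsz, num_heads, head_dim = 6, 1, 4, 64
tgt_len, enc_len = 7, 42

past_key_values = tuple(
    (
        torch.zeros(bsz, num_heads, tgt_len, head_dim),  # self-attn keys
        torch.zeros(bsz, num_heads, tgt_len, head_dim),  # self-attn values
        torch.zeros(bsz, num_heads, enc_len, head_dim),  # cross-attn keys
        torch.zeros(bsz, num_heads, enc_len, head_dim),  # cross-attn values
    )
    for _ in range(num_layers)
)
assert len(past_key_values) == num_layers and len(past_key_values[0]) == 4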
If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
`decoder_input_ids` of shape `(batch_size, sequence_length)`.
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
    Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
    This is useful if you want more control over how to convert `input_ids` indices into associated vectors
    than the model's internal embedding lookup matrix.
output_attentions (`bool`, *optional*):
    Whether or not to return the attentions tensors of all attention layers. See `attentions` under
    returned tensors for more detail.
output_hidden_states (`bool`, *optional*):
3,285
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/speech_to_text/modeling_speech_to_text.py
    Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
    for more detail.
return_dict (`bool`, *optional*):
    Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
    output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
use_cache = use_cache if use_cache is not None else self.config.use_cache
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
3,285
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/speech_to_text/modeling_speech_to_text.py