# /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/clvp/number_normalizer.py

    def convert_to_ascii(self, text: str) -> str:
        """
        Converts unicode to ascii
        """
        return text.encode("ascii", "ignore").decode("utf-8")

    def _expand_dollars(self, m: "re.Match") -> str:
        """
        This method is used to expand numerical dollar values into spoken words.
        """
        match = m.group(1)
        parts = match.split(".")
        if len(parts) > 2:
            return match + " dollars"  # Unexpected format
        dollars = int(parts[0]) if parts[0] else 0
        cents = int(parts[1]) if len(parts) > 1 and parts[1] else 0
        if dollars and cents:
            dollar_unit = "dollar" if dollars == 1 else "dollars"
            cent_unit = "cent" if cents == 1 else "cents"
            return "%s %s, %s %s" % (dollars, dollar_unit, cents, cent_unit)
        elif dollars:
            dollar_unit = "dollar" if dollars == 1 else "dollars"
            return "%s %s" % (dollars, dollar_unit)
        elif cents:
            cent_unit = "cent" if cents == 1 else "cents"
            return "%s %s" % (cents, cent_unit)
        else:
            return "zero dollars"

    def _remove_commas(self, m: "re.Match") -> str:
        """
        This method is used to remove commas from sentences.
        """
        return m.group(1).replace(",", "")
    def _expand_decimal_point(self, m: "re.Match") -> str:
        """
        This method is used to expand '.' into the spoken word ' point '.
        """
        return m.group(1).replace(".", " point ")

    def _expand_ordinal(self, num: "re.Match") -> str:
        """
        This method is used to expand ordinals such as '1st', '2nd' into spoken words.
        """
        ordinal_suffixes = {1: "st", 2: "nd", 3: "rd"}

        num = int(num.group(0)[:-2])
        if 10 <= num % 100 <= 20:
            suffix = "th"
        else:
            suffix = ordinal_suffixes.get(num % 10, "th")
        return self.number_to_words(num) + suffix

    def _expand_number(self, m: "re.Match") -> str:
        """
        This method acts as a preprocessing step for numbers between 1000 and 3000 (same as the original repository,
        link : https://github.com/neonbjb/tortoise-tts/blob/4003544b6ff4b68c09856e04d3eff9da26d023c2/tortoise/utils/tokenizer.py#L86)
        """
        num = int(m.group(0))
        if 1000 < num < 3000:
            if num == 2000:
                return "two thousand"
            elif 2000 < num < 2010:
                return "two thousand " + self.number_to_words(num % 100)
            elif num % 100 == 0:
                return self.number_to_words(num // 100) + " hundred"
            else:
                return self.number_to_words(num)
        else:
            return self.number_to_words(num)
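    # Editor's note (hedged trace of `_expand_number`, assuming `number_to_words`,
    # defined elsewhere in this class, spells out plain integers):
    #     2000 -> "two thousand"
    #     2005 -> "two thousand " + number_to_words(5)   (e.g. "two thousand five")
    #     1500 -> number_to_words(15) + " hundred"       (e.g. "fifteen hundred")
    #     1999 -> number_to_words(1999)                  (falls through to the default)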
    def normalize_numbers(self, text: str) -> str:
        """
        This method is used to normalize numbers within a text, such as converting the numbers to words and removing
        commas.
        """
        text = re.sub(re.compile(r"([0-9][0-9\,]+[0-9])"), self._remove_commas, text)
        text = re.sub(re.compile(r"£([0-9\,]*[0-9]+)"), r"\1 pounds", text)
        text = re.sub(re.compile(r"\$([0-9\.\,]*[0-9]+)"), self._expand_dollars, text)
        text = re.sub(re.compile(r"([0-9]+\.[0-9]+)"), self._expand_decimal_point, text)
        text = re.sub(re.compile(r"[0-9]+(st|nd|rd|th)"), self._expand_ordinal, text)
        text = re.sub(re.compile(r"[0-9]+"), self._expand_number, text)
        return text

    def expand_abbreviations(self, text: str) -> str:
        """
        Expands abbreviated words.
        """
        for regex, replacement in self._abbreviations:
            text = re.sub(regex, replacement, text)
        return text
    def collapse_whitespace(self, text: str) -> str:
        """
        Removes multiple whitespaces
        """
        return re.sub(re.compile(r"\s+"), " ", text)

    def __call__(self, text):
        """
        Converts text to ascii, numbers / number-like quantities to their spelt-out counterparts and expands
        abbreviations
        """
        text = self.convert_to_ascii(text)
        text = text.lower()
        text = self.normalize_numbers(text)
        text = self.expand_abbreviations(text)
        text = self.collapse_whitespace(text)
        text = text.replace('"', "")

        return text
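# Editor's usage sketch (hedged, not part of the original file): the methods
# above read as members of the `EnglishNormalizer` class that
# `ClvpTokenizer.normalizer` instantiates below. Assuming that, a minimal
# end-to-end call looks like:
#
#     normalizer = EnglishNormalizer()
#     normalizer('She paid $1.50 for 2,000 apples.')
#     # -> roughly "she paid one dollar, fifty cents for two thousand apples."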
# /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/clvp/tokenization_clvp.py


class ClvpTokenizer(PreTrainedTokenizer):
    """
    Construct a CLVP tokenizer. Based on byte-level Byte-Pair-Encoding.

    This tokenizer has been trained to treat spaces like parts of the tokens (a bit like sentencepiece) so a word will
    be encoded differently whether it is at the beginning of the sentence (without space) or not:

    ```python
    >>> from transformers import ClvpTokenizer

    >>> tokenizer = ClvpTokenizer.from_pretrained("susnato/clvp_dev")
    >>> tokenizer("Hello world")["input_ids"]
    [62, 84, 28, 2, 179, 79]

    >>> tokenizer(" Hello world")["input_ids"]
    [2, 62, 84, 28, 2, 179, 79]
    ```

    You can get around that behavior by passing `add_prefix_space=True` when instantiating this tokenizer or when you
    call it on some text, but since the model was not pretrained this way, it might yield a decrease in performance.

    <Tip>

    When used with `is_split_into_words=True`, this tokenizer will add a space before each word (even the first one).

    </Tip>
    This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer
    to this superclass for more information regarding those methods.
    Args:
        vocab_file (`str`):
            Path to the vocabulary file.
        merges_file (`str`):
            Path to the merges file.
        errors (`str`, *optional*, defaults to `"replace"`):
            Paradigm to follow when decoding bytes to UTF-8. See
            [bytes.decode](https://docs.python.org/3/library/stdtypes.html#bytes.decode) for more information.
        unk_token (`str`, *optional*, defaults to `"[UNK]"`):
            The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be
            this token instead.
        bos_token (`str`, *optional*, defaults to `"<|endoftext|>"`):
            The beginning of sequence token.
        eos_token (`str`, *optional*, defaults to `"[STOP]"`):
            The end of sequence token.
        pad_token (`str`, *optional*, defaults to `"[STOP]"`):
            The pad token of the sequence.
        add_prefix_space (`bool`, *optional*, defaults to `False`):
            Whether or not to add an initial space to the input. This lets you treat the leading word like any other
            word. (The CLVP tokenizer detects the beginning of words by the preceding space.)
        add_bos_token (`bool`, *optional*, defaults to `False`):
            Whether to add `bos_token` in front of the sequence when `add_special_tokens=True`.
        add_eos_token (`bool`, *optional*, defaults to `False`):
            Whether to add `eos_token` at the end of the sequence when `add_special_tokens=True`.
    """
    vocab_files_names = VOCAB_FILES_NAMES
    model_input_names = [
        "input_ids",
        "attention_mask",
    ]

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        unk_token="[UNK]",
        bos_token="<|endoftext|>",
        eos_token="[STOP]",
        pad_token="[STOP]",
        add_prefix_space=False,
        add_bos_token=False,
        add_eos_token=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, special=True) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, special=True) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, special=True) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, special=True) if isinstance(pad_token, str) else pad_token

        self.add_bos_token = add_bos_token
        self.add_eos_token = add_eos_token
        self._normalizer = None
        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""")
        super().__init__(
            errors=errors,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            add_prefix_space=add_prefix_space,
            add_bos_token=add_bos_token,
            add_eos_token=add_eos_token,
            **kwargs,
        )

    @property
    def vocab_size(self):
        return len(self.encoder)

    @property
    def normalizer(self):
        if self._normalizer is None:
            self._normalizer = EnglishNormalizer()
        return self._normalizer

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    # Copied from transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer.bpe
    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token
        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j
                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word

    # Copied from transformers.models.llama.tokenization_llama.LlamaTokenizer.build_inputs_with_special_tokens
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        bos_token_id = [self.bos_token_id] if self.add_bos_token else []
        eos_token_id = [self.eos_token_id] if self.add_eos_token else []

        output = bos_token_id + token_ids_0 + eos_token_id
        if token_ids_1 is not None:
            output = output + bos_token_id + token_ids_1 + eos_token_id

        return output

    # Copied from transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer.get_special_tokens_mask
    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """
        Retrieves sequence ids from a token list that has no special tokens added. This method is called when adding
        special tokens using the tokenizer `prepare_for_model` or `encode_plus` methods.

        Args:
            token_ids_0 (`List[int]`):
                List of IDs.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.
            already_has_special_tokens (`bool`, *optional*, defaults to `False`):
                Whether or not the token list is already formatted with special tokens for the model.
        Returns:
            `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
        """
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if not self.add_bos_token:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=False
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0))
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1))
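    # Editor's note (hedged sketch): with `add_bos_token=True` and
    # `add_eos_token=False`, `build_inputs_with_special_tokens([5, 6])` returns
    # `[bos_token_id, 5, 6]` and `get_special_tokens_mask([5, 6])` returns
    # `[1, 0, 0]`; the token ids 5 and 6 are made up for illustration.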
    def _tokenize(self, text):
        """Tokenize a string."""
        bpe_tokens = []
        text = self.normalizer(text)
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)

            # if the token is "Ġ" we replace it with "[SPACE]" (if "[SPACE]" is present in the vocab), otherwise we keep the "Ġ".
            bpe_tokens.extend(
                "[SPACE]" if bpe_token == "\u0120" and "[SPACE]" in self.encoder.keys() else bpe_token
                for bpe_token in self.bpe(token).split(" ")
            )

        return bpe_tokens
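    # Editor's note (hedged): `bytes_to_unicode` maps the space byte to the
    # printable "Ġ" (U+0120) symbol, so after normalization " hello" becomes
    # "Ġhello" before the BPE merges run; the branch above then swaps a bare
    # "Ġ" token for the literal "[SPACE]" token whenever the vocabulary
    # defines one.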
    # Copied from transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer._convert_token_to_id
    def _convert_token_to_id(self, token):
        """Converts a token (str) into an id using the vocab."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    # Copied from transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer._convert_id_to_token
    def _convert_id_to_token(self, index):
        """Converts an index (integer) into a token (str) using the vocab."""
        return self.decoder.get(index)

    # Copied from transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer.convert_tokens_to_string
    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) into a single string."""
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text
    def clean_up_tokenization(self, text):
        text = "".join(text)
        vocab_tokens = list(self.encoder.keys()) + list(self.added_tokens_encoder.keys())

        text = text.replace("[SPACE]", " ") if "[SPACE]" in vocab_tokens else text
        text = text.replace("[STOP]", " ") if "[STOP]" in vocab_tokens else text

        # collapse runs of spaces left behind by the replacements above
        text = text.replace(self.unk_token, "").replace("   ", " ").replace("  ", " ")
        return text
    # Copied from transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer.save_vocabulary
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file
# /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/clvp/feature_extraction_clvp.py


class ClvpFeatureExtractor(SequenceFeatureExtractor):
    r"""
    Constructs a CLVP feature extractor.

    This feature extractor inherits from [`~feature_extraction_sequence_utils.SequenceFeatureExtractor`] which
    contains most of the main methods. Users should refer to this superclass for more information regarding those
    methods.

    This class extracts log-mel-spectrogram features from raw speech using a custom numpy implementation of the
    `Short Time Fourier Transform` which should match pytorch's `torch.stft` equivalent.
    Args:
        feature_size (`int`, *optional*, defaults to 80):
            The feature dimension of the extracted features.
        sampling_rate (`int`, *optional*, defaults to 22050):
            The sampling rate at which the audio files should be digitized, expressed in hertz (Hz).
        default_audio_length (`int`, *optional*, defaults to 6):
            The default length of raw audio in seconds. If `max_length` is not set during `__call__` then it will
            automatically be set to `default_audio_length * self.sampling_rate`.
        hop_length (`int`, *optional*, defaults to 256):
            Length of the overlapping windows for the STFT used to obtain the Mel Frequency coefficients.
        chunk_length (`int`, *optional*, defaults to 30):
            The maximum number of chunks of `sampling_rate` samples used to trim and pad longer or shorter audio
            sequences.
        n_fft (`int`, *optional*, defaults to 1024):
            Size of the Fourier transform.
        padding_value (`float`, *optional*, defaults to 0.0):
            Padding value used to pad the audio. Should correspond to silences.
        mel_norms (`list` of length `feature_size`, *optional*):
            If `mel_norms` is provided then it will be used to normalize the log-mel spectrograms along each
            mel-filter.
        return_attention_mask (`bool`, *optional*, defaults to `False`):
            Whether to return the attention mask. If left to the default (`False`), no attention mask is returned.
    [What are attention masks?](../glossary#attention-mask)
    """

    model_input_names = ["input_features", "attention_mask"]
    def __init__(
        self,
        feature_size=80,
        sampling_rate=22050,
        default_audio_length=6,
        hop_length=256,
        chunk_length=30,
        n_fft=1024,
        padding_value=0.0,
        mel_norms=None,
        return_attention_mask=False,  # pad inputs to max length with silence token (zero) and no attention mask
        **kwargs,
    ):
        super().__init__(
            feature_size=feature_size,
            sampling_rate=sampling_rate,
            padding_value=padding_value,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )
        self.n_fft = n_fft
        self.hop_length = hop_length
        self.chunk_length = chunk_length
        self.n_samples = chunk_length * sampling_rate
        self.nb_max_frames = self.n_samples // hop_length
        self.sampling_rate = sampling_rate
        self.default_audio_length = default_audio_length
        self.mel_norms = mel_norms
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + (n_fft // 2),
            num_mel_filters=feature_size,
            min_frequency=0.0,
            max_frequency=8000.0,
            sampling_rate=sampling_rate,
            norm="slaney",
            mel_scale="htk",
        )
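    # Editor's note (hedged arithmetic check): with the defaults above,
    # n_samples = 30 * 22050 = 661500 samples and
    # nb_max_frames = 661500 // 256 = 2583 STFT frames, while the default
    # `max_length` used in `__call__` below is 6 * 22050 = 132300 samples.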
    def _np_extract_fbank_features(self, waveform: np.array) -> np.ndarray:
        """
        This method first computes the log-mel spectrogram of the provided audio, then applies normalization along
        each mel-filterbank if `mel_norms` is provided.
        """
        log_spec = spectrogram(
            waveform,
            window_function(self.n_fft, "hann"),
            frame_length=self.n_fft,
            hop_length=self.hop_length,
            power=2.0,
            mel_filters=self.mel_filters,
            log_mel=None,
        )
        log_spec = np.log(np.clip(log_spec, a_min=1e-5, a_max=None))

        if self.mel_norms is not None:
            log_spec = log_spec / np.array(self.mel_norms)[:, None]

        return log_spec
    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        sampling_rate: Optional[int] = None,
        truncation: bool = True,
        pad_to_multiple_of: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = True,
        padding: Optional[str] = "max_length",
        max_length: Optional[int] = None,
        **kwargs,
    ) -> BatchFeature:
        """
        `ClvpFeatureExtractor` is used to extract various voice-specific properties, such as the pitch and tone of
        the voice, speaking speed, and even speaking defects like a lisp or stuttering, from a sample voice or
        `raw_speech`.

        First the voice is padded or truncated so that it becomes a waveform `self.default_audio_length` seconds
        long, and then the log-mel spectrogram is extracted from it.
        Args:
            raw_speech (`np.ndarray`, `List[float]`, `List[np.ndarray]`, `List[List[float]]`):
                The sequence or batch of sequences to be padded. Each sequence can be a numpy array, a list of float
                values, a list of numpy arrays or a list of lists of float values. Must be mono channel audio, not
                stereo, i.e. a single float per timestep.
            sampling_rate (`int`, *optional*):
                The sampling rate at which the `raw_speech` input was sampled. It is strongly recommended to pass
                `sampling_rate` at the forward call to prevent silent errors and to allow the automatic speech
                recognition pipeline to work.
            truncation (`bool`, *optional*, defaults to `True`):
                Activates truncation to cut input sequences longer than *max_length* to *max_length*.
            pad_to_multiple_of (`int`, *optional*):
                If set will pad the sequence to a multiple of the provided value.
                This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute
                capability `>= 7.0` (Volta), or on TPUs which benefit from having sequence lengths be a multiple of
                128.
            return_attention_mask (`bool`, *optional*, defaults to `True`):
                Whether to return the attention mask. If left to the default, it will return the attention mask.

                [What are attention masks?](../glossary#attention-mask)
            return_tensors (`str` or [`~utils.TensorType`], *optional*):
                If set, will return tensors instead of lists of python integers. Acceptable values are:
                - `'tf'`: Return TensorFlow `tf.constant` objects.
                - `'pt'`: Return PyTorch `torch.Tensor` objects.
                - `'np'`: Return Numpy `np.ndarray` objects.
            padding_value (`float`, *optional*, defaults to 0.0):
                The value that is used to fill the padding values / vectors.
            max_length (`int`, *optional*):
                The maximum input length of the inputs.
        """
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"
                    f" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"
                    f" was sampled with {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )
        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray([speech], dtype=np.float32).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech]).T]

        batched_speech = BatchFeature({"input_features": raw_speech})
        max_length = self.default_audio_length * self.sampling_rate if max_length is None else max_length

        padded_inputs = self.pad(
            batched_speech,
            padding=padding,
            max_length=max_length,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
        )

        # make sure list is in array format
        input_features = padded_inputs.get("input_features").transpose(2, 0, 1)

        input_features = [
            self._np_extract_fbank_features(waveform).astype(np.float32) for waveform in input_features[0]
        ]

        if isinstance(input_features[0], List):
            padded_inputs["input_features"] = [np.asarray(feature) for feature in input_features]
        else:
            padded_inputs["input_features"] = input_features

        return padded_inputs.convert_to_tensors(return_tensors)
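# Editor's usage sketch (hedged, not part of the original file): assuming the
# class is importable from transformers, a one-second mono clip at 22050 Hz is
# padded to the 6-second default before the log-mel features are extracted:
#
#     import numpy as np
#     from transformers import ClvpFeatureExtractor
#
#     feature_extractor = ClvpFeatureExtractor()
#     audio = np.zeros(22050, dtype=np.float32)
#     features = feature_extractor(audio, sampling_rate=22050, return_tensors="np")
#     # features["input_features"] has shape (1, 80, n_frames), with n_frames
#     # on the order of 132300 // 256 ≈ 517.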
# /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/clvp/modeling_clvp.py


class ClvpEncoderOutput(ModelOutput):
    """
    Base class for CLVP encoder's outputs that contains a pooling of the last hidden states as well as a projection
    output (a linear layer on top of the pooled output).
    Args:
        embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)`, *optional*, returned when model is initialized with `with_projection=True`):
            The embeddings obtained by applying the projection layer to the pooler_output.
        last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
            The hidden state of the last layer of the model.
        pooler_output (`torch.FloatTensor` of shape `(batch_size, hidden_size)`):
            Pooled output of the `last_hidden_state`.
        hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
            one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of
            the model at the output of each layer plus the optional initial embedding outputs.
        attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in
            the self-attention heads.
    """
    embeds: Optional[torch.FloatTensor] = None
    last_hidden_state: torch.FloatTensor = None
    pooler_output: Optional[torch.FloatTensor] = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None
class ClvpOutput(ModelOutput):
    """
    Args:
        loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `return_loss` is `True`):
            Contrastive loss for speech-text similarity.
        speech_ids (`torch.LongTensor`, *optional*):
            speech_ids (or speech candidates) generated by the `ClvpForCausalLM` model.
        logits_per_speech (`torch.FloatTensor` of shape `(speech_batch_size, text_batch_size)`):
            The scaled dot product scores between `speech_embeds` and `text_embeds`. This represents the speech-text
            similarity scores.
        logits_per_text (`torch.FloatTensor` of shape `(text_batch_size, speech_batch_size)`):
            The scaled dot product scores between `text_embeds` and `speech_embeds`. This represents the text-speech
            similarity scores.
        text_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)`):
            The text embeddings obtained by applying the projection layer to the pooled output of the text encoder
            model.
        speech_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)`):
            The speech embeddings obtained by applying the projection layer to the pooled output of the speech
            encoder model.
        text_model_output (`BaseModelOutputWithPooling`):
            The pooled output of the `last_hidden_state` of the text encoder model.
        speech_model_output (`BaseModelOutputWithPooling`):
            The pooled output of the `last_hidden_state` of the speech encoder model.
        decoder_hidden_states (`torch.FloatTensor`, *optional*):
            The hidden states of the decoder model.
        text_encoder_hidden_states (`torch.FloatTensor`, *optional*):
            The hidden states of the text encoder model.
        speech_encoder_hidden_states (`torch.FloatTensor`, *optional*):
            The hidden states of the speech encoder model.
    """
    loss: Optional[torch.FloatTensor] = None
    speech_ids: Optional[torch.LongTensor] = None
    logits_per_speech: torch.FloatTensor = None
    logits_per_text: torch.FloatTensor = None
    text_embeds: torch.FloatTensor = None
    speech_embeds: torch.FloatTensor = None
    text_model_output: BaseModelOutputWithPooling = None
    speech_model_output: BaseModelOutputWithPooling = None
    decoder_hidden_states: torch.FloatTensor = None
    text_encoder_hidden_states: torch.FloatTensor = None
    speech_encoder_hidden_states: torch.FloatTensor = None
class ClvpRMSNorm(nn.Module):
    def __init__(self, hidden_size, eps=1e-6):
        """
        ClvpRMSNorm is equivalent to T5LayerNorm
        """
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states):
        input_dtype = hidden_states.dtype
        hidden_states = hidden_states.to(torch.float32)
        variance = hidden_states.pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
        return self.weight * hidden_states.to(input_dtype)

    def extra_repr(self):
        return f"{tuple(self.weight.shape)}, eps={self.variance_epsilon}"
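# Editor's note (hedged): in math terms the forward pass above computes
#     y = weight * x / sqrt(mean(x**2, dim=-1) + eps)
# i.e. it rescales by the root-mean-square of the features (upcast to float32
# for the statistics, then cast back) without the mean-centering step that
# nn.LayerNorm performs, matching T5's layer norm.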
class ClvpRotaryPositionalEmbedding(nn.Module):
    """
    Rotary Position Embedding Class for CLVP. It was proposed in the paper 'ROFORMER: ENHANCED TRANSFORMER WITH
    ROTARY POSITION EMBEDDING'. Please see https://arxiv.org/pdf/2104.09864v1.pdf .
    """

    def __init__(self, config):
        super().__init__()
        dim = max(config.projection_dim // (config.num_attention_heads * 2), 32)
        inv_freq = 1.0 / (10000 ** (torch.arange(0, dim, 2, dtype=torch.int64).float() / dim))

        self.register_buffer("inv_freq", inv_freq)
        self.cached_sequence_length = None
        self.cached_rotary_positional_embedding = None

    def forward(self, hidden_states: torch.FloatTensor) -> torch.FloatTensor:
        sequence_length = hidden_states.shape[1]

        if sequence_length == self.cached_sequence_length and self.cached_rotary_positional_embedding is not None:
            return self.cached_rotary_positional_embedding
        self.cached_sequence_length = sequence_length
        time_stamps = torch.arange(sequence_length, device=hidden_states.device).type_as(self.inv_freq)
        freqs = torch.einsum("i,j->ij", time_stamps, self.inv_freq)
        embeddings = torch.cat((freqs, freqs), dim=-1)

        self.cached_rotary_positional_embedding = embeddings.unsqueeze(0)
        return self.cached_rotary_positional_embedding
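# Editor's note (hedged): the returned tensor has shape
# (1, sequence_length, dim) with dim as computed in __init__ and the
# frequency table duplicated along the last axis, so that its cos()/sin()
# can rotate the first `rotary_emb_dim` channels of the query/key/value
# states in the attention module below; the cached value is reused only
# while the sequence length stays the same.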
class ClvpSelfAttention(nn.Module):
    """
    Multi-headed attention to combine Absolute and Rotary Positional Embeddings into a single Attention module.
    """

    def __init__(self, config):
        super().__init__()
        self.config = config
        self.embed_dim = config.hidden_size
        self.num_heads = config.num_attention_heads
        self.head_dim = self.embed_dim // self.num_heads
        if self.head_dim * self.num_heads != self.embed_dim:
            raise ValueError(
                f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:"
                f" {self.num_heads})."
            )
        self.scale = self.head_dim**-0.5
        self.dropout = config.attention_dropout
        if hasattr(config, "max_position_embeddings"):
            max_positions = config.max_position_embeddings
            bias = torch.tril(torch.ones((max_positions, max_positions), dtype=torch.bool))
            bias = bias.view(1, 1, max_positions, max_positions)
            self.register_buffer("bias", bias, persistent=False)

        self.k_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=config.use_attention_bias)
        self.v_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=config.use_attention_bias)
        self.q_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=config.use_attention_bias)
        self.out_proj = nn.Linear(self.embed_dim, self.embed_dim)

    # Copied from transformers.models.clip.modeling_clip.CLIPAttention._shape
    def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
        return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()
    def forward(
        self,
        hidden_states: torch.FloatTensor,
        rotary_pos_emb: Optional[torch.FloatTensor] = None,
        attention_mask: Optional[torch.LongTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_value: Optional[Tuple[torch.Tensor]] = None,
        use_cache: Optional[bool] = False,
        head_mask: Optional[torch.FloatTensor] = None,
        output_attentions: Optional[bool] = False,
    ) -> Tuple[torch.FloatTensor, Optional[torch.FloatTensor], Optional[Tuple[torch.FloatTensor]]]:
        # Raise error when position_ids is None but rotary_pos_emb is provided, because we need that when applying
        # rotary_pos_emb to query and key states.
        if rotary_pos_emb is not None and position_ids is None:
            raise ValueError("`position_ids` must be provided when `rotary_pos_emb` is not None.")

        bsz, _, embed_dim = hidden_states.size()
        # get query proj
        query_states = self._shape(self.q_proj(hidden_states), -1, bsz) * self.scale
        key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
        value_states = self._shape(self.v_proj(hidden_states), -1, bsz)

        if past_key_value is not None:
            past_key, past_value = past_key_value
            key_states = torch.cat((past_key, key_states), dim=-2)
            value_states = torch.cat((past_value, value_states), dim=-2)

        if use_cache is True:
            present = (key_states, value_states)
        else:
            present = None

        if rotary_pos_emb is not None:
            rotary_emb_dim = rotary_pos_emb.shape[-1]
            # Partial rotary embedding
            query_rot, query_pass = (
                query_states[..., :rotary_emb_dim],
                query_states[..., rotary_emb_dim:],
            )
            key_rot, key_pass = (
                key_states[..., :rotary_emb_dim],
                key_states[..., rotary_emb_dim:],
            )
            value_rot, value_pass = (
                value_states[..., :rotary_emb_dim],
                value_states[..., rotary_emb_dim:],
            )

            cos, sin = rotary_pos_emb.cos().squeeze(0), rotary_pos_emb.sin().squeeze(0)
            query_rot, key_rot, value_rot = apply_rotary_pos_emb(query_rot, key_rot, value_rot, cos, sin, position_ids)

            # [batch_size, num_heads, seq_length, head_dim]
            query_states = torch.cat((query_rot, query_pass), dim=-1)
            key_states = torch.cat((key_rot, key_pass), dim=-1)
            value_states = torch.cat((value_rot, value_pass), dim=-1)
        tgt_len = query_states.shape[2]
        src_len = key_states.shape[2]
        attn_weights = torch.matmul(query_states, key_states.transpose(2, 3))

        if attention_mask is not None:
            if attention_mask.size() != (bsz, 1, tgt_len, src_len):
                raise ValueError(
                    f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}"
                )
            attn_weights = attn_weights + attention_mask

        attn_weights = nn.functional.softmax(attn_weights, dim=-1)

        # Mask heads if we want to
        if head_mask is not None:
            attn_weights = attn_weights * head_mask

        attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)
        attn_output = torch.matmul(attn_probs, value_states)
        if attn_output.size() != (bsz, self.num_heads, tgt_len, self.head_dim):
            raise ValueError(
                f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is"
                f" {attn_output.size()}"
            )

        attn_output = attn_output.transpose(1, 2).contiguous()
        attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim)

        attn_output = self.out_proj(attn_output)

        if not output_attentions:
            attn_weights = None

        return attn_output, present, attn_weights
class ClvpGatedLinearUnit(nn.Module):
    """
    `ClvpGatedLinearUnit` uses the second half of the `hidden_states` to act as a gate for the first half of the
    `hidden_states`, which controls the flow of data from the first half of the tensor.
    """

    def __init__(self, config):
        super().__init__()
        self.activation_fn = ACT2FN[config.hidden_act]
        self.proj = nn.Linear(config.hidden_size, config.intermediate_size * 2)

    def forward(self, hidden_states: torch.FloatTensor) -> torch.FloatTensor:
        hidden_states, gate = self.proj(hidden_states).chunk(2, dim=-1)
        return hidden_states * self.activation_fn(gate)
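# Editor's note (hedged): `proj` maps hidden_size -> 2 * intermediate_size and
# `chunk(2, dim=-1)` splits the result into a value half and a gate half, so
# the output is value * act(gate) with shape (..., intermediate_size); with a
# sigmoid-like activation this is the classic GLU gating pattern.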
class ClvpEncoderMLP(nn.Module):
    """
    This MLP is used in CLVP speech or text encoder models.
    """

    def __init__(self, config):
        super().__init__()
        self.config = config

        self.fc1 = ClvpGatedLinearUnit(config)
        self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size)
        self.dropout_layer = nn.Dropout(config.dropout)

    def forward(self, hidden_states: torch.FloatTensor) -> torch.FloatTensor:
        hidden_states = self.fc1(hidden_states)
        hidden_states = self.dropout_layer(hidden_states)
        hidden_states = self.fc2(hidden_states)
        return hidden_states
class ClvpEncoderLayer(nn.Module):
    def __init__(self, config: ClvpConfig):
        super().__init__()
        self.config = config
        self.embed_dim = config.hidden_size
        self.self_attn = ClvpSelfAttention(config)
        self.mlp = ClvpEncoderMLP(config)

        self.input_rmsnorm = ClvpRMSNorm(self.embed_dim, eps=config.layer_norm_eps)
        self.post_attention_rmsnorm = ClvpRMSNorm(self.embed_dim, eps=config.layer_norm_eps)
    def forward(
        self,
        hidden_states: torch.FloatTensor,
        rotary_pos_emb: torch.FloatTensor,
        attention_mask: torch.LongTensor,
        position_ids: torch.LongTensor,
        output_attentions: Optional[bool] = False,
    ) -> Tuple[torch.FloatTensor]:
        """
        Args:
            hidden_states (`torch.FloatTensor` of shape `(batch, seq_len, embed_dim)`):
                input to the layer.
            rotary_pos_emb (`torch.FloatTensor`):
                rotary position embeddings generated by the `ClvpRotaryPositionalEmbedding` module.
            attention_mask (`torch.FloatTensor` of shape `(batch, 1, tgt_len, src_len)`):
                attention mask where padding elements are indicated by very large negative values.
            position_ids (`torch.LongTensor`):
                Denotes position ids of the input tokens.
            output_attentions (`bool`, *optional*, defaults to `False`):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
        """
        residual = hidden_states
        hidden_states = self.input_rmsnorm(hidden_states)

        attention_outputs = self.self_attn(
            hidden_states=hidden_states,
            rotary_pos_emb=rotary_pos_emb,
            attention_mask=attention_mask,
            position_ids=position_ids,
            output_attentions=output_attentions,
        )

        hidden_states = attention_outputs[0]
        hidden_states = residual + hidden_states

        residual = hidden_states
        hidden_states = self.post_attention_rmsnorm(hidden_states)
        hidden_states = self.mlp(hidden_states)
        hidden_states = residual + hidden_states

        outputs = (hidden_states,)

        if output_attentions:
            outputs += (attention_outputs[-1],)

        return outputs
class ClvpDecoderMLP(nn.Module):
    def __init__(self, intermediate_size, config):
        super().__init__()
        embed_dim = config.hidden_size
        self.c_fc = Conv1D(intermediate_size, embed_dim)
        self.c_proj = Conv1D(embed_dim, intermediate_size)
        self.act = ACT2FN[config.activation_function]
        self.dropout = nn.Dropout(config.resid_pdrop)

    def forward(self, hidden_states: Optional[Tuple[torch.FloatTensor]]) -> torch.FloatTensor:
        hidden_states = self.c_fc(hidden_states)
        hidden_states = self.act(hidden_states)
        hidden_states = self.c_proj(hidden_states)
        hidden_states = self.dropout(hidden_states)
        return hidden_states
class ClvpDecoderLayer(nn.Module):
    def __init__(self, config):
        super().__init__()
        hidden_size = config.hidden_size
        inner_dim = config.n_inner if config.n_inner is not None else 4 * hidden_size

        self.input_layernorm = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon)
        self.attn = ClvpSelfAttention(config)
        self.post_attention_layernorm = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon)

        self.mlp = ClvpDecoderMLP(inner_dim, config)
    def forward(
        self,
        hidden_states: Optional[Tuple[torch.FloatTensor]],
        past_key_value: Optional[Tuple[torch.Tensor]] = None,
        attention_mask: Optional[torch.LongTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        use_cache: Optional[bool] = False,
        output_attentions: Optional[bool] = False,
    ) -> Union[Tuple[torch.Tensor], Optional[Tuple[torch.Tensor, Tuple[torch.FloatTensor, ...]]]]:
        residual = hidden_states
        hidden_states = self.input_layernorm(hidden_states)
        attn_outputs = self.attn(
            hidden_states,
            past_key_value=past_key_value,
            attention_mask=attention_mask,
            position_ids=position_ids,
            head_mask=head_mask,
            use_cache=use_cache,
            output_attentions=output_attentions,
        )
        attn_output = attn_outputs[0]
        outputs = attn_outputs[1:]
        # residual connection
        hidden_states = attn_output + residual
        residual = hidden_states
        hidden_states = self.post_attention_layernorm(hidden_states)
        feed_forward_hidden_states = self.mlp(hidden_states)
        # residual connection
        hidden_states = residual + feed_forward_hidden_states

        if use_cache:
            outputs = (hidden_states,) + outputs
        else:
            outputs = (hidden_states,) + outputs[1:]

        return outputs
class ClvpConditioningEncoder(nn.Module):
    """
    This class processes the log-mel spectrograms (extracted by the Feature Extractor) and text tokens (produced by
    the tokenizer) as inputs for the decoder model.

    First each log-mel spectrogram is processed into a single vector which captures valuable characteristics from
    each of them, then the text tokens are converted into token embeddings and position embeddings are added
    afterwards. Both of these vectors are concatenated and then passed to the decoder model.

    The text tokens help to incorporate the "text information" and the log-mel spectrogram is used to specify the
    "voice characteristics" in the generated mel tokens.
    """

    def __init__(self, config: ClvpConfig):
        super().__init__()

        self.text_config = config.text_config
        self.decoder_config = config.decoder_config
        self.text_token_embedding = nn.Embedding(self.text_config.vocab_size, self.decoder_config.hidden_size)
        self.text_position_embedding = nn.Embedding(
            self.decoder_config.max_text_tokens, self.decoder_config.hidden_size
        )

        self.mel_conv = nn.Conv1d(self.decoder_config.feature_size, self.decoder_config.hidden_size, kernel_size=1)

        # define group norms to be used before each attention layer
        num_groups = self.compute_groupnorm_groups(self.decoder_config.hidden_size)
        self.group_norms = nn.ModuleList(
            [
                nn.GroupNorm(num_groups, self.decoder_config.hidden_size, eps=1e-5, affine=True)
                for _ in range(self.decoder_config.num_mel_attn_blocks)
            ]
        )

        # define the attention layers
        self.mel_attn_blocks = nn.ModuleList(
            [ClvpSelfAttention(self.decoder_config) for _ in range(self.decoder_config.num_mel_attn_blocks)]
        )

        self.gradient_checkpointing = False
    def compute_groupnorm_groups(self, channels: int, groups: int = 32):
        """
        Calculates the value of `num_groups` for nn.GroupNorm. This logic is taken from the official tortoise
        repository. link :
        https://github.com/neonbjb/tortoise-tts/blob/4003544b6ff4b68c09856e04d3eff9da26d023c2/tortoise/models/arch_util.py#L26
        """
        if channels <= 16:
            groups = 8
        elif channels <= 64:
            groups = 16
        while channels % groups != 0:
            groups = int(groups / 2)

        if groups <= 2:
            raise ValueError(
                f"Number of groups for the GroupNorm must be greater than 2, but it is {groups}. "
                f"Please consider using a different `hidden_size`."
            )

        return groups
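    # Editor's note (hedged trace of the method above):
    #     compute_groupnorm_groups(256) -> 32  (256 % 32 == 0)
    #     compute_groupnorm_groups(80)  -> 16  (80 % 32 != 0, so 32 is halved
    #                                           to 16, and 80 % 16 == 0)
    #     compute_groupnorm_groups(10) halves all the way down to 2 and raises
    #     the ValueError above.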
    def forward(
        self,
        input_features: torch.FloatTensor,
        input_ids: Optional[torch.LongTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        attention_mask: Optional[torch.LongTensor] = None,
    ):
        # process text
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            batch_size, seq_length = input_ids.size()
        elif inputs_embeds is not None:
            batch_size, seq_length = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        # construct attention mask if not given
        if attention_mask is None:
            attention_mask = torch.ones([batch_size, seq_length], dtype=torch.long, device=input_ids.device)
        # We add bos and eos input_ids in the modeling file instead of the tokenizer file to keep the logic simple.
        # This logic is specific to ClvpConditioningEncoder and not used by other modules.
        input_ids, attention_mask = _pad_extra_bos_eos_tokens(
            input_ids,
            attention_mask,
            bos_token_id=self.text_config.bos_token_id,
            eos_token_id=self.text_config.eos_token_id,
        )

        inputs_embeds = self.text_token_embedding(input_ids)
        position_ids = attention_mask.cumsum(-1) - 1
        position_embeds = self.text_position_embedding(position_ids)
        text_embeds = inputs_embeds + position_embeds

        if self.gradient_checkpointing and self.training:
            # process each log-mel spectrogram into a single vector
            mel_spec = torch.utils.checkpoint.checkpoint(self.mel_conv, input_features)
            for i, mel_attn_block in enumerate(self.mel_attn_blocks):
                residual_mel_spec = mel_spec.transpose(1, 2)

                mel_spec = torch.utils.checkpoint.checkpoint(self.group_norms[i], mel_spec).transpose(1, 2)
                mel_spec = torch.utils.checkpoint.checkpoint(mel_attn_block, mel_spec)[0] + residual_mel_spec
                mel_spec = mel_spec.transpose(1, 2)

        else:
            # process each log-mel spectrogram into a single vector
            mel_spec = self.mel_conv(input_features)

            for i, mel_attn_block in enumerate(self.mel_attn_blocks):
                residual_mel_spec = mel_spec.transpose(1, 2)

                mel_spec = self.group_norms[i](mel_spec).transpose(1, 2)
                mel_spec = mel_attn_block(mel_spec)[0] + residual_mel_spec
                mel_spec = mel_spec.transpose(1, 2)

        mel_spec = mel_spec[:, :, 0]
        mel_spec = mel_spec.unsqueeze(1)
        # repeat if there is either (1 text vs N audios) or (N texts vs 1 audio)
        if text_embeds.shape[0] == 1 and mel_spec.shape[0] != 1:
            text_embeds = text_embeds.repeat(mel_spec.shape[0], 1, 1)
        elif text_embeds.shape[0] != 1 and mel_spec.shape[0] == 1:
            mel_spec = mel_spec.repeat(text_embeds.shape[0], 1, 1)
        # If there are N texts and M audios we raise an error, since the number of texts and audios must match.
        elif text_embeds.shape[0] != mel_spec.shape[0]:
            raise ValueError(
                f"The number of texts and number of audios must be the same. "
                f"Found {text_embeds.shape[0]} texts vs {mel_spec.shape[0]} audios"
            )

        return torch.concat([mel_spec, text_embeds], dim=1)
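# Editor's note (hedged shape walk): `mel_spec[:, :, 0].unsqueeze(1)` keeps
# only the first time step of the attended spectrogram, giving one
# (batch, 1, hidden_size) "voice" vector per clip; assuming
# `_pad_extra_bos_eos_tokens` adds one bos and one eos token, the returned
# concatenation has shape (batch, 1 + seq_length + 2, hidden_size).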
class ClvpPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = ClvpConfig
    base_model_prefix = "clvp"
    supports_gradient_checkpointing = True
    _skip_keys_device_placement = "past_key_values"
    def _init_weights(self, module):
        """Initialize the weights"""
        factor = self.config.initializer_factor
        if isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=factor * 0.02)
        elif isinstance(module, (nn.Linear, Conv1D, nn.Conv1d)):
            module.weight.data.normal_(mean=0.0, std=factor * 0.02)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, ClvpEncoderMLP):
            factor = self.config.initializer_factor
            in_proj_std = (module.config.hidden_size**-0.5) * ((2 * module.config.num_hidden_layers) ** -0.5) * factor
            fc_std = (2 * module.config.hidden_size) ** -0.5 * factor
            nn.init.normal_(module.fc1.proj.weight if getattr(module.fc1, "proj") else module.fc1.weight, std=fc_std)
            nn.init.normal_(module.fc2.weight, std=in_proj_std)
        elif isinstance(module, ClvpEncoder):
            config = self.config.get_text_config()
            factor = config.initializer_factor
            module.projection.weight.data.normal_(mean=0.0, std=factor * (config.hidden_size**-0.5))
        elif isinstance(module, ClvpConditioningEncoder):
            module.mel_conv.weight.data.normal_(mean=0.0, std=factor)
            module.mel_conv.bias.data.zero_()
        elif isinstance(module, ClvpForCausalLM):
            for name, p in module.named_parameters():
                if name == "c_proj.weight":
                    p.data.normal_(
                        mean=0.0, std=(self.config.initializer_range / math.sqrt(2 * self.config.num_hidden_layers))
                    )
        if isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
class ClvpEncoder(ClvpPreTrainedModel):
    """
    Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a
    [`ClvpEncoderLayer`].

    Args:
        config: ClvpConfig
    """

    def __init__(self, config: ClvpConfig):
        super().__init__(config)

        self.config = config
        self.token_embedding = nn.Embedding(config.vocab_size, config.hidden_size)
        self.rotary_pos_emb = ClvpRotaryPositionalEmbedding(config) if config.use_rotary_embedding else None
        self.layers = nn.ModuleList([ClvpEncoderLayer(config) for _ in range(config.num_hidden_layers)])

        self.sequence_summary = SequenceSummary(config)
        self.final_layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)

        self.projection = nn.Linear(config.hidden_size, config.projection_dim, bias=False)

        self.gradient_checkpointing = False

        self.post_init()

    def get_input_embeddings(self):
        return self.token_embedding
    def set_input_embeddings(self, value):
        self.token_embedding = value

    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        inputs_embeds: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.LongTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutput]:
        r"""
        Args:
            input_ids (`torch.LongTensor` of shape `(batch_size, input_ids_length)`, *optional*):
                Indices of input sequence tokens in the vocabulary.

                Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
                [`PreTrainedTokenizer.__call__`] for details.
                [What are input IDs?](../glossary#input-ids)
            inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
                input embeddings for the model. This bypasses the model's internal embedding lookup matrix.
            attention_mask (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
                Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

                - 1 for tokens that are **not masked**,
                - 0 for tokens that are **masked**.
                [What are attention masks?](../glossary#attention-mask)
            position_ids (`torch.LongTensor`, *optional*):
                Denotes the position ids of `input_ids`.
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
            output_hidden_states (`bool`, *optional*):
                Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
                for more detail.
            return_dict (`bool`, *optional*):
                Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
        """
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask) input_shape = input_ids.size() input_ids = input_ids.view(-1, input_shape[-1]) inputs_embeds = self.token_embedding(input_ids) elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: raise ValueError("You have to specify either input_ids or inputs_embeds") # expand attention_mask and create position_ids if needed if attention_mask is not None: # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] attention_mask = _prepare_4d_attention_mask(attention_mask, inputs_embeds.dtype)
3,421
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/clvp/modeling_clvp.py
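For intuition, here is a hedged sketch of what expanding a 2D padding mask into a broadcastable 4D additive mask involves; `expand_padding_mask` is an illustrative stand-in, and the real `_prepare_4d_attention_mask` handles more cases (explicit target lengths, dtype-specific minimums).

import torch

def expand_padding_mask(mask_2d: torch.Tensor, dtype: torch.dtype) -> torch.Tensor:
    # [bsz, src_len] -> [bsz, 1, 1, src_len], broadcastable over heads and query positions.
    expanded = mask_2d[:, None, None, :].to(dtype)
    # 1 -> 0.0 (attend), 0 -> a large negative value (zeroed out by the softmax).
    return (1.0 - expanded) * torch.finfo(dtype).min

mask = torch.tensor([[1, 1, 1, 0]])
additive = expand_padding_mask(mask, torch.float32)
print(additive.shape)  # torch.Size([1, 1, 1, 4]); last position is masked out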
if position_ids is None:
            device = input_ids.device if input_ids is not None else inputs_embeds.device
            position_ids = torch.arange(input_shape[1], dtype=torch.long, device=device)
            position_ids = position_ids.unsqueeze(0)

        encoder_states = () if output_hidden_states else None
        all_attentions = () if output_attentions else None

        rotary_pos_emb = self.rotary_pos_emb(inputs_embeds) if self.rotary_pos_emb is not None else None
3,421
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/clvp/modeling_clvp.py
hidden_states = inputs_embeds
        for idx, encoder_layer in enumerate(self.layers):
            if output_hidden_states:
                encoder_states = encoder_states + (hidden_states,)
            if self.gradient_checkpointing and self.training:
                layer_outputs = torch.utils.checkpoint.checkpoint(
                    encoder_layer.__call__,
                    hidden_states,
                    rotary_pos_emb,
                    attention_mask,
                    position_ids,
                )
            else:
                layer_outputs = encoder_layer(
                    hidden_states,
                    rotary_pos_emb,
                    attention_mask,
                    position_ids,
                    output_attentions=output_attentions,
                )

            hidden_states = layer_outputs[0]

            if output_attentions:
                all_attentions = all_attentions + (layer_outputs[1],)
3,421
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/clvp/modeling_clvp.py
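When `gradient_checkpointing` is enabled during training, each layer call runs under `torch.utils.checkpoint.checkpoint`, which discards intermediate activations and recomputes them on the backward pass, trading compute for memory. A minimal self-contained sketch of the same mechanism (assumes a recent PyTorch with the `use_reentrant` keyword):

import torch
import torch.nn as nn
from torch.utils.checkpoint import checkpoint

layer = nn.Linear(16, 16)
x = torch.randn(2, 16, requires_grad=True)

# Activations inside `layer` are not stored; they are recomputed on backward.
y = checkpoint(layer, x, use_reentrant=False)
y.sum().backward()
print(x.grad.shape)  # torch.Size([2, 16])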
if output_hidden_states:
            encoder_states = encoder_states + (hidden_states,)

        last_hidden_state = hidden_states
        last_hidden_state = self.final_layer_norm(last_hidden_state)

        # take the mean over axis 1 and get pooled output
        pooled_output = self.sequence_summary(last_hidden_state)

        # apply the projection layer
        embeds = self.projection(pooled_output)

        if not return_dict:
            return tuple(
                v for v in [embeds, last_hidden_state, pooled_output, encoder_states, all_attentions] if v is not None
            )

        return ClvpEncoderOutput(
            embeds=embeds,
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_states,
            attentions=all_attentions,
        )
3,421
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/clvp/modeling_clvp.py
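A hedged sketch of the encoder's output head (layer norm, sequence summary, bias-free projection): `SequenceSummary` is approximated here by a plain mean over the sequence axis, matching the "mean over axis 1" comment above, though the real module is configurable.

import torch
import torch.nn as nn

hidden_size, projection_dim = 32, 16
last_hidden_state = torch.randn(2, 10, hidden_size)

final_layer_norm = nn.LayerNorm(hidden_size)
projection = nn.Linear(hidden_size, projection_dim, bias=False)

normed = final_layer_norm(last_hidden_state)
pooled = normed.mean(dim=1)   # (batch, hidden_size)
embeds = projection(pooled)   # (batch, projection_dim)
print(embeds.shape)           # torch.Size([2, 16])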
class ClvpDecoder(ClvpPreTrainedModel):
    """
    Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`ClvpDecoderLayer`]
    """

    def __init__(self, config):
        super().__init__(config)

        self.config = config

        self.input_embeds_layer = nn.Embedding(self.config.vocab_size, self.config.hidden_size)
        self.position_embeds_layer = nn.Embedding(self.config.max_position_embeddings, self.config.hidden_size)

        self.drop = nn.Dropout(self.config.embd_pdrop)
        self.layers = nn.ModuleList([ClvpDecoderLayer(self.config) for _ in range(self.config.num_hidden_layers)])
        self.layer_norm = nn.LayerNorm(self.config.hidden_size, eps=self.config.layer_norm_epsilon)

        self.gradient_checkpointing = False

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.input_embeds_layer
3,422
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/clvp/modeling_clvp.py
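Unlike the encoder's rotary scheme, the decoder adds learned absolute position embeddings to the token embeddings. A minimal sketch of that additive lookup, with made-up sizes:

import torch
import torch.nn as nn

vocab_size, max_positions, hidden_size = 100, 64, 32
input_embeds_layer = nn.Embedding(vocab_size, hidden_size)
position_embeds_layer = nn.Embedding(max_positions, hidden_size)

input_ids = torch.tensor([[3, 7, 9]])
position_ids = torch.arange(input_ids.shape[1]).unsqueeze(0)

# Token and position lookups share the hidden size and are summed.
hidden_states = input_embeds_layer(input_ids) + position_embeds_layer(position_ids)
print(hidden_states.shape)  # torch.Size([1, 3, 32])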
def set_input_embeddings(self, new_embeddings):
        self.input_embeds_layer = new_embeddings

    def _prune_heads(self, heads_to_prune):
        """
        Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer}
        """
        for layer, heads in heads_to_prune.items():
            self.layers[layer].attn.prune_heads(heads)
3,422
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/clvp/modeling_clvp.py
@add_start_docstrings_to_model_forward(CLVP_DECODER_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        token_type_ids: Optional[torch.LongTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutputWithPastAndCrossAttentions]:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
3,422
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/clvp/modeling_clvp.py
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        use_cache = use_cache if use_cache is not None else self.config.use_cache
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
3,422
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/clvp/modeling_clvp.py
if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
            input_shape = input_ids.size()
            input_ids = input_ids.view(-1, input_shape[-1])
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        device = input_ids.device if input_ids is not None else inputs_embeds.device

        if token_type_ids is not None:
            token_type_ids = token_type_ids.view(-1, input_shape[-1])
3,422
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/clvp/modeling_clvp.py
if past_key_values is None:
            past_key_values_length = 0
            past_key_values = tuple([None] * len(self.layers))
        else:
            past_key_values_length = past_key_values[0][0].size(-2)

        if position_ids is None:
            position_ids = torch.arange(
                past_key_values_length, input_shape[-1] + past_key_values_length, dtype=torch.long, device=device
            )
            position_ids = position_ids.unsqueeze(0).view(-1, input_shape[-1])

        if inputs_embeds is None:
            inputs_embeds = self.input_embeds_layer(input_ids)
        position_embeds = self.position_embeds_layer(position_ids)
        inputs_embeds = inputs_embeds + position_embeds

        attention_mask = _prepare_4d_causal_attention_mask(
            attention_mask, input_shape, inputs_embeds, past_key_values_length
        )
3,422
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/clvp/modeling_clvp.py
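The position ids above start at the cached length, so a token decoded after five cached tokens sits at position 5. A small worked example of that offset:

import torch

past_key_values_length = 5  # tokens already in the key/value cache
new_tokens = 1              # a single freshly decoded token
position_ids = torch.arange(
    past_key_values_length, new_tokens + past_key_values_length, dtype=torch.long
)
print(position_ids)  # tensor([5])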
# Prepare head mask if needed
        # 1.0 in head_mask indicates we keep the head
        # attention_probs has shape bsz x num_attention_heads x N x N
        # head_mask has shape num_hidden_layers x batch x num_attention_heads x N x N
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        hidden_states = inputs_embeds

        if token_type_ids is not None:
            token_type_embeds = self.input_embeds_layer(token_type_ids)
            hidden_states = hidden_states + token_type_embeds

        hidden_states = self.drop(hidden_states)

        output_shape = (-1,) + input_shape[1:] + (hidden_states.size(-1),)

        if self.gradient_checkpointing and self.training:
            if use_cache:
                logger.warning_once(
                    "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
                )
                use_cache = False
3,422
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/clvp/modeling_clvp.py
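A small sketch of head masking as the comments above describe it: a value of 1.0 keeps a head and 0.0 silences it when multiplied onto the attention probabilities (illustrative shapes only, not the library helper):

import torch

num_layers, num_heads, seq_len = 2, 4, 3
head_mask = torch.ones(num_layers, num_heads)
head_mask[0, 1] = 0.0  # silence head 1 of layer 0

attention_probs = torch.rand(1, num_heads, seq_len, seq_len)
# Broadcast the layer-0 mask over batch and sequence dimensions.
masked = attention_probs * head_mask[0][None, :, None, None]
print(masked[0, 1].abs().sum())  # tensor(0.)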
presents = () if use_cache else None
        all_self_attentions = () if output_attentions else None
        all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None
        all_hidden_states = () if output_hidden_states else None

        for i, (block, past_key_value) in enumerate(zip(self.layers, past_key_values)):
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)
3,422
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/clvp/modeling_clvp.py
if self.gradient_checkpointing and self.training:
                outputs = torch.utils.checkpoint.checkpoint(
                    block.__call__,
                    hidden_states,
                    None,
                    attention_mask,
                    position_ids,
                    head_mask[i],
                )
            else:
                outputs = block(
                    hidden_states,
                    past_key_value=past_key_value,
                    attention_mask=attention_mask,
                    position_ids=position_ids,
                    head_mask=head_mask[i],
                    use_cache=use_cache,
                    output_attentions=output_attentions,
                )

            hidden_states = outputs[0]
            if use_cache:
                presents = presents + (outputs[1],)
3,422
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/clvp/modeling_clvp.py
if output_attentions:
                all_self_attentions = all_self_attentions + (outputs[2 if use_cache else 1],)
                if self.config.add_cross_attention:
                    all_cross_attentions = all_cross_attentions + (outputs[3 if use_cache else 2],)

        hidden_states = self.layer_norm(hidden_states)

        hidden_states = hidden_states.view(output_shape)

        # Add last hidden state
        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        if not return_dict:
            return tuple(
                v
                for v in [hidden_states, presents, all_hidden_states, all_self_attentions, all_cross_attentions]
                if v is not None
            )
3,422
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/clvp/modeling_clvp.py
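Note that the tuple returned when `return_dict=False` drops every `None` entry, so its layout depends on which outputs were requested. A toy illustration of that filtering pattern:

outputs = ["hidden", None, None, ("attn_0", "attn_1")]
filtered = tuple(v for v in outputs if v is not None)
print(filtered)  # ('hidden', ('attn_0', 'attn_1'))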
return BaseModelOutputWithPastAndCrossAttentions(
            last_hidden_state=hidden_states,
            past_key_values=presents,
            hidden_states=all_hidden_states,
            attentions=all_self_attentions,
            cross_attentions=all_cross_attentions,
        )
3,422
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/clvp/modeling_clvp.py
class ClvpModel(ClvpPreTrainedModel):
    def __init__(self, config: ClvpDecoderConfig):
        super().__init__(config)
        self.config = config
        self.decoder = ClvpDecoder(self.config)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.decoder.input_embeds_layer

    def set_input_embeddings(self, value):
        self.decoder.input_embeds_layer = value

    def get_decoder(self):
        return self.decoder
3,423
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/clvp/modeling_clvp.py
@add_start_docstrings_to_model_forward(CLVP_DECODER_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        token_type_ids: Optional[torch.LongTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutputWithPastAndCrossAttentions]:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
3,423
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/clvp/modeling_clvp.py
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        use_cache = use_cache if use_cache is not None else self.config.use_cache
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
3,423
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/clvp/modeling_clvp.py
# decoder outputs consist of (dec_features, past_key_value, dec_hidden, dec_attn)
        decoder_outputs = self.decoder(
            input_ids=input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        if not return_dict:
            return decoder_outputs

        return BaseModelOutputWithPastAndCrossAttentions(
            last_hidden_state=decoder_outputs.last_hidden_state,
            past_key_values=decoder_outputs.past_key_values,
            hidden_states=decoder_outputs.hidden_states,
            attentions=decoder_outputs.attentions,
            cross_attentions=decoder_outputs.cross_attentions,
        )
3,423
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/clvp/modeling_clvp.py
class ClvpForCausalLM(ClvpPreTrainedModel, GenerationMixin):
    def __init__(self, config):
        super().__init__(config)

        self.config = config
        self.model = ClvpModel(self.config)

        self.final_norm = nn.LayerNorm(self.config.hidden_size)
        self.lm_head = nn.Linear(self.config.hidden_size, self.config.vocab_size, bias=True)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.model.decoder.input_embeds_layer

    def set_input_embeddings(self, new_embeddings):
        self.model.decoder.input_embeds_layer = new_embeddings
3,424
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/clvp/modeling_clvp.py
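A hedged sketch of how such a causal LM head typically turns decoder states into vocabulary logits (toy sizes only; the real forward also handles labels, caching, and generation):

import torch
import torch.nn as nn

hidden_size, vocab_size = 32, 100
last_hidden_state = torch.randn(1, 5, hidden_size)

final_norm = nn.LayerNorm(hidden_size)
lm_head = nn.Linear(hidden_size, vocab_size, bias=True)

# One logit per vocabulary entry at every position.
logits = lm_head(final_norm(last_hidden_state))
print(logits.shape)  # torch.Size([1, 5, 100])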
def _prepare_model_inputs(
        self,
        inputs: Optional[torch.Tensor] = None,
        bos_token_id: Optional[int] = None,
        model_kwargs: Optional[Dict[str, torch.Tensor]] = None,
    ) -> Tuple[torch.Tensor, Optional[str], Dict[str, torch.Tensor]]:
        """
        This function extracts the model-specific `inputs` for generation.
        """
        input_name = self.main_input_name

        model_kwargs = {k: v for k, v in model_kwargs.items() if v is not None}

        inputs_kwarg = model_kwargs.pop(input_name, None)
        if inputs_kwarg is not None and inputs is not None:
            raise ValueError(
                f"`inputs`: {inputs} were passed alongside {input_name} which is not allowed. "
                f"Make sure to either pass {inputs} or {input_name}=..."
            )
        elif inputs_kwarg is not None:
            inputs = inputs_kwarg
3,424
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/clvp/modeling_clvp.py
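A stripped-down sketch of the duplicate-input guard above, outside of any model class; `prepare` is an illustrative stand-in for `_prepare_model_inputs`, not a library function:

def prepare(inputs=None, main_input_name="input_ids", **model_kwargs):
    # Drop kwargs that were explicitly set to None.
    model_kwargs = {k: v for k, v in model_kwargs.items() if v is not None}
    inputs_kwarg = model_kwargs.pop(main_input_name, None)
    if inputs_kwarg is not None and inputs is not None:
        raise ValueError(f"`inputs` were passed alongside {main_input_name}, which is not allowed.")
    return inputs if inputs is not None else inputs_kwarg

print(prepare(input_ids=[1, 2, 3]))  # [1, 2, 3]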