import os
from subprocess import CalledProcessError
from typing import Optional

# Set the Hugging Face cache directory before importing libraries that read it.
os.environ['HF_HUB_CACHE'] = './checkpoints/hf_cache'

import json
import re
import time
import librosa
import torch
import torchaudio
from torch.nn.utils.rnn import pad_sequence

import warnings

warnings.filterwarnings("ignore", category=FutureWarning)
warnings.filterwarnings("ignore", category=UserWarning)

from omegaconf import OmegaConf

from indextts.gpt.model_v2 import UnifiedVoice
from indextts.utils.maskgct_utils import build_semantic_model, build_semantic_codec
from indextts.utils.front import TextNormalizer, TextTokenizer

from indextts.s2mel.modules.commons import load_checkpoint2, MyModel
from indextts.s2mel.modules.bigvgan import bigvgan
from indextts.s2mel.modules.campplus.DTDNN import CAMPPlus
from indextts.s2mel.modules.audio import mel_spectrogram

from transformers import AutoTokenizer, SeamlessM4TFeatureExtractor
from modelscope import AutoModelForCausalLM
from huggingface_hub import hf_hub_download
import safetensors.torch
import random
import torch.nn.functional as F


class IndexTTS2:
    @staticmethod
    def _load_gpt_state_dict(path: str) -> dict:
        checkpoint = torch.load(path, map_location="cpu")
        # Some checkpoints nest the weights under a "model" key.
        return checkpoint.get("model", checkpoint)

    @staticmethod
    def _infer_vocab_size(state_dict: dict) -> Optional[int]:
        # The text vocabulary size is the first dimension of any of these tensors.
        for key in ("text_embedding.weight", "text_head.weight", "text_head.bias"):
            tensor = state_dict.get(key)
            if tensor is not None:
                return tensor.shape[0]
        return None

    @staticmethod
    def _resolve_attr(module, key: str):
        # Walk a dotted attribute path (e.g. "text_head.weight") from `module`.
        obj = module
        for part in key.split("."):
            obj = getattr(obj, part)
        return obj

    @staticmethod
    def _copy_resized_weight(name: str, param, weight: torch.Tensor) -> None:
        # Copy `weight` into `param`, cropping to the overlapping region when
        # the checkpoint tensor and the model tensor differ in shape.
        target = param.data
        source = weight.to(device=target.device, dtype=target.dtype)
        if target.shape != source.shape:
            print(f">> Reshaping GPT parameter '{name}' from {source.shape} to {target.shape}")
        if target.ndim == 1:
            length = min(target.shape[0], source.shape[0])
            target[:length].copy_(source[:length])
        elif target.ndim == 2:
            rows = min(target.shape[0], source.shape[0])
            cols = min(target.shape[1], source.shape[1])
            target[:rows, :cols].copy_(source[:rows, :cols])
        else:
            raise ValueError(f"Unsupported tensor rank for '{name}': {target.ndim}")

    def _load_gpt_weights(self, model: UnifiedVoice, state_dict: dict) -> None:
        filtered_state: dict[str, torch.Tensor] = {}
        for key, value in state_dict.items():
            # Drop the autoregressive inference wrapper and any LoRA adapter
            # weights; fold LoRA base layers back onto their original names.
            if key.startswith("inference_model."):
                continue
            if ".lora_" in key:
                continue
            new_key = key.replace(".base_layer.", ".")
            filtered_state[new_key] = value

        # Text-vocabulary tensors may differ in size between checkpoint and
        # config; pull them out and load them with shape-aware copying.
        resizable_keys = ("text_embedding.weight", "text_head.weight", "text_head.bias")
        resizable: dict[str, torch.Tensor] = {}
        for key in resizable_keys:
            tensor = filtered_state.pop(key, None)
            if tensor is not None:
                resizable[key] = tensor

        missing, unexpected = model.load_state_dict(filtered_state, strict=False)
        if missing:
            print(f">> GPT load missing keys: {missing}")
        if unexpected:
            print(f">> GPT load unexpected keys: {unexpected}")

        for key, weight in resizable.items():
            param = self._resolve_attr(model, key)
            self._copy_resized_weight(key, param, weight)

    def __init__(
        self,
        cfg_path="checkpoints/config.yaml",
        model_dir="checkpoints",
        is_fp16: bool = False,
        *,
        use_fp16: Optional[bool] = None,
        device: Optional[str] = None,
        use_cuda_kernel: Optional[bool] = None,
        use_deepspeed: Optional[bool] = None,
        use_accel: bool = False,
        use_torch_compile: bool = False,
        gpt_checkpoint_path: Optional[str] = None,
        bpe_model_path: Optional[str] = None,
    ):
| """ |
| Args: |
| cfg_path (str): path to the config file. |
| model_dir (str): path to the model directory. |
| is_fp16 (bool): legacy alias for `use_fp16`. |
| use_fp16 (Optional[bool]): whether to run GPT in fp16 when the device supports it. |
| device (str): device to use (e.g., 'cuda:0', 'cpu'). If None, it will be set automatically based on the availability of CUDA/MPS/XPU. |
| use_cuda_kernel (None | bool): whether to use BigVGan custom fused activation CUDA kernel, only for CUDA device. |
| use_deepspeed (Optional[bool]): explicitly enable/disable DeepSpeed (falls back to INDEXTTS_USE_DEEPSPEED when None). |
| use_accel (bool): whether to enable the custom GPT acceleration engine. |
| use_torch_compile (bool): toggle torch.compile optimizations for the S2Mel stack. |
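
        Example:
            A minimal usage sketch (assumes the model files are already
            downloaded under checkpoints/):

                tts = IndexTTS2(cfg_path="checkpoints/config.yaml",
                                model_dir="checkpoints")
                tts.infer(spk_audio_prompt="examples/voice_01.wav",
                          text="...", output_path="gen.wav")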
| """ |
| fp16_requested = use_fp16 if use_fp16 is not None else is_fp16 |
        if device is not None:
            self.device = device
            self.is_fp16 = bool(fp16_requested) if device != "cpu" else False
            self.use_cuda_kernel = bool(use_cuda_kernel) if use_cuda_kernel is not None else device.startswith("cuda")
        elif torch.cuda.is_available():
            self.device = "cuda:0"
            self.is_fp16 = bool(fp16_requested)
            self.use_cuda_kernel = True if use_cuda_kernel is None else bool(use_cuda_kernel)
        elif hasattr(torch, "xpu") and torch.xpu.is_available():
            self.device = "xpu"
            self.is_fp16 = bool(fp16_requested)
            self.use_cuda_kernel = False
        elif hasattr(torch.backends, "mps") and torch.backends.mps.is_available():
            self.device = "mps"
            self.is_fp16 = False  # fp16 is not reliably supported on MPS
            self.use_cuda_kernel = False
        else:
            self.device = "cpu"
            self.is_fp16 = False
            self.use_cuda_kernel = False
            print(">> Be patient, it may take a while to run in CPU mode.")

        self.cfg = OmegaConf.load(cfg_path)
        self.model_dir = model_dir
        self.dtype = torch.float16 if self.is_fp16 else None
        self.stop_mel_token = self.cfg.gpt.stop_mel_token
        self.use_accel = use_accel
        self.use_torch_compile = use_torch_compile

        self.qwen_emo = QwenEmotion(os.path.join(self.model_dir, self.cfg.qwen_emo_path))

        # GPT mel tokens per second of audio = sample_rate / mel_length_compression.
        dataset_sr = float(OmegaConf.select(self.cfg, "dataset.sample_rate", default=24000))
        mel_comp = float(OmegaConf.select(self.cfg, "gpt.mel_length_compression", default=1024))
        self.tokens_per_second = dataset_sr / mel_comp if mel_comp > 0 else None
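        # e.g. with the 24000 Hz / 1024 defaults above this is ~23.4 GPT mel
        # tokens per second; `infer` uses it to turn `duration_seconds` into
        # a token budget.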

        if gpt_checkpoint_path is not None:
            self.gpt_path = os.path.abspath(gpt_checkpoint_path)
        else:
            self.gpt_path = os.path.join(self.model_dir, self.cfg.gpt_checkpoint)
        if not os.path.isfile(self.gpt_path):
            raise FileNotFoundError(f"GPT checkpoint not found: {self.gpt_path}")
        gpt_state = self._load_gpt_state_dict(self.gpt_path)
        # Keep the configured text vocabulary in sync with the checkpoint.
        vocab_from_checkpoint = self._infer_vocab_size(gpt_state)
        if vocab_from_checkpoint:
            current_vocab = self.cfg.gpt.get("number_text_tokens", vocab_from_checkpoint)
            if current_vocab != vocab_from_checkpoint:
                print(
                    f">> Adjusting GPT config vocab size from "
                    f"{current_vocab} to {vocab_from_checkpoint} based on checkpoint."
                )
                self.cfg.gpt.number_text_tokens = vocab_from_checkpoint

        self.gpt = UnifiedVoice(**self.cfg.gpt, use_accel=self.use_accel)
        self._load_gpt_weights(self.gpt, gpt_state)
        self.gpt = self.gpt.to(self.device)
        if self.is_fp16:
            self.gpt.eval().half()
        else:
            self.gpt.eval()
        print(">> GPT weights restored from:", self.gpt_path)

        if use_deepspeed is None:
            use_deepspeed = os.environ.get("INDEXTTS_USE_DEEPSPEED", "1") != "0"
        else:
            os.environ["INDEXTTS_USE_DEEPSPEED"] = "1" if use_deepspeed else "0"
        if use_deepspeed:
            try:
                import deepspeed  # availability check only
            except (ImportError, OSError, CalledProcessError) as e:
                use_deepspeed = False
                print(f">> Failed to load DeepSpeed; falling back to standard inference: {e}")
        else:
            print(">> DeepSpeed usage disabled via INDEXTTS_USE_DEEPSPEED=0")

        self.gpt.post_init_gpt2_config(use_deepspeed=use_deepspeed, kv_cache=True, half=self.is_fp16)

        if self.use_cuda_kernel:
            # Preload the custom fused-activation CUDA kernel for BigVGAN.
            try:
                from indextts.BigVGAN.alias_free_activation.cuda import load

                anti_alias_activation_cuda = load.load()
                print(">> Preload custom CUDA kernel for BigVGAN", anti_alias_activation_cuda)
            except Exception as e:
                print(f">> Failed to load custom CUDA kernel for BigVGAN ({e}). Falling back to torch.")
                self.use_cuda_kernel = False

        self.extract_features = SeamlessM4TFeatureExtractor.from_pretrained("facebook/w2v-bert-2.0")
        self.semantic_model, self.semantic_mean, self.semantic_std = build_semantic_model(
            os.path.join(self.model_dir, self.cfg.w2v_stat))
        self.semantic_model = self.semantic_model.to(self.device)
        self.semantic_model.eval()
        self.semantic_mean = self.semantic_mean.to(self.device)
        self.semantic_std = self.semantic_std.to(self.device)

        semantic_codec = build_semantic_codec(self.cfg.semantic_codec)
        semantic_code_ckpt = hf_hub_download("amphion/MaskGCT", filename="semantic_codec/model.safetensors")
        safetensors.torch.load_model(semantic_codec, semantic_code_ckpt)
        self.semantic_codec = semantic_codec.to(self.device)
        self.semantic_codec.eval()
        print(f">> semantic_codec weights restored from: {semantic_code_ckpt}")

        s2mel_path = os.path.join(self.model_dir, self.cfg.s2mel_checkpoint)
        s2mel = MyModel(self.cfg.s2mel, use_gpt_latent=True)
        s2mel, _, _, _ = load_checkpoint2(
            s2mel,
            None,
            s2mel_path,
            load_only_params=True,
            ignore_modules=[],
            is_distributed=False,
        )
        self.s2mel = s2mel.to(self.device)
        self.s2mel.models['cfm'].estimator.setup_caches(max_batch_size=1, max_seq_length=8192)
        if self.use_torch_compile:
            print(">> Enabling torch.compile optimization")
            self.s2mel.enable_torch_compile()
            print(">> torch.compile optimization enabled successfully")
        self.s2mel.eval()
        print(">> s2mel weights restored from:", s2mel_path)

        # CAM++ speaker-embedding (style) extractor.
        campplus_ckpt_path = hf_hub_download(
| "funasr/campplus", filename="campplus_cn_common.bin" |
| ) |
| campplus_model = CAMPPlus(feat_dim=80, embedding_size=192) |
| campplus_model.load_state_dict(torch.load(campplus_ckpt_path, map_location="cpu")) |
| self.campplus_model = campplus_model.to(self.device) |
| self.campplus_model.eval() |
| print(">> campplus_model weights restored from:", campplus_ckpt_path) |
|
|
| bigvgan_name = self.cfg.vocoder.name |
| self.bigvgan = bigvgan.BigVGAN.from_pretrained(bigvgan_name, use_cuda_kernel=False) |
| self.bigvgan = self.bigvgan.to(self.device) |
| self.bigvgan.remove_weight_norm() |
| self.bigvgan.eval() |
| print(">> bigvgan weights restored from:", bigvgan_name) |
|
|
| if bpe_model_path is not None: |
            self.bpe_path = os.path.abspath(bpe_model_path)
        else:
            self.bpe_path = os.path.join(self.model_dir, self.cfg.dataset["bpe_model"])
        if not os.path.isfile(self.bpe_path):
            raise FileNotFoundError(f"BPE tokenizer not found: {self.bpe_path}")
        self.normalizer = TextNormalizer()
        self.normalizer.load()
        print(">> TextNormalizer loaded")
        self.tokenizer = TextTokenizer(self.bpe_path, self.normalizer)
        print(">> bpe model loaded from:", self.bpe_path)

        emo_matrix = torch.load(os.path.join(self.model_dir, self.cfg.emo_matrix))
        self.emo_matrix = emo_matrix.to(self.device)
        self.emo_num = list(self.cfg.emo_num)

        spk_matrix = torch.load(os.path.join(self.model_dir, self.cfg.spk_matrix))
        self.spk_matrix = spk_matrix.to(self.device)

        # Split both matrices into one block per emotion category.
        self.emo_matrix = torch.split(self.emo_matrix, self.emo_num)
        self.spk_matrix = torch.split(self.spk_matrix, self.emo_num)

        spect_params = self.cfg.s2mel['preprocess_params']['spect_params']
        mel_fn_args = {
| "n_fft": self.cfg.s2mel['preprocess_params']['spect_params']['n_fft'], |
| "win_size": self.cfg.s2mel['preprocess_params']['spect_params']['win_length'], |
| "hop_size": self.cfg.s2mel['preprocess_params']['spect_params']['hop_length'], |
| "num_mels": self.cfg.s2mel['preprocess_params']['spect_params']['n_mels'], |
| "sampling_rate": self.cfg.s2mel["preprocess_params"]["sr"], |
| "fmin": self.cfg.s2mel['preprocess_params']['spect_params'].get('fmin', 0), |
| "fmax": None if self.cfg.s2mel['preprocess_params']['spect_params'].get('fmax', "None") == "None" else 8000, |
| "center": False |
| } |
| self.mel_fn = lambda x: mel_spectrogram(x, **mel_fn_args) |
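        # Shape sketch: self.mel_fn maps a [B, samples] waveform (22.05 kHz)
        # to a [B, n_mels, frames] mel spectrogram, used as `ref_mel` below.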

        # Per-prompt caches: repeated calls with the same reference audio skip
        # feature extraction.
        self.cache_spk_cond = None
        self.cache_s2mel_style = None
        self.cache_s2mel_prompt = None
        self.cache_spk_audio_prompt = None
        self.cache_emo_cond = None
        self.cache_emo_audio_prompt = None
        self.cache_mel = None

        # Optional Gradio progress callback.
        self.gr_progress = None
        self.model_version = self.cfg.version if hasattr(self.cfg, "version") else None

    @torch.no_grad()
    def get_emb(self, input_features, attention_mask):
        # Normalized w2v-bert semantic features (hidden layer 17).
        vq_emb = self.semantic_model(
            input_features=input_features,
            attention_mask=attention_mask,
            output_hidden_states=True,
        )
        feat = vq_emb.hidden_states[17]
        feat = (feat - self.semantic_mean) / self.semantic_std
        return feat

    def remove_long_silence(self, codes: torch.Tensor, silent_token=52, max_consecutive=30):
| """ |
| Shrink special tokens (silent_token and stop_mel_token) in codes |
| codes: [B, T] |
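
        Example (hypothetical token ids, silent_token=52): a row
        [1, 2, <52 x 40>, 3] holds more than `max_consecutive` silence
        tokens in total, so each run is cut to at most 10 tokens,
        yielding [1, 2, <52 x 10>, 3] with a code length of 13.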
| """ |
| code_lens = [] |
        codes_list = []
        device = codes.device
        isfix = False
        for i in range(0, codes.shape[0]):
            code = codes[i]
            if not torch.any(code == self.stop_mel_token).item():
                len_ = code.size(0)
            else:
                stop_mel_idx = (code == self.stop_mel_token).nonzero(as_tuple=False)
                len_ = stop_mel_idx[0].item() if len(stop_mel_idx) > 0 else code.size(0)

            count = torch.sum(code == silent_token).item()
            if count > max_consecutive:
                # Rebuild the sequence, keeping at most 10 consecutive silent tokens.
                ncode_idx = []
                n = 0
                for k in range(len_):
                    assert code[k] != self.stop_mel_token, \
                        f"stop_mel_token {self.stop_mel_token} should have been trimmed here"
                    if code[k] != silent_token:
                        ncode_idx.append(k)
                        n = 0
                    elif code[k] == silent_token and n < 10:
                        ncode_idx.append(k)
                        n += 1
                len_ = len(ncode_idx)
                codes_list.append(code[ncode_idx])
                isfix = True
            else:
                codes_list.append(code[:len_])
            code_lens.append(len_)

        if isfix:
            if len(codes_list) > 1:
                codes = pad_sequence(codes_list, batch_first=True, padding_value=self.stop_mel_token)
            else:
                codes = codes_list[0].unsqueeze(0)

        # Trim trailing padding shared by the whole batch.
        max_len = max(code_lens)
        if max_len < codes.shape[1]:
            codes = codes[:, :max_len]
        code_lens = torch.tensor(code_lens, dtype=torch.long, device=device)
        return codes, code_lens

    def insert_interval_silence(self, wavs, sampling_rate=22050, interval_silence=200):
| """ |
| Insert silences between sentences. |
| wavs: List[torch.tensor] |
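
        Example: with interval_silence=200 at 22050 Hz, a zero block of
        int(22050 * 200 / 1000) = 4410 samples is placed between
        consecutive wavs: [w0, w1] -> [w0, sil, w1].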
| """ |
|
|
| if not wavs or interval_silence <= 0: |
            return wavs

        channel_size = wavs[0].size(0)
        # Silence duration in samples.
        sil_dur = int(sampling_rate * interval_silence / 1000.0)
        sil_tensor = torch.zeros(channel_size, sil_dur)

        wavs_list = []
        for i, wav in enumerate(wavs):
            wavs_list.append(wav)
            if i < len(wavs) - 1:
                wavs_list.append(sil_tensor)

        return wavs_list

    def _set_gr_progress(self, value, desc):
        if self.gr_progress is not None:
            self.gr_progress(value, desc=desc)

    def infer(self, spk_audio_prompt, text, output_path,
              emo_audio_prompt=None, emo_alpha=1.0,
              emo_vector=None,
              use_emo_text=False, emo_text=None, use_random=False, interval_silence=200,
              duration_seconds=None,
              verbose=False, max_text_tokens_per_sentence=120, **generation_kwargs):
        """
        Synthesize `text` in the voice of `spk_audio_prompt`.

        Emotion can be driven by a reference clip (`emo_audio_prompt`), an
        explicit 8-dimensional `emo_vector`, or inferred from text when
        `use_emo_text` is set. Returns `output_path` when a path is given,
        otherwise a `(sampling_rate, samples)` tuple.
        """
| print(">> start inference...") |
| self._set_gr_progress(0, "start inference...") |
| if verbose: |
| print(f"origin text:{text}, spk_audio_prompt:{spk_audio_prompt}," |
| f" emo_audio_prompt:{emo_audio_prompt}, emo_alpha:{emo_alpha}, " |
| f"emo_vector:{emo_vector}, use_emo_text:{use_emo_text}, " |
| f"emo_text:{emo_text}") |
| start_time = time.perf_counter() |
|
|
| if use_emo_text: |
            # Text-based emotion: derive the emotion vector from text and
            # ignore any emotion reference audio.
            emo_audio_prompt = None
            emo_alpha = 1.0
            if emo_text is None:
                emo_text = text
            emo_dict = self.qwen_emo.inference(emo_text)
            print(emo_dict)
            emo_vector = list(emo_dict.values())

        if emo_vector is not None:
            # An explicit emotion vector likewise overrides reference audio.
            emo_audio_prompt = None
            emo_alpha = 1.0

        if emo_audio_prompt is None:
            # Default: use the speaker prompt itself as the emotion reference.
            emo_audio_prompt = spk_audio_prompt
            emo_alpha = 1.0

        # Speaker conditioning, cached per reference-audio path.
        if self.cache_spk_cond is None or self.cache_spk_audio_prompt != spk_audio_prompt:
            audio, sr = librosa.load(spk_audio_prompt)
            audio = torch.tensor(audio).unsqueeze(0)
            audio_22k = torchaudio.transforms.Resample(sr, 22050)(audio)
            audio_16k = torchaudio.transforms.Resample(sr, 16000)(audio)

            inputs = self.extract_features(audio_16k, sampling_rate=16000, return_tensors="pt")
            input_features = inputs["input_features"].to(self.device)
            attention_mask = inputs["attention_mask"].to(self.device)
            spk_cond_emb = self.get_emb(input_features, attention_mask)

            _, S_ref = self.semantic_codec.quantize(spk_cond_emb)
            ref_mel = self.mel_fn(audio_22k.to(spk_cond_emb.device).float())
            ref_target_lengths = torch.LongTensor([ref_mel.size(2)]).to(ref_mel.device)
            feat = torchaudio.compliance.kaldi.fbank(audio_16k.to(ref_mel.device),
                                                     num_mel_bins=80,
                                                     dither=0,
                                                     sample_frequency=16000)
            feat = feat - feat.mean(dim=0, keepdim=True)
            style = self.campplus_model(feat.unsqueeze(0))

            prompt_condition = self.s2mel.models['length_regulator'](S_ref,
                                                                     ylens=ref_target_lengths,
                                                                     n_quantizers=3,
                                                                     f0=None)[0]

            self.cache_spk_cond = spk_cond_emb
            self.cache_s2mel_style = style
            self.cache_s2mel_prompt = prompt_condition
            self.cache_spk_audio_prompt = spk_audio_prompt
            self.cache_mel = ref_mel
        else:
            style = self.cache_s2mel_style
            prompt_condition = self.cache_s2mel_prompt
            spk_cond_emb = self.cache_spk_cond
            ref_mel = self.cache_mel

        if emo_vector is not None:
            weight_vector = torch.tensor(emo_vector).to(self.device)
            if use_random:
                random_index = [random.randint(0, x - 1) for x in self.emo_num]
            else:
                # Per emotion category, pick the entry whose speaker vector is
                # closest to the current style embedding.
                random_index = [find_most_similar_cosine(style, tmp) for tmp in self.spk_matrix]

            emo_matrix = [tmp[index].unsqueeze(0) for index, tmp in zip(random_index, self.emo_matrix)]
            emo_matrix = torch.cat(emo_matrix, 0)
            # Weighted sum of the selected per-category emotion embeddings.
            emovec_mat = weight_vector.unsqueeze(1) * emo_matrix
            emovec_mat = torch.sum(emovec_mat, 0)
            emovec_mat = emovec_mat.unsqueeze(0)

        # Emotion conditioning, cached per reference-audio path.
        if self.cache_emo_cond is None or self.cache_emo_audio_prompt != emo_audio_prompt:
            emo_audio, _ = librosa.load(emo_audio_prompt, sr=16000)
            emo_inputs = self.extract_features(emo_audio, sampling_rate=16000, return_tensors="pt")
            emo_input_features = emo_inputs["input_features"].to(self.device)
            emo_attention_mask = emo_inputs["attention_mask"].to(self.device)
            emo_cond_emb = self.get_emb(emo_input_features, emo_attention_mask)

            self.cache_emo_cond = emo_cond_emb
            self.cache_emo_audio_prompt = emo_audio_prompt
        else:
            emo_cond_emb = self.cache_emo_cond

        self._set_gr_progress(0.1, "text processing...")
        text_tokens_list = self.tokenizer.tokenize(text)
        sentences = self.tokenizer.split_segments(text_tokens_list,
                                                  max_text_tokens_per_segment=max_text_tokens_per_sentence)
        if verbose:
            print("text_tokens_list:", text_tokens_list)
            print("sentences count:", len(sentences))
            print("max_text_tokens_per_sentence:", max_text_tokens_per_sentence)
            print(*sentences, sep="\n")
        do_sample = generation_kwargs.pop("do_sample", True)
        top_p = generation_kwargs.pop("top_p", 0.8)
        top_k = generation_kwargs.pop("top_k", 30)
        temperature = generation_kwargs.pop("temperature", 0.8)
        autoregressive_batch_size = 1
        length_penalty = generation_kwargs.pop("length_penalty", 0.0)
        num_beams = generation_kwargs.pop("num_beams", 3)
        repetition_penalty = generation_kwargs.pop("repetition_penalty", 10.0)
        max_mel_tokens = generation_kwargs.pop("max_mel_tokens", 1500)
        sampling_rate = 22050

        wavs = []
        gpt_gen_time = 0
        gpt_forward_time = 0
        s2mel_time = 0
        bigvgan_time = 0
        progress = 0
        has_warned = False

        # Optionally convert a requested duration into a per-sentence
        # mel-token budget, capped by the model's maximum generation length.
        duration_plan = None
        target_duration_tokens = None
        if duration_seconds is not None:
            if duration_seconds > 0 and self.tokens_per_second:
                est_tokens = int(duration_seconds * self.tokens_per_second)
                max_len = getattr(self.gpt, "max_mel_tokens", None)
                if max_len:
                    est_tokens = min(est_tokens, max_len - 1)
                target_duration_tokens = max(1, est_tokens)
        if target_duration_tokens is not None and sentences:
            per_sentence = max(1, target_duration_tokens // len(sentences))
            duration_plan = [per_sentence for _ in sentences]
            # Distribute the remainder one token at a time.
            remainder = target_duration_tokens - per_sentence * len(sentences)
            idx = 0
            while remainder > 0 and duration_plan:
                duration_plan[idx % len(duration_plan)] += 1
                remainder -= 1
                idx += 1
            max_len = getattr(self.gpt, "max_mel_tokens", None)
            if max_len:
                duration_plan = [min(t, max_len - 1) for t in duration_plan]
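        # Worked example (assuming the default ~23.4 tokens/s):
        # duration_seconds=4.0 gives int(4.0 * 23.4375) = 93 target tokens;
        # with 2 sentences the plan is [47, 46], and each entry is later
        # passed to `inference_speech` as `target_duration_tokens`.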

        for idx_sent, sent in enumerate(sentences):
            text_tokens = self.tokenizer.convert_tokens_to_ids(sent)
            text_tokens = torch.tensor(text_tokens, dtype=torch.int32, device=self.device).unsqueeze(0)
            if verbose:
                print(text_tokens)
                print(f"text_tokens shape: {text_tokens.shape}, text_tokens type: {text_tokens.dtype}")
                # Sanity check: the ids should decode back to the sentence tokens.
                text_token_syms = self.tokenizer.convert_ids_to_tokens(text_tokens[0].tolist())
                print("text_token_syms is same as sentence tokens", text_token_syms == sent)

            m_start_time = time.perf_counter()
            with torch.no_grad():
                with torch.amp.autocast(text_tokens.device.type, enabled=self.dtype is not None, dtype=self.dtype):
                    emovec = self.gpt.merge_emovec(
                        spk_cond_emb,
                        emo_cond_emb,
                        torch.tensor([spk_cond_emb.shape[-1]], device=text_tokens.device),
                        torch.tensor([emo_cond_emb.shape[-1]], device=text_tokens.device),
                        alpha=emo_alpha
                    )

                    if emo_vector is not None:
                        # Blend the matrix-derived emotion embedding with the
                        # audio-derived one, weighted by the vector's total mass.
                        emovec = emovec_mat + (1 - torch.sum(weight_vector)) * emovec

                    sentence_duration_tokens = None
                    if duration_plan:
                        sentence_duration_tokens = duration_plan[min(idx_sent, len(duration_plan) - 1)]
                    codes, speech_conditioning_latent = self.gpt.inference_speech(
                        spk_cond_emb,
                        text_tokens,
                        emo_cond_emb,
                        cond_lengths=torch.tensor([spk_cond_emb.shape[-1]], device=text_tokens.device),
                        emo_cond_lengths=torch.tensor([emo_cond_emb.shape[-1]], device=text_tokens.device),
                        emo_vec=emovec,
                        do_sample=do_sample,
                        top_p=top_p,
                        top_k=top_k,
                        temperature=temperature,
                        num_return_sequences=autoregressive_batch_size,
                        length_penalty=length_penalty,
                        num_beams=num_beams,
                        repetition_penalty=repetition_penalty,
                        max_generate_length=max_mel_tokens,
                        target_duration_tokens=sentence_duration_tokens,
                        **generation_kwargs
                    )

                gpt_gen_time += time.perf_counter() - m_start_time
                if not has_warned and (codes[:, -1] != self.stop_mel_token).any():
                    warnings.warn(
                        f"WARN: generation stopped due to exceeding `max_mel_tokens` ({max_mel_tokens}). "
                        f"Input text tokens: {text_tokens.shape[1]}. "
                        f"Consider reducing `max_text_tokens_per_sentence`({max_text_tokens_per_sentence}) or increasing `max_mel_tokens`.",
                        category=RuntimeWarning
                    )
                    has_warned = True

                # Trim each sequence at its first stop token.
                code_lens = []
                for code in codes:
                    if self.stop_mel_token not in code:
                        code_len = len(code)
                    else:
                        code_len = (code == self.stop_mel_token).nonzero(as_tuple=False)[0].item()
                    code_lens.append(code_len)
                codes = codes[:, :code_len]
                code_lens = torch.LongTensor(code_lens).to(self.device)
                if verbose:
                    print(codes, type(codes))
                    print(f"fix codes shape: {codes.shape}, codes type: {codes.dtype}")
                    print(f"code len: {code_lens}")

                m_start_time = time.perf_counter()
                use_speed = torch.zeros(spk_cond_emb.size(0)).to(spk_cond_emb.device).long()
                with torch.amp.autocast(text_tokens.device.type, enabled=self.dtype is not None, dtype=self.dtype):
                    # Teacher-forced GPT pass to obtain latents for S2Mel.
                    latent = self.gpt(
                        speech_conditioning_latent,
                        text_tokens,
                        torch.tensor([text_tokens.shape[-1]], device=text_tokens.device),
                        codes,
                        torch.tensor([codes.shape[-1]], device=text_tokens.device),
                        emo_cond_emb,
                        cond_mel_lengths=torch.tensor([spk_cond_emb.shape[-1]], device=text_tokens.device),
                        emo_cond_mel_lengths=torch.tensor([emo_cond_emb.shape[-1]], device=text_tokens.device),
                        emo_vec=emovec,
                        use_speed=use_speed,
                    )
                gpt_forward_time += time.perf_counter() - m_start_time

                # The S2Mel stack runs in full precision (autocast disabled).
                with torch.amp.autocast(text_tokens.device.type, enabled=False):
                    m_start_time = time.perf_counter()
                    diffusion_steps = 25
                    inference_cfg_rate = 0.7
                    latent = self.s2mel.models['gpt_layer'](latent)
                    S_infer = self.semantic_codec.quantizer.vq2emb(codes.unsqueeze(1))
                    S_infer = S_infer.transpose(1, 2)
                    S_infer = S_infer + latent
                    # ~1.72 mel frames per semantic token (empirical ratio).
                    target_lengths = (code_lens * 1.72).long()

                    cond = self.s2mel.models['length_regulator'](
                        S_infer,
                        ylens=target_lengths,
                        n_quantizers=3,
                        f0=None,
                    )[0]
                    # Align batch sizes between the prompt condition and the
                    # generated condition before concatenation.
                    prompt_condition_batch = prompt_condition
                    if prompt_condition_batch.size(0) != cond.size(0):
                        if prompt_condition_batch.size(0) == 1:
                            prompt_condition_batch = prompt_condition_batch.repeat(cond.size(0), 1, 1)
                        elif cond.size(0) == 1:
                            cond = cond.repeat(prompt_condition_batch.size(0), 1, 1)
                        else:
                            min_batch = min(prompt_condition_batch.size(0), cond.size(0))
                            print(
                                f">> Warning: cond batch {cond.size(0)} mismatch with prompt {prompt_condition_batch.size(0)}; "
                                f"truncating to {min_batch}",
                                flush=True,
                            )
                            prompt_condition_batch = prompt_condition_batch[:min_batch]
                            cond = cond[:min_batch]
                    if cond.size(0) > 1:
                        print(
                            f">> Warning: cond batch {cond.size(0)} exceeds 1; truncating to the first sample "
                            "to satisfy CFM solver assumptions.",
                            flush=True,
                        )
                        cond = cond[:1]
                        prompt_condition_batch = prompt_condition_batch[:1]
                    cat_condition = torch.cat([prompt_condition_batch, cond], dim=1)

                    style_batch = style
                    if style_batch.dim() == 1:
                        style_batch = style_batch.unsqueeze(0)
                    if style_batch.size(0) != cat_condition.size(0):
                        if style_batch.size(0) == 1:
                            style_batch = style_batch.repeat(cat_condition.size(0), 1)
                        else:
                            style_batch = style_batch[:cat_condition.size(0)]

                    ref_mel_batch = ref_mel
                    if ref_mel_batch.size(0) != cat_condition.size(0):
                        if ref_mel_batch.size(0) == 1:
                            ref_mel_batch = ref_mel_batch.repeat(cat_condition.size(0), 1, 1)
                        else:
                            ref_mel_batch = ref_mel_batch[:cat_condition.size(0)]

                    mel_lengths = torch.full(
                        (cat_condition.size(0),),
                        cat_condition.size(1),
                        dtype=torch.long,
                        device=cond.device,
                    )
                    vc_target = self.s2mel.models['cfm'].inference(
                        cat_condition,
                        mel_lengths,
                        ref_mel_batch,
                        style_batch,
                        None,
                        diffusion_steps,
                        inference_cfg_rate=inference_cfg_rate,
                    )
                    # Drop the prompt region; keep only newly generated frames.
                    vc_target = vc_target[:, :, ref_mel_batch.size(-1):]
                    s2mel_time += time.perf_counter() - m_start_time

                m_start_time = time.perf_counter()
                wav = self.bigvgan(vc_target.float()).squeeze().unsqueeze(0)
                bigvgan_time += time.perf_counter() - m_start_time
                wav = wav.squeeze(1)

                # Scale to 16-bit PCM range.
                wav = torch.clamp(32767 * wav, -32767.0, 32767.0)
                if verbose:
                    print(f"wav shape: {wav.shape}", "min:", wav.min(), "max:", wav.max())

                wavs.append(wav.cpu())

        end_time = time.perf_counter()
        self._set_gr_progress(0.9, "save audio...")
        wavs = self.insert_interval_silence(wavs, sampling_rate=sampling_rate, interval_silence=interval_silence)
        wav = torch.cat(wavs, dim=1)
        wav_length = wav.shape[-1] / sampling_rate
        print(f">> gpt_gen_time: {gpt_gen_time:.2f} seconds")
        print(f">> gpt_forward_time: {gpt_forward_time:.2f} seconds")
        print(f">> s2mel_time: {s2mel_time:.2f} seconds")
        print(f">> bigvgan_time: {bigvgan_time:.2f} seconds")
        print(f">> Total inference time: {end_time - start_time:.2f} seconds")
        print(f">> Generated audio length: {wav_length:.2f} seconds")
        print(f">> RTF: {(end_time - start_time) / wav_length:.4f}")

        # Save to disk as 16-bit PCM, or return the raw samples.
        wav = wav.cpu()
        if output_path:
            if os.path.isfile(output_path):
                os.remove(output_path)
                print(">> remove old wav file:", output_path)
            if os.path.dirname(output_path) != "":
                os.makedirs(os.path.dirname(output_path), exist_ok=True)
            torchaudio.save(output_path, wav.type(torch.int16), sampling_rate)
            print(">> wav file saved to:", output_path)
            return output_path
        else:
            wav_data = wav.type(torch.int16).numpy().T
            return (sampling_rate, wav_data)


def find_most_similar_cosine(query_vector, matrix):
    """Return the row index of `matrix` most cosine-similar to `query_vector`."""
    query_vector = query_vector.float()
    matrix = matrix.float()

    similarities = F.cosine_similarity(query_vector, matrix, dim=1)
    most_similar_index = torch.argmax(similarities)
    return most_similar_index


class QwenEmotion:
    def __init__(self, model_dir):
        self.model_dir = model_dir
        self.tokenizer = AutoTokenizer.from_pretrained(self.model_dir)
        self.model = AutoModelForCausalLM.from_pretrained(
            self.model_dir,
            torch_dtype="float16",
            device_map="auto"
        )
        self.prompt = "文本情感分类"  # system prompt: "text emotion classification"
        self.cn_key_to_en = {
            "高兴": "happy",
            "愤怒": "angry",
            "悲伤": "sad",
            "恐惧": "afraid",
            "反感": "disgusted",
            "低落": "melancholic",
            "惊讶": "surprised",
            "自然": "calm",
        }
        # happy, angry, sad, afraid, disgusted, melancholic, surprised, calm
        self.desired_vector_order = ["高兴", "愤怒", "悲伤", "恐惧", "反感", "低落", "惊讶", "自然"]
        self.melancholic_words = {
            "低落",
            "melancholy",
            "melancholic",
            "depression",
            "depressed",
            "gloomy",
        }
        self.max_score = 1.2
        self.min_score = 0.0

    def clamp_score(self, value):
        return max(self.min_score, min(self.max_score, value))

    def convert(self, content):
        # Map the model's Chinese emotion keys to English in a fixed order,
        # clamping each score to [min_score, max_score].
        emotion_dict = {
            self.cn_key_to_en[cn_key]: self.clamp_score(content.get(cn_key, 0.0))
            for cn_key in self.desired_vector_order
        }

        # Fall back to a neutral voice when nothing was detected.
        if all(val <= 0.0 for val in emotion_dict.values()):
            print(">> no emotions detected; using default calm/neutral voice")
            emotion_dict["calm"] = 1.0

        return emotion_dict
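
    # Example output of `convert` (scores clamped to [0.0, 1.2]), assuming the
    # model reported only "悲伤": 0.3:
    #   {"happy": 0.0, "angry": 0.0, "sad": 0.3, "afraid": 0.0,
    #    "disgusted": 0.0, "melancholic": 0.0, "surprised": 0.0, "calm": 0.0}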

    def inference(self, text_input):
        start = time.time()
        messages = [
            {"role": "system", "content": f"{self.prompt}"},
            {"role": "user", "content": f"{text_input}"}
        ]
        text = self.tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            add_generation_prompt=True,
            enable_thinking=False,
        )
        model_inputs = self.tokenizer([text], return_tensors="pt").to(self.model.device)

        generated_ids = self.model.generate(
            **model_inputs,
            max_new_tokens=32768,
            pad_token_id=self.tokenizer.eos_token_id
        )
        output_ids = generated_ids[0][len(model_inputs.input_ids[0]):].tolist()

        try:
            # Skip past the last </think> token (id 151668) if one was emitted.
            index = len(output_ids) - output_ids[::-1].index(151668)
        except ValueError:
            index = 0

        content = self.tokenizer.decode(output_ids[index:], skip_special_tokens=True)

        try:
            content = json.loads(content)
        except json.decoder.JSONDecodeError:
            # Best-effort recovery when the model returns malformed JSON:
            # scrape `key: value` number pairs out of the raw text.
            content = {
                m.group(1): float(m.group(2))
                for m in re.finditer(r'([^\s":.,]+?)"?\s*:\s*([\d.]+)', content)
            }

        # The classifier tends to conflate melancholic (低落) with sad (悲伤);
        # swap the two scores when the input explicitly mentions melancholy.
        text_input_lower = text_input.lower()
        if any(word in text_input_lower for word in self.melancholic_words):
            content["悲伤"], content["低落"] = content.get("低落", 0.0), content.get("悲伤", 0.0)

        return self.convert(content)


if __name__ == "__main__":
    prompt_wav = "examples/voice_01.wav"
    # "Welcome to try IndexTTS2, and please give us your comments and feedback. Thank you!"
    text = '欢迎大家来体验indextts2,并给予我们意见与反馈,谢谢大家。'

    tts = IndexTTS2(cfg_path="checkpoints/config.yaml", model_dir="checkpoints", use_cuda_kernel=False)
    tts.infer(spk_audio_prompt=prompt_wav, text=text, output_path="gen.wav", verbose=True)