Initial upload from script
This view is limited to 50 files because it contains too many changes. See raw diff
- .gitattributes +11 -0
- .gitignore +1 -0
- Colab/StyleTTS2_Demo_LJSpeech.ipynb +486 -0
- Colab/StyleTTS2_Demo_LibriTTS.ipynb +1218 -0
- Colab/StyleTTS2_Finetune_Demo.ipynb +480 -0
- Configs/config.yml +116 -0
- Configs/config_ft.yml +111 -0
- Configs/config_libritts.yml +113 -0
- Data/OOD_texts.txt +3 -0
- Data/train_list.txt +0 -0
- Data/val_list.txt +100 -0
- Demo/Inference_LJSpeech.ipynb +554 -0
- Demo/Inference_LibriTTS.ipynb +1155 -0
- LICENSE +21 -0
- Modules/__init__.py +1 -0
- Modules/diffusion/__init__.py +1 -0
- Modules/diffusion/diffusion.py +94 -0
- Modules/diffusion/modules.py +693 -0
- Modules/diffusion/sampler.py +691 -0
- Modules/diffusion/utils.py +82 -0
- Modules/discriminators.py +188 -0
- Modules/hifigan.py +477 -0
- Modules/istftnet.py +530 -0
- Modules/slmadv.py +195 -0
- Modules/utils.py +14 -0
- README.md +125 -0
- Utils/ASR/__init__.py +1 -0
- Utils/ASR/config.yml +29 -0
- Utils/ASR/layers.py +354 -0
- Utils/ASR/models.py +186 -0
- Utils/JDC/__init__.py +1 -0
- Utils/JDC/bst.t7 +3 -0
- Utils/JDC/model.py +190 -0
- Utils/PLBERT/config.yml +30 -0
- Utils/PLBERT/step_1000000.t7 +3 -0
- Utils/PLBERT/step_1000000.t7.backup +3 -0
- Utils/PLBERT/util.py +42 -0
- Utils/__init__.py +1 -0
- Utils_original/ASR/__init__.py +1 -0
- Utils_original/ASR/config.yml +29 -0
- Utils_original/ASR/layers.py +354 -0
- Utils_original/ASR/models.py +186 -0
- Utils_original/JDC/__init__.py +1 -0
- Utils_original/JDC/bst.t7 +3 -0
- Utils_original/JDC/model.py +190 -0
- Utils_original/PLBERT/config.yml +30 -0
- Utils_original/PLBERT/step_1000000.t7 +3 -0
- Utils_original/PLBERT/util.py +42 -0
- Utils_original/__init__.py +1 -0
- audio_clone/sangnq_clone.mp3 +3 -0
.gitattributes
CHANGED
@@ -33,3 +33,14 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+Data/OOD_texts.txt filter=lfs diff=lfs merge=lfs -text
+Utils/JDC/bst.t7 filter=lfs diff=lfs merge=lfs -text
+Utils/PLBERT/step_1000000.t7 filter=lfs diff=lfs merge=lfs -text
+Utils/PLBERT/step_1000000.t7.backup filter=lfs diff=lfs merge=lfs -text
+Utils_original/JDC/bst.t7 filter=lfs diff=lfs merge=lfs -text
+Utils_original/PLBERT/step_1000000.t7 filter=lfs diff=lfs merge=lfs -text
+audio_clone/sangnq_clone.mp3 filter=lfs diff=lfs merge=lfs -text
+audio_clone/sangnq_clone.wav filter=lfs diff=lfs merge=lfs -text
+audio_ref/huyen_linh_vbee.wav filter=lfs diff=lfs merge=lfs -text
+audio_ref/sangnq.wav filter=lfs diff=lfs merge=lfs -text
+audio_ref/sangnqVbee_long.mp3 filter=lfs diff=lfs merge=lfs -text
.gitignore
ADDED
@@ -0,0 +1 @@
+__pycache__
Colab/StyleTTS2_Demo_LJSpeech.ipynb
ADDED
@@ -0,0 +1,486 @@
+{
+"nbformat": 4,
+"nbformat_minor": 0,
+"metadata": {
+"colab": {
+"provenance": [],
+"gpuType": "T4",
+"authorship_tag": "ABX9TyM1x2mx2VnkYNFVlD+DFzmy",
+"include_colab_link": true
+},
+"kernelspec": {
+"name": "python3",
+"display_name": "Python 3"
+},
+"language_info": {
+"name": "python"
+},
+"accelerator": "GPU"
+},
+"cells": [
+{
+"cell_type": "markdown",
+"metadata": {
+"id": "view-in-github",
+"colab_type": "text"
+},
+"source": [
+"<a href=\"https://colab.research.google.com/github/yl4579/StyleTTS2/blob/main/Colab/StyleTTS2_Demo_LJSpeech.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
+]
+},
+{
+"cell_type": "markdown",
+"source": [
+"### Install packages and download models"
+],
+"metadata": {
+"id": "nm653VK4CG9F"
+}
+},
+{
+"cell_type": "code",
+"source": [
+"%%shell\n",
+"git clone https://github.com/yl4579/StyleTTS2.git\n",
+"cd StyleTTS2\n",
+"pip install SoundFile torchaudio munch torch pydub pyyaml librosa nltk matplotlib accelerate transformers phonemizer einops einops-exts tqdm typing-extensions git+https://github.com/resemble-ai/monotonic_align.git\n",
+"sudo apt-get install espeak-ng\n",
+"git-lfs clone https://huggingface.co/yl4579/StyleTTS2-LJSpeech\n",
+"mv StyleTTS2-LJSpeech/Models ."
+],
+"metadata": {
+"id": "gciBKMqCCLvT"
+},
+"execution_count": null,
+"outputs": []
+},
+{
+"cell_type": "markdown",
+"source": [
+"### Load models"
+],
+"metadata": {
+"id": "OAA8lx-XCQnM"
+}
+},
+{
+"cell_type": "code",
+"source": [
+"%cd StyleTTS2\n",
+"\n",
+"import torch\n",
+"torch.manual_seed(0)\n",
+"torch.backends.cudnn.benchmark = False\n",
+"torch.backends.cudnn.deterministic = True\n",
+"\n",
+"import random\n",
+"random.seed(0)\n",
+"\n",
+"import numpy as np\n",
+"np.random.seed(0)\n",
+"\n",
+"import nltk\n",
+"nltk.download('punkt')\n",
+"\n",
+"# load packages\n",
+"import time\n",
+"import random\n",
+"import yaml\n",
+"from munch import Munch\n",
+"import numpy as np\n",
+"import torch\n",
+"from torch import nn\n",
+"import torch.nn.functional as F\n",
+"import torchaudio\n",
+"import librosa\n",
+"from nltk.tokenize import word_tokenize\n",
+"\n",
+"from models import *\n",
+"from utils import *\n",
+"from text_utils import TextCleaner\n",
+"textclenaer = TextCleaner()\n",
+"\n",
+"%matplotlib inline\n",
+"\n",
+"device = 'cuda' if torch.cuda.is_available() else 'cpu'\n",
+"\n",
+"to_mel = torchaudio.transforms.MelSpectrogram(\n",
+"    n_mels=80, n_fft=2048, win_length=1200, hop_length=300)\n",
+"mean, std = -4, 4\n",
+"\n",
+"def length_to_mask(lengths):\n",
+"    mask = torch.arange(lengths.max()).unsqueeze(0).expand(lengths.shape[0], -1).type_as(lengths)\n",
+"    mask = torch.gt(mask+1, lengths.unsqueeze(1))\n",
+"    return mask\n",
+"\n",
+"def preprocess(wave):\n",
+"    wave_tensor = torch.from_numpy(wave).float()\n",
+"    mel_tensor = to_mel(wave_tensor)\n",
+"    mel_tensor = (torch.log(1e-5 + mel_tensor.unsqueeze(0)) - mean) / std\n",
+"    return mel_tensor\n",
+"\n",
+"def compute_style(ref_dicts):\n",
+"    reference_embeddings = {}\n",
+"    for key, path in ref_dicts.items():\n",
+"        wave, sr = librosa.load(path, sr=24000)\n",
+"        audio, index = librosa.effects.trim(wave, top_db=30)\n",
+"        if sr != 24000:\n",
+"            audio = librosa.resample(audio, sr, 24000)\n",
+"        mel_tensor = preprocess(audio).to(device)\n",
+"\n",
+"        with torch.no_grad():\n",
+"            ref = model.style_encoder(mel_tensor.unsqueeze(1))\n",
+"        reference_embeddings[key] = (ref.squeeze(1), audio)\n",
+"\n",
+"    return reference_embeddings\n",
+"\n",
+"# load phonemizer\n",
+"import phonemizer\n",
+"global_phonemizer = phonemizer.backend.EspeakBackend(language='en-us', preserve_punctuation=True, with_stress=True, words_mismatch='ignore')\n",
+"\n",
+"config = yaml.safe_load(open(\"Models/LJSpeech/config.yml\"))\n",
+"\n",
+"# load pretrained ASR model\n",
+"ASR_config = config.get('ASR_config', False)\n",
+"ASR_path = config.get('ASR_path', False)\n",
+"text_aligner = load_ASR_models(ASR_path, ASR_config)\n",
+"\n",
+"# load pretrained F0 model\n",
+"F0_path = config.get('F0_path', False)\n",
+"pitch_extractor = load_F0_models(F0_path)\n",
+"\n",
+"# load BERT model\n",
+"from Utils.PLBERT.util import load_plbert\n",
+"BERT_path = config.get('PLBERT_dir', False)\n",
+"plbert = load_plbert(BERT_path)\n",
+"\n",
+"model = build_model(recursive_munch(config['model_params']), text_aligner, pitch_extractor, plbert)\n",
+"_ = [model[key].eval() for key in model]\n",
+"_ = [model[key].to(device) for key in model]\n",
+"\n",
+"params_whole = torch.load(\"Models/LJSpeech/epoch_2nd_00100.pth\", map_location='cpu')\n",
+"params = params_whole['net']\n",
+"\n",
+"for key in model:\n",
+"    if key in params:\n",
+"        print('%s loaded' % key)\n",
+"        try:\n",
+"            model[key].load_state_dict(params[key])\n",
+"        except:\n",
+"            from collections import OrderedDict\n",
+"            state_dict = params[key]\n",
+"            new_state_dict = OrderedDict()\n",
+"            for k, v in state_dict.items():\n",
+"                name = k[7:] # remove `module.`\n",
+"                new_state_dict[name] = v\n",
+"            # load params\n",
+"            model[key].load_state_dict(new_state_dict, strict=False)\n",
+"# except:\n",
+"#     _load(params[key], model[key])\n",
+"_ = [model[key].eval() for key in model]\n",
+"\n",
+"from Modules.diffusion.sampler import DiffusionSampler, ADPM2Sampler, KarrasSchedule\n",
+"\n",
+"sampler = DiffusionSampler(\n",
+"    model.diffusion.diffusion,\n",
+"    sampler=ADPM2Sampler(),\n",
+"    sigma_schedule=KarrasSchedule(sigma_min=0.0001, sigma_max=3.0, rho=9.0), # empirical parameters\n",
+"    clamp=False\n",
+")\n",
+"\n",
+"def inference(text, noise, diffusion_steps=5, embedding_scale=1):\n",
+"    text = text.strip()\n",
+"    text = text.replace('\"', '')\n",
+"    ps = global_phonemizer.phonemize([text])\n",
+"    ps = word_tokenize(ps[0])\n",
+"    ps = ' '.join(ps)\n",
+"\n",
+"    tokens = textclenaer(ps)\n",
+"    tokens.insert(0, 0)\n",
+"    tokens = torch.LongTensor(tokens).to(device).unsqueeze(0)\n",
+"\n",
+"    with torch.no_grad():\n",
+"        input_lengths = torch.LongTensor([tokens.shape[-1]]).to(tokens.device)\n",
+"        text_mask = length_to_mask(input_lengths).to(tokens.device)\n",
+"\n",
+"        t_en = model.text_encoder(tokens, input_lengths, text_mask)\n",
+"        bert_dur = model.bert(tokens, attention_mask=(~text_mask).int())\n",
+"        d_en = model.bert_encoder(bert_dur).transpose(-1, -2)\n",
+"\n",
+"        s_pred = sampler(noise,\n",
+"                         embedding=bert_dur[0].unsqueeze(0), num_steps=diffusion_steps,\n",
+"                         embedding_scale=embedding_scale).squeeze(0)\n",
+"\n",
+"        s = s_pred[:, 128:]\n",
+"        ref = s_pred[:, :128]\n",
+"\n",
+"        d = model.predictor.text_encoder(d_en, s, input_lengths, text_mask)\n",
+"\n",
+"        x, _ = model.predictor.lstm(d)\n",
+"        duration = model.predictor.duration_proj(x)\n",
+"        duration = torch.sigmoid(duration).sum(axis=-1)\n",
+"        pred_dur = torch.round(duration.squeeze()).clamp(min=1)\n",
+"\n",
+"        pred_dur[-1] += 5\n",
+"\n",
+"        pred_aln_trg = torch.zeros(input_lengths, int(pred_dur.sum().data))\n",
+"        c_frame = 0\n",
+"        for i in range(pred_aln_trg.size(0)):\n",
+"            pred_aln_trg[i, c_frame:c_frame + int(pred_dur[i].data)] = 1\n",
+"            c_frame += int(pred_dur[i].data)\n",
+"\n",
+"        # encode prosody\n",
+"        en = (d.transpose(-1, -2) @ pred_aln_trg.unsqueeze(0).to(device))\n",
+"        F0_pred, N_pred = model.predictor.F0Ntrain(en, s)\n",
+"        out = model.decoder((t_en @ pred_aln_trg.unsqueeze(0).to(device)),\n",
+"                            F0_pred, N_pred, ref.squeeze().unsqueeze(0))\n",
+"\n",
+"    return out.squeeze().cpu().numpy()\n",
+"\n",
+"def LFinference(text, s_prev, noise, alpha=0.7, diffusion_steps=5, embedding_scale=1):\n",
+"    text = text.strip()\n",
+"    text = text.replace('\"', '')\n",
+"    ps = global_phonemizer.phonemize([text])\n",
+"    ps = word_tokenize(ps[0])\n",
+"    ps = ' '.join(ps)\n",
+"\n",
+"    tokens = textclenaer(ps)\n",
+"    tokens.insert(0, 0)\n",
+"    tokens = torch.LongTensor(tokens).to(device).unsqueeze(0)\n",
+"\n",
+"    with torch.no_grad():\n",
+"        input_lengths = torch.LongTensor([tokens.shape[-1]]).to(tokens.device)\n",
+"        text_mask = length_to_mask(input_lengths).to(tokens.device)\n",
+"\n",
+"        t_en = model.text_encoder(tokens, input_lengths, text_mask)\n",
+"        bert_dur = model.bert(tokens, attention_mask=(~text_mask).int())\n",
+"        d_en = model.bert_encoder(bert_dur).transpose(-1, -2)\n",
+"\n",
+"        s_pred = sampler(noise,\n",
+"                         embedding=bert_dur[0].unsqueeze(0), num_steps=diffusion_steps,\n",
+"                         embedding_scale=embedding_scale).squeeze(0)\n",
+"\n",
+"        if s_prev is not None:\n",
+"            # convex combination of previous and current style\n",
+"            s_pred = alpha * s_prev + (1 - alpha) * s_pred\n",
+"\n",
+"        s = s_pred[:, 128:]\n",
+"        ref = s_pred[:, :128]\n",
+"\n",
+"        d = model.predictor.text_encoder(d_en, s, input_lengths, text_mask)\n",
+"\n",
+"        x, _ = model.predictor.lstm(d)\n",
+"        duration = model.predictor.duration_proj(x)\n",
+"        duration = torch.sigmoid(duration).sum(axis=-1)\n",
+"        pred_dur = torch.round(duration.squeeze()).clamp(min=1)\n",
+"\n",
+"        pred_aln_trg = torch.zeros(input_lengths, int(pred_dur.sum().data))\n",
+"        c_frame = 0\n",
+"        for i in range(pred_aln_trg.size(0)):\n",
+"            pred_aln_trg[i, c_frame:c_frame + int(pred_dur[i].data)] = 1\n",
+"            c_frame += int(pred_dur[i].data)\n",
+"\n",
+"        # encode prosody\n",
+"        en = (d.transpose(-1, -2) @ pred_aln_trg.unsqueeze(0).to(device))\n",
+"        F0_pred, N_pred = model.predictor.F0Ntrain(en, s)\n",
+"        out = model.decoder((t_en @ pred_aln_trg.unsqueeze(0).to(device)),\n",
+"                            F0_pred, N_pred, ref.squeeze().unsqueeze(0))\n",
+"\n",
+"    return out.squeeze().cpu().numpy(), s_pred"
+],
+"metadata": {
+"id": "m0XRpbxSCSix"
+},
+"execution_count": null,
+"outputs": []
+},
+{
+"cell_type": "markdown",
+"source": [
+"### Synthesize speech"
+],
+"metadata": {
+"id": "vuCbS0gdArgJ"
+}
+},
+{
+"cell_type": "code",
+"source": [
+"# @title Input Text { display-mode: \"form\" }\n",
+"# synthesize a text\n",
+"text = \"StyleTTS 2 is a text-to-speech model that leverages style diffusion and adversarial training with large speech language models to achieve human-level text-to-speech synthesis.\" # @param {type:\"string\"}\n"
+],
+"metadata": {
+"id": "7Ud1Y-kbBPTw"
+},
+"execution_count": 3,
+"outputs": []
+},
+{
+"cell_type": "markdown",
+"source": [
+"#### Basic synthesis (5 diffusion steps)"
+],
+"metadata": {
+"id": "TM2NjuM7B6sz"
+}
+},
+{
+"cell_type": "code",
+"source": [
+"start = time.time()\n",
+"noise = torch.randn(1,1,256).to(device)\n",
+"wav = inference(text, noise, diffusion_steps=5, embedding_scale=1)\n",
+"rtf = (time.time() - start) / (len(wav) / 24000)\n",
+"print(f\"RTF = {rtf:5f}\")\n",
+"import IPython.display as ipd\n",
+"display(ipd.Audio(wav, rate=24000))"
+],
+"metadata": {
+"id": "KILqC-V-Ay5e"
+},
+"execution_count": null,
+"outputs": []
+},
+{
+"cell_type": "markdown",
+"source": [
+"#### With higher diffusion steps (more diverse)\n",
+"Since the sampler is ancestral, the higher the number of steps, the more diverse the samples are, at the cost of slower synthesis speed."
+],
+"metadata": {
+"id": "oZk9o-EzCBVx"
+}
+},
+{
+"cell_type": "code",
+"source": [
+"start = time.time()\n",
+"noise = torch.randn(1,1,256).to(device)\n",
+"wav = inference(text, noise, diffusion_steps=10, embedding_scale=1)\n",
+"rtf = (time.time() - start) / (len(wav) / 24000)\n",
+"print(f\"RTF = {rtf:5f}\")\n",
+"import IPython.display as ipd\n",
+"display(ipd.Audio(wav, rate=24000))"
+],
+"metadata": {
+"id": "9_OHtzMbB9gL"
+},
+"execution_count": null,
+"outputs": []
+},
+{
+"cell_type": "markdown",
+"source": [
+"### Speech expressiveness\n",
+"The following section recreates the samples shown in [Section 6](https://styletts2.github.io/#emo) of the demo page."
+],
+"metadata": {
+"id": "NyDACd-0CaqL"
+}
+},
+{
+"cell_type": "markdown",
+"source": [
+"#### With embedding_scale=1\n",
+"This is the classifier-free guidance scale. The higher the scale, the more the style is conditioned on the input text, and hence the more emotional the speech."
+],
+"metadata": {
+"id": "cRkS5VWxCck4"
+}
+},
+{
+"cell_type": "code",
+"source": [
+"texts = {}\n",
+"texts['Happy'] = \"We are happy to invite you to join us on a journey to the past, where we will visit the most amazing monuments ever built by human hands.\"\n",
+"texts['Sad'] = \"I am sorry to say that we have suffered a severe setback in our efforts to restore prosperity and confidence.\"\n",
+"texts['Angry'] = \"The field of astronomy is a joke! Its theories are based on flawed observations and biased interpretations!\"\n",
+"texts['Surprised'] = \"I can't believe it! You mean to tell me that you have discovered a new species of bacteria in this pond?\"\n",
+"\n",
+"for k,v in texts.items():\n",
+"    noise = torch.randn(1,1,256).to(device)\n",
+"    wav = inference(v, noise, diffusion_steps=10, embedding_scale=1)\n",
+"    print(k + \": \")\n",
+"    display(ipd.Audio(wav, rate=24000, normalize=False))"
+],
+"metadata": {
+"id": "H5g5RO-mCbZB"
+},
+"execution_count": null,
+"outputs": []
+},
+{
+"cell_type": "markdown",
+"source": [
+"#### With embedding_scale=2"
+],
+"metadata": {
+"id": "f4S8TXSpCgpA"
+}
+},
+{
+"cell_type": "code",
+"source": [
+"texts = {}\n",
+"texts['Happy'] = \"We are happy to invite you to join us on a journey to the past, where we will visit the most amazing monuments ever built by human hands.\"\n",
+"texts['Sad'] = \"I am sorry to say that we have suffered a severe setback in our efforts to restore prosperity and confidence.\"\n",
+"texts['Angry'] = \"The field of astronomy is a joke! Its theories are based on flawed observations and biased interpretations!\"\n",
+"texts['Surprised'] = \"I can't believe it! You mean to tell me that you have discovered a new species of bacteria in this pond?\"\n",
+"\n",
+"for k,v in texts.items():\n",
+"    noise = torch.randn(1,1,256).to(device)\n",
+"    wav = inference(v, noise, diffusion_steps=10, embedding_scale=2) # embedding_scale=2 for more pronounced emotion\n",
+"    print(k + \": \")\n",
+"    display(ipd.Audio(wav, rate=24000, normalize=False))"
+],
+"metadata": {
+"id": "xHHIdeNrCezC"
+},
+"execution_count": null,
+"outputs": []
+},
+{
+"cell_type": "markdown",
+"source": [
+"### Long-form generation\n",
+"This section includes a basic implementation of Algorithm 1 in the paper for consistent long-form audio generation. The example passage is taken from [Section 5](https://styletts2.github.io/#long) of the demo page."
+],
+"metadata": {
+"id": "nAh7Tov4CkuH"
+}
+},
+{
+"cell_type": "code",
+"source": [
+"passage = '''If the supply of fruit is greater than the family needs, it may be made a source of income by sending the fresh fruit to the market if there is one near enough, or by preserving, canning, and making jelly for sale. To make such an enterprise a success the fruit and work must be first class. There is magic in the word \"Homemade,\" when the product appeals to the eye and the palate; but many careless and incompetent people have found to their sorrow that this word has not magic enough to float inferior goods on the market. As a rule large canning and preserving establishments are clean and have the best appliances, and they employ chemists and skilled labor. The home product must be very good to compete with the attractive goods that are sent out from such establishments. Yet for first-class homemade products there is a market in all large cities. All first-class grocers have customers who purchase such goods.''' # @param {type:\"string\"}"
+],
+"metadata": {
+"cellView": "form",
+"id": "IJwUbgvACoDu"
+},
+"execution_count": 8,
+"outputs": []
+},
+{
+"cell_type": "code",
+"source": [
+"sentences = passage.split('.') # simple split by period\n",
+"wavs = []\n",
+"s_prev = None\n",
+"for text in sentences:\n",
+"    if text.strip() == \"\": continue\n",
+"    text += '.' # add it back\n",
+"    noise = torch.randn(1,1,256).to(device)\n",
+"    wav, s_prev = LFinference(text, s_prev, noise, alpha=0.7, diffusion_steps=10, embedding_scale=1.5)\n",
+"    wavs.append(wav)\n",
+"display(ipd.Audio(np.concatenate(wavs), rate=24000, normalize=False))"
+],
+"metadata": {
+"id": "nP-7i2QAC0JT"
+},
+"execution_count": null,
+"outputs": []
+}
+]
+}
Colab/StyleTTS2_Demo_LibriTTS.ipynb
ADDED
@@ -0,0 +1,1218 @@
| 1 |
+
{
|
| 2 |
+
"cells": [
|
| 3 |
+
{
|
| 4 |
+
"cell_type": "markdown",
|
| 5 |
+
"metadata": {
|
| 6 |
+
"id": "view-in-github",
|
| 7 |
+
"colab_type": "text"
|
| 8 |
+
},
|
| 9 |
+
"source": [
|
| 10 |
+
"<a href=\"https://colab.research.google.com/github/yl4579/StyleTTS2/blob/main/Colab/StyleTTS2_Demo_LibriTTS.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
|
| 11 |
+
]
|
| 12 |
+
},
|
| 13 |
+
{
|
| 14 |
+
"cell_type": "markdown",
|
| 15 |
+
"metadata": {
|
| 16 |
+
"id": "aAGQPfgYIR23"
|
| 17 |
+
},
|
| 18 |
+
"source": [
|
| 19 |
+
"### Install packages and download models"
|
| 20 |
+
]
|
| 21 |
+
},
|
| 22 |
+
{
|
| 23 |
+
"cell_type": "code",
|
| 24 |
+
"execution_count": null,
|
| 25 |
+
"metadata": {
|
| 26 |
+
"colab": {
|
| 27 |
+
"base_uri": "https://localhost:8080/"
|
| 28 |
+
},
|
| 29 |
+
"id": "zDPW5uSpISd2",
|
| 30 |
+
"outputId": "6463ff79-18d5-4071-c6ad-01947beeb368"
|
| 31 |
+
},
|
| 32 |
+
"outputs": [
|
| 33 |
+
{
|
| 34 |
+
"output_type": "stream",
|
| 35 |
+
"name": "stdout",
|
| 36 |
+
"text": [
|
| 37 |
+
|
| 38 |
+
]
|
| 39 |
+
}
|
| 40 |
+
],
|
| 41 |
+
"source": [
|
| 42 |
+
"%%shell\n",
|
| 43 |
+
"git clone https://github.com/yl4579/StyleTTS2.git\n",
|
| 44 |
+
"cd StyleTTS2\n",
|
| 45 |
+
"pip install SoundFile torchaudio munch torch pydub pyyaml librosa nltk matplotlib accelerate transformers phonemizer einops einops-exts tqdm typing-extensions git+https://github.com/resemble-ai/monotonic_align.git\n",
|
| 46 |
+
"sudo apt-get install espeak-ng\n",
|
| 47 |
+
"git-lfs clone https://huggingface.co/yl4579/StyleTTS2-LibriTTS\n",
|
| 48 |
+
"mv StyleTTS2-LibriTTS/Models .\n",
|
| 49 |
+
"mv StyleTTS2-LibriTTS/reference_audio.zip .\n",
|
| 50 |
+
"unzip reference_audio.zip\n",
|
| 51 |
+
"mv reference_audio Demo/reference_audio"
|
| 52 |
+
]
|
| 53 |
+
},
|
| 54 |
+
{
|
| 55 |
+
"cell_type": "markdown",
|
| 56 |
+
"metadata": {
|
| 57 |
+
"id": "eJdB_nCOIVIN"
|
| 58 |
+
},
|
| 59 |
+
"source": [
|
| 60 |
+
"### Load models"
|
| 61 |
+
]
|
| 62 |
+
},
|
| 63 |
+
{
|
| 64 |
+
"cell_type": "code",
|
| 65 |
+
"execution_count": null,
|
| 66 |
+
"metadata": {
|
| 67 |
+
"id": "cha8Tr2uJwN0"
|
| 68 |
+
},
|
| 69 |
+
"outputs": [],
|
| 70 |
+
"source": [
|
| 71 |
+
"import nltk\n",
|
| 72 |
+
"nltk.download('punkt')"
|
| 73 |
+
]
|
| 74 |
+
},
|
| 75 |
+
{
|
| 76 |
+
"cell_type": "code",
|
| 77 |
+
"execution_count": null,
|
| 78 |
+
"metadata": {
|
| 79 |
+
"id": "Qoow8Wd8ITtm"
|
| 80 |
+
},
|
| 81 |
+
"outputs": [],
|
| 82 |
+
"source": [
|
| 83 |
+
"%cd StyleTTS2\n",
|
| 84 |
+
"\n",
|
| 85 |
+
"import torch\n",
|
| 86 |
+
"torch.manual_seed(0)\n",
|
| 87 |
+
"torch.backends.cudnn.benchmark = False\n",
|
| 88 |
+
"torch.backends.cudnn.deterministic = True\n",
|
| 89 |
+
"\n",
|
| 90 |
+
"import random\n",
|
| 91 |
+
"random.seed(0)\n",
|
| 92 |
+
"\n",
|
| 93 |
+
"import numpy as np\n",
|
| 94 |
+
"np.random.seed(0)\n",
|
| 95 |
+
"\n",
|
| 96 |
+
"# load packages\n",
|
| 97 |
+
"import time\n",
|
| 98 |
+
"import random\n",
|
| 99 |
+
"import yaml\n",
|
| 100 |
+
"from munch import Munch\n",
|
| 101 |
+
"import numpy as np\n",
|
| 102 |
+
"import torch\n",
|
| 103 |
+
"from torch import nn\n",
|
| 104 |
+
"import torch.nn.functional as F\n",
|
| 105 |
+
"import torchaudio\n",
|
| 106 |
+
"import librosa\n",
|
| 107 |
+
"from nltk.tokenize import word_tokenize\n",
|
| 108 |
+
"\n",
|
| 109 |
+
"from models import *\n",
|
| 110 |
+
"from utils import *\n",
|
| 111 |
+
"from text_utils import TextCleaner\n",
|
| 112 |
+
"textclenaer = TextCleaner()\n",
|
| 113 |
+
"\n",
|
| 114 |
+
"%matplotlib inline\n",
|
| 115 |
+
"\n",
|
| 116 |
+
"to_mel = torchaudio.transforms.MelSpectrogram(\n",
|
| 117 |
+
" n_mels=80, n_fft=2048, win_length=1200, hop_length=300)\n",
|
| 118 |
+
"mean, std = -4, 4\n",
|
| 119 |
+
"\n",
|
| 120 |
+
"def length_to_mask(lengths):\n",
|
| 121 |
+
" mask = torch.arange(lengths.max()).unsqueeze(0).expand(lengths.shape[0], -1).type_as(lengths)\n",
|
| 122 |
+
" mask = torch.gt(mask+1, lengths.unsqueeze(1))\n",
|
| 123 |
+
" return mask\n",
|
| 124 |
+
"\n",
|
| 125 |
+
"def preprocess(wave):\n",
|
| 126 |
+
" wave_tensor = torch.from_numpy(wave).float()\n",
|
| 127 |
+
" mel_tensor = to_mel(wave_tensor)\n",
|
| 128 |
+
" mel_tensor = (torch.log(1e-5 + mel_tensor.unsqueeze(0)) - mean) / std\n",
|
| 129 |
+
" return mel_tensor\n",
|
| 130 |
+
"\n",
|
| 131 |
+
"def compute_style(path):\n",
|
| 132 |
+
" wave, sr = librosa.load(path, sr=24000)\n",
|
| 133 |
+
" audio, index = librosa.effects.trim(wave, top_db=30)\n",
|
| 134 |
+
" if sr != 24000:\n",
|
| 135 |
+
" audio = librosa.resample(audio, sr, 24000)\n",
|
| 136 |
+
" mel_tensor = preprocess(audio).to(device)\n",
|
| 137 |
+
"\n",
|
| 138 |
+
" with torch.no_grad():\n",
|
| 139 |
+
" ref_s = model.style_encoder(mel_tensor.unsqueeze(1))\n",
|
| 140 |
+
" ref_p = model.predictor_encoder(mel_tensor.unsqueeze(1))\n",
|
| 141 |
+
"\n",
|
| 142 |
+
" return torch.cat([ref_s, ref_p], dim=1)\n",
|
| 143 |
+
"\n",
|
| 144 |
+
"device = 'cuda' if torch.cuda.is_available() else 'cpu'\n",
|
| 145 |
+
"\n",
|
| 146 |
+
"# load phonemizer\n",
|
| 147 |
+
"import phonemizer\n",
|
| 148 |
+
"global_phonemizer = phonemizer.backend.EspeakBackend(language='en-us', preserve_punctuation=True, with_stress=True)\n",
|
| 149 |
+
"\n",
|
| 150 |
+
"config = yaml.safe_load(open(\"Models/LibriTTS/config.yml\"))\n",
|
| 151 |
+
"\n",
|
| 152 |
+
"# load pretrained ASR model\n",
|
| 153 |
+
"ASR_config = config.get('ASR_config', False)\n",
|
| 154 |
+
"ASR_path = config.get('ASR_path', False)\n",
|
| 155 |
+
"text_aligner = load_ASR_models(ASR_path, ASR_config)\n",
|
| 156 |
+
"\n",
|
| 157 |
+
"# load pretrained F0 model\n",
|
| 158 |
+
"F0_path = config.get('F0_path', False)\n",
|
| 159 |
+
"pitch_extractor = load_F0_models(F0_path)\n",
|
| 160 |
+
"\n",
|
| 161 |
+
"# load BERT model\n",
|
| 162 |
+
"from Utils.PLBERT.util import load_plbert\n",
|
| 163 |
+
"BERT_path = config.get('PLBERT_dir', False)\n",
|
| 164 |
+
"plbert = load_plbert(BERT_path)\n",
|
| 165 |
+
"\n",
|
| 166 |
+
"model_params = recursive_munch(config['model_params'])\n",
|
| 167 |
+
"model = build_model(model_params, text_aligner, pitch_extractor, plbert)\n",
|
| 168 |
+
"_ = [model[key].eval() for key in model]\n",
|
| 169 |
+
"_ = [model[key].to(device) for key in model]\n",
|
| 170 |
+
"\n",
|
| 171 |
+
"params_whole = torch.load(\"Models/LibriTTS/epochs_2nd_00020.pth\", map_location='cpu')\n",
|
| 172 |
+
"params = params_whole['net']\n",
|
| 173 |
+
"\n",
|
| 174 |
+
"for key in model:\n",
|
| 175 |
+
" if key in params:\n",
|
| 176 |
+
" print('%s loaded' % key)\n",
|
| 177 |
+
" try:\n",
|
| 178 |
+
" model[key].load_state_dict(params[key])\n",
|
| 179 |
+
" except:\n",
|
| 180 |
+
" from collections import OrderedDict\n",
|
| 181 |
+
" state_dict = params[key]\n",
|
| 182 |
+
" new_state_dict = OrderedDict()\n",
|
| 183 |
+
" for k, v in state_dict.items():\n",
|
| 184 |
+
" name = k[7:] # remove `module.`\n",
|
| 185 |
+
" new_state_dict[name] = v\n",
|
| 186 |
+
" # load params\n",
|
| 187 |
+
" model[key].load_state_dict(new_state_dict, strict=False)\n",
|
| 188 |
+
"# except:\n",
|
| 189 |
+
"# _load(params[key], model[key])\n",
|
| 190 |
+
"_ = [model[key].eval() for key in model]\n",
|
| 191 |
+
"\n",
|
| 192 |
+
"from Modules.diffusion.sampler import DiffusionSampler, ADPM2Sampler, KarrasSchedule\n",
|
| 193 |
+
"\n",
|
| 194 |
+
"sampler = DiffusionSampler(\n",
|
| 195 |
+
" model.diffusion.diffusion,\n",
|
| 196 |
+
" sampler=ADPM2Sampler(),\n",
|
| 197 |
+
" sigma_schedule=KarrasSchedule(sigma_min=0.0001, sigma_max=3.0, rho=9.0), # empirical parameters\n",
|
| 198 |
+
" clamp=False\n",
|
| 199 |
+
")\n",
|
| 200 |
+
"\n",
|
| 201 |
+
"def inference(text, ref_s, alpha = 0.3, beta = 0.7, diffusion_steps=5, embedding_scale=1):\n",
|
| 202 |
+
" text = text.strip()\n",
|
| 203 |
+
" ps = global_phonemizer.phonemize([text])\n",
|
| 204 |
+
" ps = word_tokenize(ps[0])\n",
|
| 205 |
+
" ps = ' '.join(ps)\n",
|
| 206 |
+
" tokens = textclenaer(ps)\n",
|
| 207 |
+
" tokens.insert(0, 0)\n",
|
| 208 |
+
" tokens = torch.LongTensor(tokens).to(device).unsqueeze(0)\n",
|
| 209 |
+
"\n",
|
| 210 |
+
" with torch.no_grad():\n",
|
| 211 |
+
" input_lengths = torch.LongTensor([tokens.shape[-1]]).to(device)\n",
|
| 212 |
+
" text_mask = length_to_mask(input_lengths).to(device)\n",
|
| 213 |
+
"\n",
|
| 214 |
+
" t_en = model.text_encoder(tokens, input_lengths, text_mask)\n",
|
| 215 |
+
" bert_dur = model.bert(tokens, attention_mask=(~text_mask).int())\n",
|
| 216 |
+
" d_en = model.bert_encoder(bert_dur).transpose(-1, -2)\n",
|
| 217 |
+
"\n",
|
| 218 |
+
" s_pred = sampler(noise = torch.randn((1, 256)).unsqueeze(1).to(device),\n",
|
| 219 |
+
" embedding=bert_dur,\n",
|
| 220 |
+
" embedding_scale=embedding_scale,\n",
|
| 221 |
+
" features=ref_s, # reference from the same speaker as the embedding\n",
|
| 222 |
+
" num_steps=diffusion_steps).squeeze(1)\n",
|
| 223 |
+
"\n",
|
| 224 |
+
"\n",
|
| 225 |
+
" s = s_pred[:, 128:]\n",
|
| 226 |
+
" ref = s_pred[:, :128]\n",
|
| 227 |
+
"\n",
|
| 228 |
+
" ref = alpha * ref + (1 - alpha) * ref_s[:, :128]\n",
|
| 229 |
+
" s = beta * s + (1 - beta) * ref_s[:, 128:]\n",
|
| 230 |
+
"\n",
|
| 231 |
+
" d = model.predictor.text_encoder(d_en,\n",
|
| 232 |
+
" s, input_lengths, text_mask)\n",
|
| 233 |
+
"\n",
|
| 234 |
+
" x, _ = model.predictor.lstm(d)\n",
|
| 235 |
+
" duration = model.predictor.duration_proj(x)\n",
|
| 236 |
+
"\n",
|
| 237 |
+
" duration = torch.sigmoid(duration).sum(axis=-1)\n",
|
| 238 |
+
" pred_dur = torch.round(duration.squeeze()).clamp(min=1)\n",
|
| 239 |
+
"\n",
|
| 240 |
+
"\n",
|
| 241 |
+
" pred_aln_trg = torch.zeros(input_lengths, int(pred_dur.sum().data))\n",
|
| 242 |
+
" c_frame = 0\n",
|
| 243 |
+
" for i in range(pred_aln_trg.size(0)):\n",
|
| 244 |
+
" pred_aln_trg[i, c_frame:c_frame + int(pred_dur[i].data)] = 1\n",
|
| 245 |
+
" c_frame += int(pred_dur[i].data)\n",
|
| 246 |
+
"\n",
|
| 247 |
+
" # encode prosody\n",
|
| 248 |
+
" en = (d.transpose(-1, -2) @ pred_aln_trg.unsqueeze(0).to(device))\n",
|
| 249 |
+
" if model_params.decoder.type == \"hifigan\":\n",
|
| 250 |
+
" asr_new = torch.zeros_like(en)\n",
|
| 251 |
+
" asr_new[:, :, 0] = en[:, :, 0]\n",
|
| 252 |
+
" asr_new[:, :, 1:] = en[:, :, 0:-1]\n",
|
| 253 |
+
" en = asr_new\n",
|
| 254 |
+
"\n",
|
| 255 |
+
" F0_pred, N_pred = model.predictor.F0Ntrain(en, s)\n",
|
| 256 |
+
"\n",
|
| 257 |
+
" asr = (t_en @ pred_aln_trg.unsqueeze(0).to(device))\n",
|
| 258 |
+
" if model_params.decoder.type == \"hifigan\":\n",
|
| 259 |
+
" asr_new = torch.zeros_like(asr)\n",
|
| 260 |
+
" asr_new[:, :, 0] = asr[:, :, 0]\n",
|
| 261 |
+
" asr_new[:, :, 1:] = asr[:, :, 0:-1]\n",
|
| 262 |
+
" asr = asr_new\n",
|
| 263 |
+
"\n",
|
| 264 |
+
" out = model.decoder(asr,\n",
|
| 265 |
+
" F0_pred, N_pred, ref.squeeze().unsqueeze(0))\n",
|
| 266 |
+
"\n",
|
| 267 |
+
"\n",
|
| 268 |
+
" return out.squeeze().cpu().numpy()[..., :-50] # weird pulse at the end of the model, need to be fixed later\n",
|
| 269 |
+
"\n",
|
| 270 |
+
"def LFinference(text, s_prev, ref_s, alpha = 0.3, beta = 0.7, t = 0.7, diffusion_steps=5, embedding_scale=1):\n",
|
| 271 |
+
" text = text.strip()\n",
|
| 272 |
+
" ps = global_phonemizer.phonemize([text])\n",
|
| 273 |
+
" ps = word_tokenize(ps[0])\n",
|
| 274 |
+
" ps = ' '.join(ps)\n",
|
| 275 |
+
" ps = ps.replace('``', '\"')\n",
|
| 276 |
+
" ps = ps.replace(\"''\", '\"')\n",
|
| 277 |
+
"\n",
|
| 278 |
+
" tokens = textclenaer(ps)\n",
|
| 279 |
+
" tokens.insert(0, 0)\n",
|
| 280 |
+
" tokens = torch.LongTensor(tokens).to(device).unsqueeze(0)\n",
|
| 281 |
+
"\n",
|
| 282 |
+
" with torch.no_grad():\n",
|
| 283 |
+
" input_lengths = torch.LongTensor([tokens.shape[-1]]).to(device)\n",
|
| 284 |
+
" text_mask = length_to_mask(input_lengths).to(device)\n",
|
| 285 |
+
"\n",
|
| 286 |
+
" t_en = model.text_encoder(tokens, input_lengths, text_mask)\n",
|
| 287 |
+
" bert_dur = model.bert(tokens, attention_mask=(~text_mask).int())\n",
|
| 288 |
+
" d_en = model.bert_encoder(bert_dur).transpose(-1, -2)\n",
|
| 289 |
+
"\n",
|
| 290 |
+
" s_pred = sampler(noise = torch.randn((1, 256)).unsqueeze(1).to(device),\n",
|
| 291 |
+
" embedding=bert_dur,\n",
|
| 292 |
+
" embedding_scale=embedding_scale,\n",
|
| 293 |
+
" features=ref_s, # reference from the same speaker as the embedding\n",
|
| 294 |
+
" num_steps=diffusion_steps).squeeze(1)\n",
|
| 295 |
+
"\n",
|
| 296 |
+
" if s_prev is not None:\n",
|
| 297 |
+
" # convex combination of previous and current style\n",
|
| 298 |
+
" s_pred = t * s_prev + (1 - t) * s_pred\n",
|
| 299 |
+
"\n",
|
| 300 |
+
" s = s_pred[:, 128:]\n",
|
| 301 |
+
" ref = s_pred[:, :128]\n",
|
| 302 |
+
"\n",
|
| 303 |
+
" ref = alpha * ref + (1 - alpha) * ref_s[:, :128]\n",
|
| 304 |
+
" s = beta * s + (1 - beta) * ref_s[:, 128:]\n",
|
| 305 |
+
"\n",
|
| 306 |
+
" s_pred = torch.cat([ref, s], dim=-1)\n",
|
| 307 |
+
"\n",
|
| 308 |
+
" d = model.predictor.text_encoder(d_en,\n",
|
| 309 |
+
" s, input_lengths, text_mask)\n",
|
| 310 |
+
"\n",
|
| 311 |
+
" x, _ = model.predictor.lstm(d)\n",
|
| 312 |
+
" duration = model.predictor.duration_proj(x)\n",
|
| 313 |
+
"\n",
|
| 314 |
+
" duration = torch.sigmoid(duration).sum(axis=-1)\n",
|
| 315 |
+
" pred_dur = torch.round(duration.squeeze()).clamp(min=1)\n",
|
| 316 |
+
"\n",
|
| 317 |
+
"\n",
|
| 318 |
+
" pred_aln_trg = torch.zeros(input_lengths, int(pred_dur.sum().data))\n",
|
| 319 |
+
" c_frame = 0\n",
|
| 320 |
+
" for i in range(pred_aln_trg.size(0)):\n",
|
| 321 |
+
" pred_aln_trg[i, c_frame:c_frame + int(pred_dur[i].data)] = 1\n",
|
| 322 |
+
" c_frame += int(pred_dur[i].data)\n",
|
| 323 |
+
"\n",
|
| 324 |
+
" # encode prosody\n",
|
| 325 |
+
" en = (d.transpose(-1, -2) @ pred_aln_trg.unsqueeze(0).to(device))\n",
|
| 326 |
+
" if model_params.decoder.type == \"hifigan\":\n",
|
| 327 |
+
" asr_new = torch.zeros_like(en)\n",
|
| 328 |
+
" asr_new[:, :, 0] = en[:, :, 0]\n",
|
| 329 |
+
" asr_new[:, :, 1:] = en[:, :, 0:-1]\n",
|
| 330 |
+
" en = asr_new\n",
|
| 331 |
+
"\n",
|
| 332 |
+
" F0_pred, N_pred = model.predictor.F0Ntrain(en, s)\n",
|
| 333 |
+
"\n",
|
| 334 |
+
" asr = (t_en @ pred_aln_trg.unsqueeze(0).to(device))\n",
|
| 335 |
+
" if model_params.decoder.type == \"hifigan\":\n",
|
| 336 |
+
" asr_new = torch.zeros_like(asr)\n",
|
| 337 |
+
" asr_new[:, :, 0] = asr[:, :, 0]\n",
|
| 338 |
+
" asr_new[:, :, 1:] = asr[:, :, 0:-1]\n",
|
| 339 |
+
" asr = asr_new\n",
|
| 340 |
+
"\n",
|
| 341 |
+
" out = model.decoder(asr,\n",
|
| 342 |
+
" F0_pred, N_pred, ref.squeeze().unsqueeze(0))\n",
|
| 343 |
+
"\n",
|
| 344 |
+
"\n",
|
| 345 |
+
" return out.squeeze().cpu().numpy()[..., :-100], s_pred # weird pulse at the end of the model, need to be fixed later\n",
|
| 346 |
+
"\n",
|
| 347 |
+
"def STinference(text, ref_s, ref_text, alpha = 0.3, beta = 0.7, diffusion_steps=5, embedding_scale=1):\n",
|
| 348 |
+
" text = text.strip()\n",
|
| 349 |
+
" ps = global_phonemizer.phonemize([text])\n",
|
| 350 |
+
" ps = word_tokenize(ps[0])\n",
|
| 351 |
+
" ps = ' '.join(ps)\n",
|
| 352 |
+
"\n",
|
| 353 |
+
" tokens = textclenaer(ps)\n",
|
| 354 |
+
" tokens.insert(0, 0)\n",
|
| 355 |
+
" tokens = torch.LongTensor(tokens).to(device).unsqueeze(0)\n",
|
| 356 |
+
"\n",
|
| 357 |
+
" ref_text = ref_text.strip()\n",
|
| 358 |
+
" ps = global_phonemizer.phonemize([ref_text])\n",
|
| 359 |
+
" ps = word_tokenize(ps[0])\n",
|
| 360 |
+
" ps = ' '.join(ps)\n",
|
| 361 |
+
"\n",
|
| 362 |
+
" ref_tokens = textclenaer(ps)\n",
|
| 363 |
+
" ref_tokens.insert(0, 0)\n",
|
| 364 |
+
" ref_tokens = torch.LongTensor(ref_tokens).to(device).unsqueeze(0)\n",
|
| 365 |
+
"\n",
|
| 366 |
+
"\n",
|
| 367 |
+
" with torch.no_grad():\n",
|
| 368 |
+
" input_lengths = torch.LongTensor([tokens.shape[-1]]).to(device)\n",
|
| 369 |
+
" text_mask = length_to_mask(input_lengths).to(device)\n",
|
| 370 |
+
"\n",
|
| 371 |
+
" t_en = model.text_encoder(tokens, input_lengths, text_mask)\n",
|
| 372 |
+
" bert_dur = model.bert(tokens, attention_mask=(~text_mask).int())\n",
|
| 373 |
+
" d_en = model.bert_encoder(bert_dur).transpose(-1, -2)\n",
|
| 374 |
+
"\n",
|
| 375 |
+
" ref_input_lengths = torch.LongTensor([ref_tokens.shape[-1]]).to(device)\n",
|
| 376 |
+
" ref_text_mask = length_to_mask(ref_input_lengths).to(device)\n",
|
| 377 |
+
" ref_bert_dur = model.bert(ref_tokens, attention_mask=(~ref_text_mask).int())\n",
|
| 378 |
+
" s_pred = sampler(noise = torch.randn((1, 256)).unsqueeze(1).to(device),\n",
|
| 379 |
+
" embedding=bert_dur,\n",
|
| 380 |
+
" embedding_scale=embedding_scale,\n",
|
| 381 |
+
" features=ref_s, # reference from the same speaker as the embedding\n",
|
| 382 |
+
" num_steps=diffusion_steps).squeeze(1)\n",
|
| 383 |
+
"\n",
|
| 384 |
+
"\n",
|
| 385 |
+
" s = s_pred[:, 128:]\n",
|
| 386 |
+
" ref = s_pred[:, :128]\n",
|
| 387 |
+
"\n",
|
| 388 |
+
" ref = alpha * ref + (1 - alpha) * ref_s[:, :128]\n",
|
| 389 |
+
" s = beta * s + (1 - beta) * ref_s[:, 128:]\n",
|
| 390 |
+
"\n",
|
| 391 |
+
" d = model.predictor.text_encoder(d_en,\n",
|
| 392 |
+
" s, input_lengths, text_mask)\n",
|
| 393 |
+
"\n",
|
| 394 |
+
" x, _ = model.predictor.lstm(d)\n",
|
| 395 |
+
" duration = model.predictor.duration_proj(x)\n",
|
| 396 |
+
"\n",
|
| 397 |
+
" duration = torch.sigmoid(duration).sum(axis=-1)\n",
|
| 398 |
+
" pred_dur = torch.round(duration.squeeze()).clamp(min=1)\n",
|
| 399 |
+
"\n",
|
| 400 |
+
"\n",
|
| 401 |
+
" pred_aln_trg = torch.zeros(input_lengths, int(pred_dur.sum().data))\n",
|
| 402 |
+
" c_frame = 0\n",
|
| 403 |
+
" for i in range(pred_aln_trg.size(0)):\n",
|
| 404 |
+
" pred_aln_trg[i, c_frame:c_frame + int(pred_dur[i].data)] = 1\n",
|
| 405 |
+
" c_frame += int(pred_dur[i].data)\n",
|
| 406 |
+
"\n",
|
| 407 |
+
" # encode prosody\n",
|
| 408 |
+
" en = (d.transpose(-1, -2) @ pred_aln_trg.unsqueeze(0).to(device))\n",
|
| 409 |
+
" if model_params.decoder.type == \"hifigan\":\n",
|
| 410 |
+
" asr_new = torch.zeros_like(en)\n",
|
| 411 |
+
" asr_new[:, :, 0] = en[:, :, 0]\n",
|
| 412 |
+
" asr_new[:, :, 1:] = en[:, :, 0:-1]\n",
|
| 413 |
+
" en = asr_new\n",
|
| 414 |
+
"\n",
|
| 415 |
+
" F0_pred, N_pred = model.predictor.F0Ntrain(en, s)\n",
|
| 416 |
+
"\n",
|
| 417 |
+
" asr = (t_en @ pred_aln_trg.unsqueeze(0).to(device))\n",
|
| 418 |
+
" if model_params.decoder.type == \"hifigan\":\n",
|
| 419 |
+
" asr_new = torch.zeros_like(asr)\n",
|
| 420 |
+
" asr_new[:, :, 0] = asr[:, :, 0]\n",
|
| 421 |
+
" asr_new[:, :, 1:] = asr[:, :, 0:-1]\n",
|
| 422 |
+
" asr = asr_new\n",
|
| 423 |
+
"\n",
|
| 424 |
+
" out = model.decoder(asr,\n",
|
| 425 |
+
" F0_pred, N_pred, ref.squeeze().unsqueeze(0))\n",
|
| 426 |
+
"\n",
|
| 427 |
+
"\n",
|
| 428 |
+
" return out.squeeze().cpu().numpy()[..., :-50] # weird pulse at the end of the model, need to be fixed later\n"
|
| 429 |
+
]
|
| 430 |
+
},
|
| 431 |
+
{
|
| 432 |
+
"cell_type": "markdown",
|
| 433 |
+
"metadata": {
|
| 434 |
+
"id": "32S6U0LyJbCA"
|
| 435 |
+
},
|
| 436 |
+
"source": [
|
| 437 |
+
"### Synthesize speech"
|
| 438 |
+
]
|
| 439 |
+
},
|
| 440 |
+
{
|
| 441 |
+
"cell_type": "markdown",
|
| 442 |
+
"metadata": {
|
| 443 |
+
"id": "ehK_0daMJdk_"
|
| 444 |
+
},
|
| 445 |
+
"source": [
|
| 446 |
+
"#### Basic synthesis (5 diffusion steps, seen speakers)"
|
| 447 |
+
]
|
| 448 |
+
},
|
| 449 |
+
{
|
| 450 |
+
"cell_type": "code",
|
| 451 |
+
"execution_count": null,
|
| 452 |
+
"metadata": {
|
| 453 |
+
"id": "SJs2x41MJhM-"
|
| 454 |
+
},
|
| 455 |
+
"outputs": [],
|
| 456 |
+
"source": [
|
| 457 |
+
"text = ''' StyleTTS 2 is a text to speech model that leverages style diffusion and adversarial training with large speech language models to achieve human level text to speech synthesis. ''' # @param {type:\"string\"}\n"
|
| 458 |
+
]
|
| 459 |
+
},
|
| 460 |
+
{
|
| 461 |
+
"cell_type": "code",
|
| 462 |
+
"execution_count": null,
|
| 463 |
+
"metadata": {
|
| 464 |
+
"id": "xuqIJe-IJb7A"
|
| 465 |
+
},
|
| 466 |
+
"outputs": [],
|
| 467 |
+
"source": [
|
| 468 |
+
"reference_dicts = {}\n",
|
| 469 |
+
"reference_dicts['696_92939'] = \"Demo/reference_audio/696_92939_000016_000006.wav\"\n",
|
| 470 |
+
"reference_dicts['1789_142896'] = \"Demo/reference_audio/1789_142896_000022_000005.wav\""
|
| 471 |
+
]
|
| 472 |
+
},
|
| 473 |
+
{
|
| 474 |
+
"cell_type": "code",
|
| 475 |
+
"execution_count": null,
|
| 476 |
+
"metadata": {
|
| 477 |
+
"id": "H3ra3IxJJmF0"
|
| 478 |
+
},
|
| 479 |
+
"outputs": [],
|
| 480 |
+
"source": [
|
| 481 |
+
"noise = torch.randn(1,1,256).to(device)\n",
|
| 482 |
+
"for k, path in reference_dicts.items():\n",
|
| 483 |
+
" ref_s = compute_style(path)\n",
|
| 484 |
+
" start = time.time()\n",
|
| 485 |
+
" wav = inference(text, ref_s, alpha=0.3, beta=0.7, diffusion_steps=5, embedding_scale=1)\n",
|
| 486 |
+
" rtf = (time.time() - start) / (len(wav) / 24000)\n",
|
| 487 |
+
" print(f\"RTF = {rtf:5f}\")\n",
|
| 488 |
+
" import IPython.display as ipd\n",
|
| 489 |
+
" print(k + ' Synthesized:')\n",
|
| 490 |
+
" display(ipd.Audio(wav, rate=24000, normalize=False))\n",
|
| 491 |
+
" print('Reference:')\n",
|
| 492 |
+
" display(ipd.Audio(path, rate=24000, normalize=False))"
|
| 493 |
+
]
|
| 494 |
+
},
|
| 495 |
+
{
|
| 496 |
+
"cell_type": "markdown",
|
| 497 |
+
"metadata": {
|
| 498 |
+
"id": "aB3wUz6yJ-P_"
|
| 499 |
+
},
|
| 500 |
+
"source": [
|
| 501 |
+
"#### With higher diffusion steps (more diverse)\n",
|
| 502 |
+
"\n",
|
| 503 |
+
"Since the sampler is ancestral, the higher the stpes, the more diverse the samples are, with the cost of slower synthesis speed."
|
| 504 |
+
]
|
| 505 |
+
},
|
| 506 |
+
{
|
| 507 |
+
"cell_type": "code",
|
| 508 |
+
"execution_count": null,
|
| 509 |
+
"metadata": {
|
| 510 |
+
"id": "lF27XUo4JrKk"
|
| 511 |
+
},
|
| 512 |
+
"outputs": [],
|
| 513 |
+
"source": [
|
| 514 |
+
"noise = torch.randn(1,1,256).to(device)\n",
|
| 515 |
+
"for k, path in reference_dicts.items():\n",
|
| 516 |
+
" ref_s = compute_style(path)\n",
|
| 517 |
+
" start = time.time()\n",
|
| 518 |
+
" wav = inference(text, ref_s, alpha=0.3, beta=0.7, diffusion_steps=10, embedding_scale=1)\n",
|
| 519 |
+
" rtf = (time.time() - start) / (len(wav) / 24000)\n",
|
| 520 |
+
" print(f\"RTF = {rtf:5f}\")\n",
|
| 521 |
+
" import IPython.display as ipd\n",
|
| 522 |
+
" print(k + ' Synthesized:')\n",
|
| 523 |
+
" display(ipd.Audio(wav, rate=24000, normalize=False))\n",
|
| 524 |
+
" print(k + ' Reference:')\n",
|
| 525 |
+
" display(ipd.Audio(path, rate=24000, normalize=False))"
|
| 526 |
+
]
|
| 527 |
+
},
|
| 528 |
+
{
|
| 529 |
+
"cell_type": "markdown",
|
| 530 |
+
"metadata": {
|
| 531 |
+
"id": "pFT_vmJcKDs1"
|
| 532 |
+
},
|
| 533 |
+
"source": [
|
| 534 |
+
"#### Basic synthesis (5 diffusion steps, unseen speakers)\n",
|
| 535 |
+
"The following samples are to reproduce samples in [Section 4](https://styletts2.github.io/#libri) of the demo page. All spsakers are unseen during training. You can compare the generated samples to popular zero-shot TTS models like Vall-E and NaturalSpeech 2."
|
| 536 |
+
]
|
| 537 |
+
},
|
| 538 |
+
{
|
| 539 |
+
"cell_type": "code",
|
| 540 |
+
"execution_count": null,
|
| 541 |
+
"metadata": {
|
| 542 |
+
"id": "HvNAeGPEKAWN"
|
| 543 |
+
},
|
| 544 |
+
"outputs": [],
|
| 545 |
+
"source": [
|
| 546 |
+
"reference_dicts = {}\n",
|
| 547 |
+
"# format: (path, text)\n",
|
| 548 |
+
"reference_dicts['1221-135767'] = (\"Demo/reference_audio/1221-135767-0014.wav\", \"Yea, his honourable worship is within, but he hath a godly minister or two with him, and likewise a leech.\")\n",
|
| 549 |
+
"reference_dicts['5639-40744'] = (\"Demo/reference_audio/5639-40744-0020.wav\", \"Thus did this humane and right minded father comfort his unhappy daughter, and her mother embracing her again, did all she could to soothe her feelings.\")\n",
|
| 550 |
+
"reference_dicts['908-157963'] = (\"Demo/reference_audio/908-157963-0027.wav\", \"And lay me down in my cold bed and leave my shining lot.\")\n",
|
| 551 |
+
"reference_dicts['4077-13754'] = (\"Demo/reference_audio/4077-13754-0000.wav\", \"The army found the people in poverty and left them in comparative wealth.\")"
|
| 552 |
+
]
|
| 553 |
+
},
|
| 554 |
+
{
|
| 555 |
+
"cell_type": "code",
|
| 556 |
+
"execution_count": null,
|
| 557 |
+
"metadata": {
|
| 558 |
+
"id": "mFnyvYp5KAYN"
|
| 559 |
+
},
|
| 560 |
+
"outputs": [],
|
| 561 |
+
"source": [
|
| 562 |
+
"noise = torch.randn(1,1,256).to(device)\n",
|
| 563 |
+
"for k, v in reference_dicts.items():\n",
|
| 564 |
+
" path, text = v\n",
|
| 565 |
+
" ref_s = compute_style(path)\n",
|
| 566 |
+
" start = time.time()\n",
|
| 567 |
+
" wav = inference(text, ref_s, alpha=0.3, beta=0.7, diffusion_steps=5, embedding_scale=1)\n",
|
| 568 |
+
" rtf = (time.time() - start) / (len(wav) / 24000)\n",
|
| 569 |
+
" print(f\"RTF = {rtf:5f}\")\n",
|
| 570 |
+
" import IPython.display as ipd\n",
|
| 571 |
+
" print(k + ' Synthesized: ' + text)\n",
|
| 572 |
+
" display(ipd.Audio(wav, rate=24000, normalize=False))\n",
|
| 573 |
+
" print(k + ' Reference:')\n",
|
| 574 |
+
" display(ipd.Audio(path, rate=24000, normalize=False))"
|
| 575 |
+
]
|
| 576 |
+
},
|
| 577 |
+
{
|
| 578 |
+
"cell_type": "markdown",
|
| 579 |
+
"metadata": {
|
| 580 |
+
"id": "QBZ53BQtKNQ6"
|
| 581 |
+
},
|
| 582 |
+
"source": [
|
| 583 |
+
"### Speech expressiveness\n",
|
| 584 |
+
"\n",
|
| 585 |
+
"The following section recreates the samples shown in [Section 6](https://styletts2.github.io/#emo) of the demo page. The speaker reference used is `1221-135767-0014.wav`, which is unseen during training.\n",
|
| 586 |
+
"\n",
|
| 587 |
+
"#### With `embedding_scale=1`\n",
|
| 588 |
+
"This is the classifier-free guidance scale. The higher the scale, the more conditional the style is to the input text and hence more emotional."
|
| 589 |
+
]
|
| 590 |
+
},
|
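For intuition, classifier-free guidance mixes a text-conditional and an unconditional score estimate. The sketch below shows the usual formulation under the assumption that the sampler combines them this way; the names here are illustrative, not the sampler's actual internals:

    # illustrative sketch of classifier-free guidance (assumed, not the sampler's source)
    def guided_estimate(cond, uncond, embedding_scale):
        # embedding_scale = 1 recovers the purely conditional estimate;
        # larger scales push the style further toward the text condition
        return uncond + embedding_scale * (cond - uncond)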
| 591 |
+
{
|
| 592 |
+
"cell_type": "code",
|
| 593 |
+
"execution_count": null,
|
| 594 |
+
"metadata": {
|
| 595 |
+
"id": "5FwE9CefKQk6"
|
| 596 |
+
},
|
| 597 |
+
"outputs": [],
|
| 598 |
+
"source": [
|
| 599 |
+
"ref_s = compute_style(\"Demo/reference_audio/1221-135767-0014.wav\")"
|
| 600 |
+
]
|
| 601 |
+
},
|
| 602 |
+
{
|
| 603 |
+
"cell_type": "code",
|
| 604 |
+
"execution_count": null,
|
| 605 |
+
"metadata": {
|
| 606 |
+
"id": "0CKMI0ZsKUDh"
|
| 607 |
+
},
|
| 608 |
+
"outputs": [],
|
| 609 |
+
"source": [
|
| 610 |
+
"texts = {}\n",
|
| 611 |
+
"texts['Happy'] = \"We are happy to invite you to join us on a journey to the past, where we will visit the most amazing monuments ever built by human hands.\"\n",
|
| 612 |
+
"texts['Sad'] = \"I am sorry to say that we have suffered a severe setback in our efforts to restore prosperity and confidence.\"\n",
|
| 613 |
+
"texts['Angry'] = \"The field of astronomy is a joke! Its theories are based on flawed observations and biased interpretations!\"\n",
|
| 614 |
+
"texts['Surprised'] = \"I can't believe it! You mean to tell me that you have discovered a new species of bacteria in this pond?\"\n",
|
| 615 |
+
"\n",
|
| 616 |
+
"for k,v in texts.items():\n",
|
| 617 |
+
" wav = inference(v, ref_s, diffusion_steps=10, alpha=0.3, beta=0.7, embedding_scale=1)\n",
|
| 618 |
+
" print(k + \": \")\n",
|
| 619 |
+
" display(ipd.Audio(wav, rate=24000, normalize=False))"
|
| 620 |
+
]
|
| 621 |
+
},
|
| 622 |
+
{
|
| 623 |
+
"cell_type": "markdown",
|
| 624 |
+
"metadata": {
|
| 625 |
+
"id": "reemQKVEKWAZ"
|
| 626 |
+
},
|
| 627 |
+
"source": [
|
| 628 |
+
"#### With `embedding_scale=2`"
|
| 629 |
+
]
|
| 630 |
+
},
|
| 631 |
+
{
|
| 632 |
+
"cell_type": "code",
|
| 633 |
+
"execution_count": null,
|
| 634 |
+
"metadata": {
|
| 635 |
+
"id": "npIAiAUvKYGv"
|
| 636 |
+
},
|
| 637 |
+
"outputs": [],
|
| 638 |
+
"source": [
|
| 639 |
+
"texts = {}\n",
|
| 640 |
+
"texts['Happy'] = \"We are happy to invite you to join us on a journey to the past, where we will visit the most amazing monuments ever built by human hands.\"\n",
|
| 641 |
+
"texts['Sad'] = \"I am sorry to say that we have suffered a severe setback in our efforts to restore prosperity and confidence.\"\n",
|
| 642 |
+
"texts['Angry'] = \"The field of astronomy is a joke! Its theories are based on flawed observations and biased interpretations!\"\n",
|
| 643 |
+
"texts['Surprised'] = \"I can't believe it! You mean to tell me that you have discovered a new species of bacteria in this pond?\"\n",
|
| 644 |
+
"\n",
|
| 645 |
+
"for k,v in texts.items():\n",
|
| 646 |
+
" noise = torch.randn(1,1,256).to(device)\n",
|
| 647 |
+
" wav = inference(v, ref_s, diffusion_steps=10, alpha=0.3, beta=0.7, embedding_scale=2)\n",
|
| 648 |
+
" print(k + \": \")\n",
|
| 649 |
+
" display(ipd.Audio(wav, rate=24000, normalize=False))"
|
| 650 |
+
]
|
| 651 |
+
},
|
| 652 |
+
{
|
| 653 |
+
"cell_type": "markdown",
|
| 654 |
+
"metadata": {
|
| 655 |
+
"id": "lqKZaXeYKbrH"
|
| 656 |
+
},
|
| 657 |
+
"source": [
|
| 658 |
+
"#### With `embedding_scale=2, alpha = 0.5, beta = 0.9`\n",
|
| 659 |
+
"`alpha` and `beta` is the factor to determine much we use the style sampled based on the text instead of the reference. The higher the value of `alpha` and `beta`, the more suitable the style it is to the text but less similar to the reference. Using higher beta makes the synthesized speech more emotional, at the cost of lower similarity to the reference. `alpha` determines the timbre of the speaker while `beta` determines the prosody."
|
| 660 |
+
]
|
| 661 |
+
},
|
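Concretely, `alpha` and `beta` are convex-combination weights between the diffusion-sampled style `s_pred` and the reference style `ref_s`; this mirrors the mixing inside the `inference` function, where the first 128 dimensions are timbre and the last 128 are prosody:

    ref = alpha * s_pred[:, :128] + (1 - alpha) * ref_s[:, :128]  # timbre
    s   = beta  * s_pred[:, 128:] + (1 - beta)  * ref_s[:, 128:]  # prosody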
| 662 |
+
{
|
| 663 |
+
"cell_type": "code",
|
| 664 |
+
"execution_count": null,
|
| 665 |
+
"metadata": {
|
| 666 |
+
"id": "VjXuRCCWKcdN"
|
| 667 |
+
},
|
| 668 |
+
"outputs": [],
|
| 669 |
+
"source": [
|
| 670 |
+
"texts = {}\n",
|
| 671 |
+
"texts['Happy'] = \"We are happy to invite you to join us on a journey to the past, where we will visit the most amazing monuments ever built by human hands.\"\n",
|
| 672 |
+
"texts['Sad'] = \"I am sorry to say that we have suffered a severe setback in our efforts to restore prosperity and confidence.\"\n",
|
| 673 |
+
"texts['Angry'] = \"The field of astronomy is a joke! Its theories are based on flawed observations and biased interpretations!\"\n",
|
| 674 |
+
"texts['Surprised'] = \"I can't believe it! You mean to tell me that you have discovered a new species of bacteria in this pond?\"\n",
|
| 675 |
+
"\n",
|
| 676 |
+
"for k,v in texts.items():\n",
|
| 677 |
+
" noise = torch.randn(1,1,256).to(device)\n",
|
| 678 |
+
" wav = inference(v, ref_s, diffusion_steps=10, alpha=0.5, beta=0.9, embedding_scale=2)\n",
|
| 679 |
+
" print(k + \": \")\n",
|
| 680 |
+
" display(ipd.Audio(wav, rate=24000, normalize=False))"
|
| 681 |
+
]
|
| 682 |
+
},
|
| 683 |
+
{
|
| 684 |
+
"cell_type": "markdown",
|
| 685 |
+
"metadata": {
|
| 686 |
+
"id": "xrwYXGh0KiIW"
|
| 687 |
+
},
|
| 688 |
+
"source": [
|
| 689 |
+
"### Zero-shot speaker adaptation\n",
|
| 690 |
+
"This section recreates the \"Acoustic Environment Maintenance\" and \"Speaker’s Emotion Maintenance\" demo in [Section 4](https://styletts2.github.io/#libri) of the demo page. You can compare the generated samples to popular zero-shot TTS models like Vall-E. Note that the model was trained only on LibriTTS, which is about 250 times fewer data compared to those used to trian Vall-E with similar or better effect for these maintainance."
|
| 691 |
+
]
|
| 692 |
+
},
|
| 693 |
+
{
|
| 694 |
+
"cell_type": "markdown",
|
| 695 |
+
"metadata": {
|
| 696 |
+
"id": "ETUywHHmKimE"
|
| 697 |
+
},
|
| 698 |
+
"source": [
|
| 699 |
+
"#### Acoustic Environment Maintenance\n",
|
| 700 |
+
"\n",
|
| 701 |
+
"Since we want to maintain the acoustic environment in the speaker (timbre), we set `alpha = 0` to make the speaker as close to the reference as possible while only changing the prosody according to the text. "
|
| 702 |
+
]
|
| 703 |
+
},
|
| 704 |
+
{
|
| 705 |
+
"cell_type": "code",
|
| 706 |
+
"execution_count": null,
|
| 707 |
+
"metadata": {
|
| 708 |
+
"id": "yvjBK3syKnZL"
|
| 709 |
+
},
|
| 710 |
+
"outputs": [],
|
| 711 |
+
"source": [
|
| 712 |
+
"reference_dicts = {}\n",
|
| 713 |
+
"# format: (path, text)\n",
|
| 714 |
+
"reference_dicts['3'] = (\"Demo/reference_audio/3.wav\", \"As friends thing I definitely I've got more male friends.\")\n",
|
| 715 |
+
"reference_dicts['4'] = (\"Demo/reference_audio/4.wav\", \"Everything is run by computer but you got to know how to think before you can do a computer.\")\n",
|
| 716 |
+
"reference_dicts['5'] = (\"Demo/reference_audio/5.wav\", \"Then out in LA you guys got a whole another ball game within California to worry about.\")"
|
| 717 |
+
]
|
| 718 |
+
},
|
| 719 |
+
{
|
| 720 |
+
"cell_type": "code",
|
| 721 |
+
"execution_count": null,
|
| 722 |
+
"metadata": {
|
| 723 |
+
"id": "jclowWp4KomJ"
|
| 724 |
+
},
|
| 725 |
+
"outputs": [],
|
| 726 |
+
"source": [
|
| 727 |
+
"noise = torch.randn(1,1,256).to(device)\n",
|
| 728 |
+
"for k, v in reference_dicts.items():\n",
|
| 729 |
+
" path, text = v\n",
|
| 730 |
+
" ref_s = compute_style(path)\n",
|
| 731 |
+
" start = time.time()\n",
|
| 732 |
+
" wav = inference(text, ref_s, alpha=0.0, beta=0.5, diffusion_steps=5, embedding_scale=1)\n",
|
| 733 |
+
" rtf = (time.time() - start) / (len(wav) / 24000)\n",
|
| 734 |
+
" print(f\"RTF = {rtf:5f}\")\n",
|
| 735 |
+
" import IPython.display as ipd\n",
|
| 736 |
+
" print('Synthesized: ' + text)\n",
|
| 737 |
+
" display(ipd.Audio(wav, rate=24000, normalize=False))\n",
|
| 738 |
+
" print('Reference:')\n",
|
| 739 |
+
" display(ipd.Audio(path, rate=24000, normalize=False))"
|
| 740 |
+
]
|
| 741 |
+
},
|
| 742 |
+
{
|
| 743 |
+
"cell_type": "markdown",
|
| 744 |
+
"metadata": {
|
| 745 |
+
"id": "LgIm7M93KqVZ"
|
| 746 |
+
},
|
| 747 |
+
"source": [
|
| 748 |
+
"#### Speaker’s Emotion Maintenance\n",
|
| 749 |
+
"\n",
|
| 750 |
+
"Since we want to maintain the emotion in the speaker (prosody), we set `beta = 0.1` to make the speaker as closer to the reference as possible while having some diversity thruogh the slight timbre change."
|
| 751 |
+
]
|
| 752 |
+
},
|
| 753 |
+
{
|
| 754 |
+
"cell_type": "code",
|
| 755 |
+
"execution_count": null,
|
| 756 |
+
"metadata": {
|
| 757 |
+
"id": "yzsNoP6oKulL"
|
| 758 |
+
},
|
| 759 |
+
"outputs": [],
|
| 760 |
+
"source": [
|
| 761 |
+
"reference_dicts = {}\n",
|
| 762 |
+
"# format: (path, text)\n",
|
| 763 |
+
"reference_dicts['Anger'] = (\"Demo/reference_audio/anger.wav\", \"We have to reduce the number of plastic bags.\")\n",
|
| 764 |
+
"reference_dicts['Sleepy'] = (\"Demo/reference_audio/sleepy.wav\", \"We have to reduce the number of plastic bags.\")\n",
|
| 765 |
+
"reference_dicts['Amused'] = (\"Demo/reference_audio/amused.wav\", \"We have to reduce the number of plastic bags.\")\n",
|
| 766 |
+
"reference_dicts['Disgusted'] = (\"Demo/reference_audio/disgusted.wav\", \"We have to reduce the number of plastic bags.\")"
|
| 767 |
+
]
|
| 768 |
+
},
|
| 769 |
+
{
|
| 770 |
+
"cell_type": "code",
|
| 771 |
+
"execution_count": null,
|
| 772 |
+
"metadata": {
|
| 773 |
+
"id": "7h2-9cpfKwr4"
|
| 774 |
+
},
|
| 775 |
+
"outputs": [],
|
| 776 |
+
"source": [
|
| 777 |
+
"noise = torch.randn(1,1,256).to(device)\n",
|
| 778 |
+
"for k, v in reference_dicts.items():\n",
|
| 779 |
+
" path, text = v\n",
|
| 780 |
+
" ref_s = compute_style(path)\n",
|
| 781 |
+
" start = time.time()\n",
|
| 782 |
+
" wav = inference(text, ref_s, alpha=0.3, beta=0.1, diffusion_steps=10, embedding_scale=1)\n",
|
| 783 |
+
" rtf = (time.time() - start) / (len(wav) / 24000)\n",
|
| 784 |
+
" print(f\"RTF = {rtf:5f}\")\n",
|
| 785 |
+
" import IPython.display as ipd\n",
|
| 786 |
+
" print(k + ' Synthesized: ' + text)\n",
|
| 787 |
+
" display(ipd.Audio(wav, rate=24000, normalize=False))\n",
|
| 788 |
+
" print(k + ' Reference:')\n",
|
| 789 |
+
" display(ipd.Audio(path, rate=24000, normalize=False))"
|
| 790 |
+
]
|
| 791 |
+
},
|
| 792 |
+
{
|
| 793 |
+
"cell_type": "markdown",
|
| 794 |
+
"metadata": {
|
| 795 |
+
"id": "aNS82PGwKzgg"
|
| 796 |
+
},
|
| 797 |
+
"source": [
|
| 798 |
+
"### Longform Narration\n",
|
| 799 |
+
"\n",
|
| 800 |
+
"This section includes basic implementation of Algorithm 1 in the paper for consistent longform audio generation. The example passage is taken from [Section 5](https://styletts2.github.io/#long) of the demo page."
|
| 801 |
+
]
|
| 802 |
+
},
|
| 803 |
+
{
|
| 804 |
+
"cell_type": "code",
|
| 805 |
+
"execution_count": null,
|
| 806 |
+
"metadata": {
|
| 807 |
+
"cellView": "form",
|
| 808 |
+
"id": "qs97nL5HK5DH"
|
| 809 |
+
},
|
| 810 |
+
"outputs": [],
|
| 811 |
+
"source": [
|
| 812 |
+
"passage = passage = '''If the supply of fruit is greater than the family needs, it may be made a source of income by sending the fresh fruit to the market if there is one near enough, or by preserving, canning, and making jelly for sale. To make such an enterprise a success the fruit and work must be first class. There is magic in the word \"Homemade,\" when the product appeals to the eye and the palate; but many careless and incompetent people have found to their sorrow that this word has not magic enough to float inferior goods on the market. As a rule large canning and preserving establishments are clean and have the best appliances, and they employ chemists and skilled labor. The home product must be very good to compete with the attractive goods that are sent out from such establishments. Yet for first class home made products there is a market in all large cities. All first-class grocers have customers who purchase such goods.''' # @param {type:\"string\"}"
|
| 813 |
+
]
|
| 814 |
+
},
|
| 815 |
+
{
|
| 816 |
+
"cell_type": "code",
|
| 817 |
+
"execution_count": null,
|
| 818 |
+
"metadata": {
|
| 819 |
+
"colab": {
|
| 820 |
+
"background_save": true
|
| 821 |
+
},
|
| 822 |
+
"id": "8Mu9whHYK_1b"
|
| 823 |
+
},
|
| 824 |
+
"outputs": [],
|
| 825 |
+
"source": [
|
| 826 |
+
"# seen speaker\n",
|
| 827 |
+
"path = \"Demo/reference_audio/696_92939_000016_000006.wav\"\n",
|
| 828 |
+
"s_ref = compute_style(path)\n",
|
| 829 |
+
"sentences = passage.split('.') # simple split by comma\n",
|
| 830 |
+
"wavs = []\n",
|
| 831 |
+
"s_prev = None\n",
|
| 832 |
+
"for text in sentences:\n",
|
| 833 |
+
" if text.strip() == \"\": continue\n",
|
| 834 |
+
" text += '.' # add it back\n",
|
| 835 |
+
"\n",
|
| 836 |
+
" wav, s_prev = LFinference(text,\n",
|
| 837 |
+
" s_prev,\n",
|
| 838 |
+
" s_ref,\n",
|
| 839 |
+
" alpha = 0.3,\n",
|
| 840 |
+
" beta = 0.9, # make it more suitable for the text\n",
|
| 841 |
+
" t = 0.7,\n",
|
| 842 |
+
" diffusion_steps=10, embedding_scale=1.5)\n",
|
| 843 |
+
" wavs.append(wav)\n",
|
| 844 |
+
"print('Synthesized: ')\n",
|
| 845 |
+
"display(ipd.Audio(np.concatenate(wavs), rate=24000, normalize=False))\n",
|
| 846 |
+
"print('Reference: ')\n",
|
| 847 |
+
"display(ipd.Audio(path, rate=24000, normalize=False))"
|
| 848 |
+
]
|
| 849 |
+
},
|
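The `t` argument controls how much of the previous sentence's style is carried over between sentences. A minimal sketch of the presumed carry-over inside `LFinference` (an assumption based on Algorithm 1, since the function's source is not shown in this notebook):

    # assumed style carry-over across sentences:
    if s_prev is not None:
        s_pred = t * s_prev + (1 - t) * s_pred  # t = 0.7 keeps most of the previous style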
| 850 |
+
{
|
| 851 |
+
"cell_type": "markdown",
|
| 852 |
+
"metadata": {
|
| 853 |
+
"id": "81Rh-lgWLB2i"
|
| 854 |
+
},
|
| 855 |
+
"source": [
|
| 856 |
+
"### Style Transfer\n",
|
| 857 |
+
"\n",
|
| 858 |
+
"The following section demostrates the style transfer capacity for unseen speakers in [Section 6](https://styletts2.github.io/#emo) of the demo page. For this, we set `alpha=0.5, beta = 0.9` for the most pronounced effects (mostly using the sampled style)."
|
| 859 |
+
]
|
| 860 |
+
},
|
| 861 |
+
{
|
| 862 |
+
"cell_type": "code",
|
| 863 |
+
"execution_count": null,
|
| 864 |
+
"metadata": {
|
| 865 |
+
"id": "CtIgr5kOLE9a"
|
| 866 |
+
},
|
| 867 |
+
"outputs": [],
|
| 868 |
+
"source": [
|
| 869 |
+
"# reference texts to sample styles\n",
|
| 870 |
+
"\n",
|
| 871 |
+
"ref_texts = {}\n",
|
| 872 |
+
"ref_texts['Happy'] = \"We are happy to invite you to join us on a journey to the past, where we will visit the most amazing monuments ever built by human hands.\"\n",
|
| 873 |
+
"ref_texts['Sad'] = \"I am sorry to say that we have suffered a severe setback in our efforts to restore prosperity and confidence.\"\n",
|
| 874 |
+
"ref_texts['Angry'] = \"The field of astronomy is a joke! Its theories are based on flawed observations and biased interpretations!\"\n",
|
| 875 |
+
"ref_texts['Surprised'] = \"I can't believe it! You mean to tell me that you have discovered a new species of bacteria in this pond?\""
|
| 876 |
+
]
|
| 877 |
+
},
|
| 878 |
+
{
|
| 879 |
+
"cell_type": "code",
|
| 880 |
+
"execution_count": null,
|
| 881 |
+
"metadata": {
|
| 882 |
+
"id": "MlA1CbhzLIoI"
|
| 883 |
+
},
|
| 884 |
+
"outputs": [],
|
| 885 |
+
"source": [
|
| 886 |
+
"path = \"Demo/reference_audio/1221-135767-0014.wav\"\n",
|
| 887 |
+
"s_ref = compute_style(path)\n",
|
| 888 |
+
"\n",
|
| 889 |
+
"text = \"Yea, his honourable worship is within, but he hath a godly minister or two with him, and likewise a leech.\"\n",
|
| 890 |
+
"for k,v in ref_texts.items():\n",
|
| 891 |
+
" wav = STinference(text, s_ref, v, diffusion_steps=10, alpha=0.5, beta=0.9, embedding_scale=1.5)\n",
|
| 892 |
+
" print(k + \": \")\n",
|
| 893 |
+
" display(ipd.Audio(wav, rate=24000, normalize=False))"
|
| 894 |
+
]
|
| 895 |
+
},
|
| 896 |
+
{
|
| 897 |
+
"cell_type": "markdown",
|
| 898 |
+
"metadata": {
|
| 899 |
+
"id": "2M0iaXlkLJUQ"
|
| 900 |
+
},
|
| 901 |
+
"source": [
|
| 902 |
+
"### Speech diversity\n",
|
| 903 |
+
"\n",
|
| 904 |
+
"This section reproduces samples in [Section 7](https://styletts2.github.io/#var) of the demo page.\n",
|
| 905 |
+
"\n",
|
| 906 |
+
"`alpha` and `beta` determine the diversity of the synthesized speech. There are two extreme cases:\n",
|
| 907 |
+
"- If `alpha = 1` and `beta = 1`, the synthesized speech sounds the most dissimilar to the reference speaker, but it is also the most diverse (each time you synthesize a speech it will be totally different).\n",
|
| 908 |
+
"- If `alpha = 0` and `beta = 0`, the synthesized speech sounds the most siimlar to the reference speaker, but it is deterministic (i.e., the sampled style is not used for speech synthesis).\n"
|
| 909 |
+
]
|
| 910 |
+
},
|
| 911 |
+
{
|
| 912 |
+
"cell_type": "markdown",
|
| 913 |
+
"metadata": {
|
| 914 |
+
"id": "tSxZDvF2LNu4"
|
| 915 |
+
},
|
| 916 |
+
"source": [
|
| 917 |
+
"#### Default setting (`alpha = 0.3, beta=0.7`)\n",
|
| 918 |
+
"This setting uses 70% of the reference timbre and 30% of the reference prosody and use the diffusion model to sample them based on the text."
|
| 919 |
+
]
|
| 920 |
+
},
|
| 921 |
+
{
|
| 922 |
+
"cell_type": "code",
|
| 923 |
+
"execution_count": null,
|
| 924 |
+
"metadata": {
|
| 925 |
+
"id": "AAomGCDZLIt5"
|
| 926 |
+
},
|
| 927 |
+
"outputs": [],
|
| 928 |
+
"source": [
|
| 929 |
+
"# unseen speaker\n",
|
| 930 |
+
"path = \"Demo/reference_audio/1221-135767-0014.wav\"\n",
|
| 931 |
+
"ref_s = compute_style(path)\n",
|
| 932 |
+
"\n",
|
| 933 |
+
"text = \"How much variation is there?\"\n",
|
| 934 |
+
"for _ in range(5):\n",
|
| 935 |
+
" wav = inference(text, ref_s, diffusion_steps=10, alpha=0.3, beta=0.7, embedding_scale=1)\n",
|
| 936 |
+
" display(ipd.Audio(wav, rate=24000, normalize=False))"
|
| 937 |
+
]
|
| 938 |
+
},
|
| 939 |
+
{
|
| 940 |
+
"cell_type": "markdown",
|
| 941 |
+
"metadata": {
|
| 942 |
+
"id": "BKrSMdgcLQRP"
|
| 943 |
+
},
|
| 944 |
+
"source": [
|
| 945 |
+
"#### Less diverse setting (`alpha = 0.1, beta=0.3`)\n",
|
| 946 |
+
"This setting uses 90% of the reference timbre and 70% of the reference prosody. This makes it more similar to the reference speaker at cost of less diverse samples."
|
| 947 |
+
]
|
| 948 |
+
},
|
| 949 |
+
{
|
| 950 |
+
"cell_type": "code",
|
| 951 |
+
"execution_count": null,
|
| 952 |
+
"metadata": {
|
| 953 |
+
"id": "Uo7gVmFoLRfm"
|
| 954 |
+
},
|
| 955 |
+
"outputs": [],
|
| 956 |
+
"source": [
|
| 957 |
+
"# unseen speaker\n",
|
| 958 |
+
"path = \"Demo/reference_audio/1221-135767-0014.wav\"\n",
|
| 959 |
+
"ref_s = compute_style(path)\n",
|
| 960 |
+
"\n",
|
| 961 |
+
"text = \"How much variation is there?\"\n",
|
| 962 |
+
"for _ in range(5):\n",
|
| 963 |
+
" wav = inference(text, ref_s, diffusion_steps=10, alpha=0.1, beta=0.3, embedding_scale=1)\n",
|
| 964 |
+
" display(ipd.Audio(wav, rate=24000, normalize=False))"
|
| 965 |
+
]
|
| 966 |
+
},
|
| 967 |
+
{
|
| 968 |
+
"cell_type": "markdown",
|
| 969 |
+
"metadata": {
|
| 970 |
+
"id": "nfQ0Xrg9LStd"
|
| 971 |
+
},
|
| 972 |
+
"source": [
|
| 973 |
+
"#### More diverse setting (`alpha = 0.5, beta=0.95`)\n",
|
| 974 |
+
"This setting uses 50% of the reference timbre and 5% of the reference prosody (so it uses 100% of the sampled prosody, which makes it more diverse), but this makes it more dissimilar to the reference speaker. "
|
| 975 |
+
]
|
| 976 |
+
},
|
| 977 |
+
{
|
| 978 |
+
"cell_type": "code",
|
| 979 |
+
"execution_count": null,
|
| 980 |
+
"metadata": {
|
| 981 |
+
"id": "cPHz4BzVLT_u"
|
| 982 |
+
},
|
| 983 |
+
"outputs": [],
|
| 984 |
+
"source": [
|
| 985 |
+
"# unseen speaker\n",
|
| 986 |
+
"path = \"Demo/reference_audio/1221-135767-0014.wav\"\n",
|
| 987 |
+
"ref_s = compute_style(path)\n",
|
| 988 |
+
"\n",
|
| 989 |
+
"text = \"How much variation is there?\"\n",
|
| 990 |
+
"for _ in range(5):\n",
|
| 991 |
+
" wav = inference(text, ref_s, diffusion_steps=10, alpha=0.5, beta=0.95, embedding_scale=1)\n",
|
| 992 |
+
" display(ipd.Audio(wav, rate=24000, normalize=False))"
|
| 993 |
+
]
|
| 994 |
+
},
|
| 995 |
+
{
|
| 996 |
+
"cell_type": "markdown",
|
| 997 |
+
"source": [
|
| 998 |
+
"#### Extreme setting (`alpha = 1, beta=1`)\n",
|
| 999 |
+
"This setting uses 0% of the reference timbre and prosody and use the diffusion model to sample the entire style. This makes the speaker very dissimilar to the reference speaker."
|
| 1000 |
+
],
|
| 1001 |
+
"metadata": {
|
| 1002 |
+
"id": "hPKg9eYpL00f"
|
| 1003 |
+
}
|
| 1004 |
+
},
|
| 1005 |
+
{
|
| 1006 |
+
"cell_type": "code",
|
| 1007 |
+
"source": [
|
| 1008 |
+
"# unseen speaker\n",
|
| 1009 |
+
"path = \"Demo/reference_audio/1221-135767-0014.wav\"\n",
|
| 1010 |
+
"ref_s = compute_style(path)\n",
|
| 1011 |
+
"\n",
|
| 1012 |
+
"text = \"How much variation is there?\"\n",
|
| 1013 |
+
"for _ in range(5):\n",
|
| 1014 |
+
" wav = inference(text, ref_s, diffusion_steps=10, alpha=1, beta=1, embedding_scale=1)\n",
|
| 1015 |
+
" display(ipd.Audio(wav, rate=24000, normalize=False))"
|
| 1016 |
+
],
|
| 1017 |
+
"metadata": {
|
| 1018 |
+
"id": "Ei-7JOccL0bF"
|
| 1019 |
+
},
|
| 1020 |
+
"execution_count": null,
|
| 1021 |
+
"outputs": []
|
| 1022 |
+
},
|
| 1023 |
+
{
|
| 1024 |
+
"cell_type": "markdown",
|
| 1025 |
+
"source": [
|
| 1026 |
+
"#### No variation (`alpha = 0, beta=0`)\n",
|
| 1027 |
+
"This setting uses 100% of the reference timbre and prosody and do not use the diffusion model at all. This makes the speaker very similar to the reference speaker, but there is no variation."
|
| 1028 |
+
],
|
| 1029 |
+
"metadata": {
|
| 1030 |
+
"id": "FVMPc3bhL3eL"
|
| 1031 |
+
}
|
| 1032 |
+
},
|
| 1033 |
+
{
|
| 1034 |
+
"cell_type": "code",
|
| 1035 |
+
"source": [
|
| 1036 |
+
"# unseen speaker\n",
|
| 1037 |
+
"path = \"Demo/reference_audio/1221-135767-0014.wav\"\n",
|
| 1038 |
+
"ref_s = compute_style(path)\n",
|
| 1039 |
+
"\n",
|
| 1040 |
+
"text = \"How much variation is there?\"\n",
|
| 1041 |
+
"for _ in range(5):\n",
|
| 1042 |
+
" wav = inference(text, ref_s, diffusion_steps=10, alpha=0, beta=0, embedding_scale=1)\n",
|
| 1043 |
+
" display(ipd.Audio(wav, rate=24000, normalize=False))"
|
| 1044 |
+
],
|
| 1045 |
+
"metadata": {
|
| 1046 |
+
"id": "yh1QZ7uhL4wM"
|
| 1047 |
+
},
|
| 1048 |
+
"execution_count": null,
|
| 1049 |
+
"outputs": []
|
| 1050 |
+
},
|
| 1051 |
+
{
|
| 1052 |
+
"cell_type": "markdown",
|
| 1053 |
+
"source": [
|
| 1054 |
+
"### Extra fun!\n",
|
| 1055 |
+
"\n",
|
| 1056 |
+
"You can record your own voice and clone it using pre-trained StyleTTS 2 model here."
|
| 1057 |
+
],
|
| 1058 |
+
"metadata": {
|
| 1059 |
+
"id": "T0EvkWrAMBDB"
|
| 1060 |
+
}
|
| 1061 |
+
},
|
| 1062 |
+
{
|
| 1063 |
+
"cell_type": "markdown",
|
| 1064 |
+
"source": [
|
| 1065 |
+
"#### Run the following cell to record your voice for 5 seconds. Please keep speaking to have the best effect."
|
| 1066 |
+
],
|
| 1067 |
+
"metadata": {
|
| 1068 |
+
"id": "R985j5QONY8I"
|
| 1069 |
+
}
|
| 1070 |
+
},
|
| 1071 |
+
{
|
| 1072 |
+
"cell_type": "code",
|
| 1073 |
+
"source": [
|
| 1074 |
+
"# all imports\n",
|
| 1075 |
+
"from IPython.display import Javascript\n",
|
| 1076 |
+
"from google.colab import output\n",
|
| 1077 |
+
"from base64 import b64decode\n",
|
| 1078 |
+
"\n",
|
| 1079 |
+
"RECORD = \"\"\"\n",
|
| 1080 |
+
"const sleep = time => new Promise(resolve => setTimeout(resolve, time))\n",
|
| 1081 |
+
"const b2text = blob => new Promise(resolve => {\n",
|
| 1082 |
+
" const reader = new FileReader()\n",
|
| 1083 |
+
" reader.onloadend = e => resolve(e.srcElement.result)\n",
|
| 1084 |
+
" reader.readAsDataURL(blob)\n",
|
| 1085 |
+
"})\n",
|
| 1086 |
+
"var record = time => new Promise(async resolve => {\n",
|
| 1087 |
+
" stream = await navigator.mediaDevices.getUserMedia({ audio: true })\n",
|
| 1088 |
+
" recorder = new MediaRecorder(stream)\n",
|
| 1089 |
+
" chunks = []\n",
|
| 1090 |
+
" recorder.ondataavailable = e => chunks.push(e.data)\n",
|
| 1091 |
+
" recorder.start()\n",
|
| 1092 |
+
" await sleep(time)\n",
|
| 1093 |
+
" recorder.onstop = async ()=>{\n",
|
| 1094 |
+
" blob = new Blob(chunks)\n",
|
| 1095 |
+
" text = await b2text(blob)\n",
|
| 1096 |
+
" resolve(text)\n",
|
| 1097 |
+
" }\n",
|
| 1098 |
+
" recorder.stop()\n",
|
| 1099 |
+
"})\n",
|
| 1100 |
+
"\"\"\"\n",
|
| 1101 |
+
"\n",
|
| 1102 |
+
"def record(sec=3):\n",
|
| 1103 |
+
" display(Javascript(RECORD))\n",
|
| 1104 |
+
" s = output.eval_js('record(%d)' % (sec*1000))\n",
|
| 1105 |
+
" b = b64decode(s.split(',')[1])\n",
|
| 1106 |
+
" with open('audio.wav','wb') as f:\n",
|
| 1107 |
+
" f.write(b)\n",
|
| 1108 |
+
" return 'audio.wav' # or webm ?"
|
| 1109 |
+
],
|
| 1110 |
+
"metadata": {
|
| 1111 |
+
"id": "MWrFs0KWMBpz"
|
| 1112 |
+
},
|
| 1113 |
+
"execution_count": null,
|
| 1114 |
+
"outputs": []
|
| 1115 |
+
},
|
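Note that MediaRecorder typically produces a WebM/Opus blob even though it is saved as `audio.wav` (hence the comment above). If `compute_style` has trouble loading it, a minimal workaround is converting to a real WAV first; this sketch assumes pydub and ffmpeg are available:

    from pydub import AudioSegment  # needs ffmpeg to decode WebM

    def to_wav(path, out_path='audio_fixed.wav'):
        AudioSegment.from_file(path).set_frame_rate(24000).export(out_path, format='wav')
        return out_path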
| 1116 |
+
{
|
| 1117 |
+
"cell_type": "markdown",
|
| 1118 |
+
"source": [
|
| 1119 |
+
"#### Please run this cell and speak:"
|
| 1120 |
+
],
|
| 1121 |
+
"metadata": {
|
| 1122 |
+
"id": "z35qXwM0Nhx1"
|
| 1123 |
+
}
|
| 1124 |
+
},
|
| 1125 |
+
{
|
| 1126 |
+
"cell_type": "code",
|
| 1127 |
+
"source": [
|
| 1128 |
+
"print('Speak now for 5 seconds.')\n",
|
| 1129 |
+
"audio = record(sec=5)\n",
|
| 1130 |
+
"import IPython.display as ipd\n",
|
| 1131 |
+
"display(ipd.Audio(audio, rate=24000, normalize=False))"
|
| 1132 |
+
],
|
| 1133 |
+
"metadata": {
|
| 1134 |
+
"id": "KUEoFyQBMR-8"
|
| 1135 |
+
},
|
| 1136 |
+
"execution_count": null,
|
| 1137 |
+
"outputs": []
|
| 1138 |
+
},
|
| 1139 |
+
{
|
| 1140 |
+
"cell_type": "markdown",
|
| 1141 |
+
"source": [
|
| 1142 |
+
"#### Synthesize in your own voice"
|
| 1143 |
+
],
|
| 1144 |
+
"metadata": {
|
| 1145 |
+
"id": "OQS_7IBpNmM1"
|
| 1146 |
+
}
|
| 1147 |
+
},
|
| 1148 |
+
{
|
| 1149 |
+
"cell_type": "code",
|
| 1150 |
+
"source": [
|
| 1151 |
+
"text = ''' StyleTTS 2 is a text to speech model that leverages style diffusion and adversarial training with large speech language models to achieve human level text to speech synthesis. ''' # @param {type:\"string\"}\n"
|
| 1152 |
+
],
|
| 1153 |
+
"metadata": {
|
| 1154 |
+
"cellView": "form",
|
| 1155 |
+
"id": "c0I3LY7vM8Ta"
|
| 1156 |
+
},
|
| 1157 |
+
"execution_count": null,
|
| 1158 |
+
"outputs": []
|
| 1159 |
+
},
|
| 1160 |
+
{
|
| 1161 |
+
"cell_type": "code",
|
| 1162 |
+
"source": [
|
| 1163 |
+
"reference_dicts = {}\n",
|
| 1164 |
+
"reference_dicts['You'] = audio"
|
| 1165 |
+
],
|
| 1166 |
+
"metadata": {
|
| 1167 |
+
"id": "80eW-pwxNCxu"
|
| 1168 |
+
},
|
| 1169 |
+
"execution_count": null,
|
| 1170 |
+
"outputs": []
|
| 1171 |
+
},
|
| 1172 |
+
{
|
| 1173 |
+
"cell_type": "code",
|
| 1174 |
+
"source": [
|
| 1175 |
+
"start = time.time()\n",
|
| 1176 |
+
"noise = torch.randn(1,1,256).to(device)\n",
|
| 1177 |
+
"for k, path in reference_dicts.items():\n",
|
| 1178 |
+
" ref_s = compute_style(path)\n",
|
| 1179 |
+
"\n",
|
| 1180 |
+
" wav = inference(text, ref_s, alpha=0.1, beta=0.5, diffusion_steps=5, embedding_scale=1)\n",
|
| 1181 |
+
" rtf = (time.time() - start) / (len(wav) / 24000)\n",
|
| 1182 |
+
" print('Speaker: ' + k)\n",
|
| 1183 |
+
" import IPython.display as ipd\n",
|
| 1184 |
+
" print('Synthesized:')\n",
|
| 1185 |
+
" display(ipd.Audio(wav, rate=24000, normalize=False))\n",
|
| 1186 |
+
" print('Reference:')\n",
|
| 1187 |
+
" display(ipd.Audio(path, rate=24000, normalize=False))"
|
| 1188 |
+
],
|
| 1189 |
+
"metadata": {
|
| 1190 |
+
"id": "yIga6MTuNJaN"
|
| 1191 |
+
},
|
| 1192 |
+
"execution_count": null,
|
| 1193 |
+
"outputs": []
|
| 1194 |
+
}
|
| 1195 |
+
],
|
| 1196 |
+
"metadata": {
|
| 1197 |
+
"accelerator": "GPU",
|
| 1198 |
+
"colab": {
|
| 1199 |
+
"provenance": [],
|
| 1200 |
+
"collapsed_sections": [
|
| 1201 |
+
"aAGQPfgYIR23",
|
| 1202 |
+
"eJdB_nCOIVIN",
|
| 1203 |
+
"R985j5QONY8I"
|
| 1204 |
+
],
|
| 1205 |
+
"authorship_tag": "ABX9TyPQdFTqqVEknEG/ma/HMfU+",
|
| 1206 |
+
"include_colab_link": true
|
| 1207 |
+
},
|
| 1208 |
+
"kernelspec": {
|
| 1209 |
+
"display_name": "Python 3",
|
| 1210 |
+
"name": "python3"
|
| 1211 |
+
},
|
| 1212 |
+
"language_info": {
|
| 1213 |
+
"name": "python"
|
| 1214 |
+
}
|
| 1215 |
+
},
|
| 1216 |
+
"nbformat": 4,
|
| 1217 |
+
"nbformat_minor": 0
|
| 1218 |
+
}
|
Colab/StyleTTS2_Finetune_Demo.ipynb
ADDED
|
@@ -0,0 +1,480 @@
|
| 1 |
+
{
|
| 2 |
+
"nbformat": 4,
|
| 3 |
+
"nbformat_minor": 0,
|
| 4 |
+
"metadata": {
|
| 5 |
+
"colab": {
|
| 6 |
+
"provenance": [],
|
| 7 |
+
"gpuType": "T4",
|
| 8 |
+
"authorship_tag": "ABX9TyNiDU9ykIeYxO86Lmuid+ph",
|
| 9 |
+
"include_colab_link": true
|
| 10 |
+
},
|
| 11 |
+
"kernelspec": {
|
| 12 |
+
"name": "python3",
|
| 13 |
+
"display_name": "Python 3"
|
| 14 |
+
},
|
| 15 |
+
"language_info": {
|
| 16 |
+
"name": "python"
|
| 17 |
+
},
|
| 18 |
+
"accelerator": "GPU"
|
| 19 |
+
},
|
| 20 |
+
"cells": [
|
| 21 |
+
{
|
| 22 |
+
"cell_type": "markdown",
|
| 23 |
+
"metadata": {
|
| 24 |
+
"id": "view-in-github",
|
| 25 |
+
"colab_type": "text"
|
| 26 |
+
},
|
| 27 |
+
"source": [
|
| 28 |
+
"<a href=\"https://colab.research.google.com/github/yl4579/StyleTTS2/blob/main/Colab/StyleTTS2_Finetune_Demo.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
|
| 29 |
+
]
|
| 30 |
+
},
|
| 31 |
+
{
|
| 32 |
+
"cell_type": "markdown",
|
| 33 |
+
"source": [
|
| 34 |
+
"### Install packages and download models"
|
| 35 |
+
],
|
| 36 |
+
"metadata": {
|
| 37 |
+
"id": "yLqBa4uYPrqE"
|
| 38 |
+
}
|
| 39 |
+
},
|
| 40 |
+
{
|
| 41 |
+
"cell_type": "code",
|
| 42 |
+
"source": [
|
| 43 |
+
"%%shell\n",
|
| 44 |
+
"git clone https://github.com/yl4579/StyleTTS2.git\n",
|
| 45 |
+
"cd StyleTTS2\n",
|
| 46 |
+
"pip install SoundFile torchaudio munch torch pydub pyyaml librosa nltk matplotlib accelerate transformers phonemizer einops einops-exts tqdm typing-extensions git+https://github.com/resemble-ai/monotonic_align.git\n",
|
| 47 |
+
"sudo apt-get install espeak-ng\n",
|
| 48 |
+
"git-lfs clone https://huggingface.co/yl4579/StyleTTS2-LibriTTS\n",
|
| 49 |
+
"mv StyleTTS2-LibriTTS/Models ."
|
| 50 |
+
],
|
| 51 |
+
"metadata": {
|
| 52 |
+
"id": "H72WF06ZPrTF"
|
| 53 |
+
},
|
| 54 |
+
"execution_count": null,
|
| 55 |
+
"outputs": []
|
| 56 |
+
},
|
| 57 |
+
{
|
| 58 |
+
"cell_type": "markdown",
|
| 59 |
+
"source": [
|
| 60 |
+
"### Download dataset (LJSpeech, 200 samples, ~15 minutes of data)\n",
|
| 61 |
+
"\n",
|
| 62 |
+
"You can definitely do it with fewer samples. This is just a proof of concept with 200 smaples."
|
| 63 |
+
],
|
| 64 |
+
"metadata": {
|
| 65 |
+
"id": "G398sL8wPzTB"
|
| 66 |
+
}
|
| 67 |
+
},
|
| 68 |
+
{
|
| 69 |
+
"cell_type": "code",
|
| 70 |
+
"source": [
|
| 71 |
+
"%cd StyleTTS2\n",
|
| 72 |
+
"!rm -rf Data"
|
| 73 |
+
],
|
| 74 |
+
"metadata": {
|
| 75 |
+
"id": "kJuQUBrEPy5C"
|
| 76 |
+
},
|
| 77 |
+
"execution_count": null,
|
| 78 |
+
"outputs": []
|
| 79 |
+
},
|
| 80 |
+
{
|
| 81 |
+
"cell_type": "code",
|
| 82 |
+
"source": [
|
| 83 |
+
"!gdown --id 1vqz26D3yn7OXS2vbfYxfSnpLS6m6tOFP\n",
|
| 84 |
+
"!unzip Data.zip"
|
| 85 |
+
],
|
| 86 |
+
"metadata": {
|
| 87 |
+
"id": "mDXW8ZZePuSb"
|
| 88 |
+
},
|
| 89 |
+
"execution_count": null,
|
| 90 |
+
"outputs": []
|
| 91 |
+
},
|
| 92 |
+
{
|
| 93 |
+
"cell_type": "markdown",
|
| 94 |
+
"source": [
|
| 95 |
+
"### Change the finetuning config\n",
|
| 96 |
+
"\n",
|
| 97 |
+
"Depending on the GPU you got, you may want to change the bacth size, max audio length, epiochs and so on."
|
| 98 |
+
],
|
| 99 |
+
"metadata": {
|
| 100 |
+
"id": "_AlBQREWU8ud"
|
| 101 |
+
}
|
| 102 |
+
},
|
| 103 |
+
{
|
| 104 |
+
"cell_type": "code",
|
| 105 |
+
"source": [
|
| 106 |
+
"config_path = \"Configs/config_ft.yml\"\n",
|
| 107 |
+
"\n",
|
| 108 |
+
"import yaml\n",
|
| 109 |
+
"config = yaml.safe_load(open(config_path))"
|
| 110 |
+
],
|
| 111 |
+
"metadata": {
|
| 112 |
+
"id": "7uEITi0hU4I2"
|
| 113 |
+
},
|
| 114 |
+
"execution_count": null,
|
| 115 |
+
"outputs": []
|
| 116 |
+
},
|
| 117 |
+
{
|
| 118 |
+
"cell_type": "code",
|
| 119 |
+
"source": [
|
| 120 |
+
"config['data_params']['root_path'] = \"Data/wavs\"\n",
|
| 121 |
+
"\n",
|
| 122 |
+
"config['batch_size'] = 2 # not enough RAM\n",
|
| 123 |
+
"config['max_len'] = 100 # not enough RAM\n",
|
| 124 |
+
"config['loss_params']['joint_epoch'] = 110 # we do not do SLM adversarial training due to not enough RAM\n",
|
| 125 |
+
"\n",
|
| 126 |
+
"with open(config_path, 'w') as outfile:\n",
|
| 127 |
+
" yaml.dump(config, outfile, default_flow_style=True)"
|
| 128 |
+
],
|
| 129 |
+
"metadata": {
|
| 130 |
+
"id": "TPTRgOKSVT4K"
|
| 131 |
+
},
|
| 132 |
+
"execution_count": null,
|
| 133 |
+
"outputs": []
|
| 134 |
+
},
|
| 135 |
+
{
|
| 136 |
+
"cell_type": "markdown",
|
| 137 |
+
"source": [
|
| 138 |
+
"### Start finetuning\n"
|
| 139 |
+
],
|
| 140 |
+
"metadata": {
|
| 141 |
+
"id": "uUuB_19NWj2Y"
|
| 142 |
+
}
|
| 143 |
+
},
|
| 144 |
+
{
|
| 145 |
+
"cell_type": "code",
|
| 146 |
+
"source": [
|
| 147 |
+
"!python train_finetune.py --config_path ./Configs/config_ft.yml"
|
| 148 |
+
],
|
| 149 |
+
"metadata": {
|
| 150 |
+
"id": "HZVAD5GKWm-O"
|
| 151 |
+
},
|
| 152 |
+
"execution_count": null,
|
| 153 |
+
"outputs": []
|
| 154 |
+
},
|
| 155 |
+
{
|
| 156 |
+
"cell_type": "markdown",
|
| 157 |
+
"source": [
|
| 158 |
+
"### Test the model quality\n",
|
| 159 |
+
"\n",
|
| 160 |
+
"Note that this mainly serves as a proof of concept due to RAM limitation of free Colab instances. A lot of settings are suboptimal. In the future when DDP works for train_second.py, we will also add mixed precision finetuning to save time and RAM. You can also add SLM adversarial training run if you have paid Colab services (such as A100 with 40G of RAM)."
|
| 161 |
+
],
|
| 162 |
+
"metadata": {
|
| 163 |
+
"id": "I0_7wsGkXGfc"
|
| 164 |
+
}
|
| 165 |
+
},
|
| 166 |
+
{
|
| 167 |
+
"cell_type": "code",
|
| 168 |
+
"source": [
|
| 169 |
+
"import nltk\n",
|
| 170 |
+
"nltk.download('punkt')"
|
| 171 |
+
],
|
| 172 |
+
"metadata": {
|
| 173 |
+
"id": "OPLphjbncE7p"
|
| 174 |
+
},
|
| 175 |
+
"execution_count": null,
|
| 176 |
+
"outputs": []
|
| 177 |
+
},
|
| 178 |
+
{
|
| 179 |
+
"cell_type": "code",
|
| 180 |
+
"source": [
|
| 181 |
+
"import torch\n",
|
| 182 |
+
"torch.manual_seed(0)\n",
|
| 183 |
+
"torch.backends.cudnn.benchmark = False\n",
|
| 184 |
+
"torch.backends.cudnn.deterministic = True\n",
|
| 185 |
+
"\n",
|
| 186 |
+
"import random\n",
|
| 187 |
+
"random.seed(0)\n",
|
| 188 |
+
"\n",
|
| 189 |
+
"import numpy as np\n",
|
| 190 |
+
"np.random.seed(0)\n",
|
| 191 |
+
"\n",
|
| 192 |
+
"# load packages\n",
|
| 193 |
+
"import time\n",
|
| 194 |
+
"import random\n",
|
| 195 |
+
"import yaml\n",
|
| 196 |
+
"from munch import Munch\n",
|
| 197 |
+
"import numpy as np\n",
|
| 198 |
+
"import torch\n",
|
| 199 |
+
"from torch import nn\n",
|
| 200 |
+
"import torch.nn.functional as F\n",
|
| 201 |
+
"import torchaudio\n",
|
| 202 |
+
"import librosa\n",
|
| 203 |
+
"from nltk.tokenize import word_tokenize\n",
|
| 204 |
+
"\n",
|
| 205 |
+
"from models import *\n",
|
| 206 |
+
"from utils import *\n",
|
| 207 |
+
"from text_utils import TextCleaner\n",
|
| 208 |
+
"textclenaer = TextCleaner()\n",
|
| 209 |
+
"\n",
|
| 210 |
+
"%matplotlib inline\n",
|
| 211 |
+
"\n",
|
| 212 |
+
"to_mel = torchaudio.transforms.MelSpectrogram(\n",
|
| 213 |
+
" n_mels=80, n_fft=2048, win_length=1200, hop_length=300)\n",
|
| 214 |
+
"mean, std = -4, 4\n",
|
| 215 |
+
"\n",
|
| 216 |
+
"def length_to_mask(lengths):\n",
|
| 217 |
+
" mask = torch.arange(lengths.max()).unsqueeze(0).expand(lengths.shape[0], -1).type_as(lengths)\n",
|
| 218 |
+
" mask = torch.gt(mask+1, lengths.unsqueeze(1))\n",
|
| 219 |
+
" return mask\n",
|
| 220 |
+
"\n",
|
| 221 |
+
"def preprocess(wave):\n",
|
| 222 |
+
" wave_tensor = torch.from_numpy(wave).float()\n",
|
| 223 |
+
" mel_tensor = to_mel(wave_tensor)\n",
|
| 224 |
+
" mel_tensor = (torch.log(1e-5 + mel_tensor.unsqueeze(0)) - mean) / std\n",
|
| 225 |
+
" return mel_tensor\n",
|
| 226 |
+
"\n",
|
| 227 |
+
"def compute_style(path):\n",
|
| 228 |
+
" wave, sr = librosa.load(path, sr=24000)\n",
|
| 229 |
+
" audio, index = librosa.effects.trim(wave, top_db=30)\n",
|
| 230 |
+
" if sr != 24000:\n",
|
| 231 |
+
" audio = librosa.resample(audio, sr, 24000)\n",
|
| 232 |
+
" mel_tensor = preprocess(audio).to(device)\n",
|
| 233 |
+
"\n",
|
| 234 |
+
" with torch.no_grad():\n",
|
| 235 |
+
" ref_s = model.style_encoder(mel_tensor.unsqueeze(1))\n",
|
| 236 |
+
" ref_p = model.predictor_encoder(mel_tensor.unsqueeze(1))\n",
|
| 237 |
+
"\n",
|
| 238 |
+
" return torch.cat([ref_s, ref_p], dim=1)\n",
|
| 239 |
+
"\n",
|
| 240 |
+
"device = 'cuda' if torch.cuda.is_available() else 'cpu'\n",
|
| 241 |
+
"\n",
|
| 242 |
+
"# load phonemizer\n",
|
| 243 |
+
"import phonemizer\n",
|
| 244 |
+
"global_phonemizer = phonemizer.backend.EspeakBackend(language='en-us', preserve_punctuation=True, with_stress=True)\n",
|
| 245 |
+
"\n",
|
| 246 |
+
"config = yaml.safe_load(open(\"Models/LJSpeech/config_ft.yml\"))\n",
|
| 247 |
+
"\n",
|
| 248 |
+
"# load pretrained ASR model\n",
|
| 249 |
+
"ASR_config = config.get('ASR_config', False)\n",
|
| 250 |
+
"ASR_path = config.get('ASR_path', False)\n",
|
| 251 |
+
"text_aligner = load_ASR_models(ASR_path, ASR_config)\n",
|
| 252 |
+
"\n",
|
| 253 |
+
"# load pretrained F0 model\n",
|
| 254 |
+
"F0_path = config.get('F0_path', False)\n",
|
| 255 |
+
"pitch_extractor = load_F0_models(F0_path)\n",
|
| 256 |
+
"\n",
|
| 257 |
+
"# load BERT model\n",
|
| 258 |
+
"from Utils.PLBERT.util import load_plbert\n",
|
| 259 |
+
"BERT_path = config.get('PLBERT_dir', False)\n",
|
| 260 |
+
"plbert = load_plbert(BERT_path)\n",
|
| 261 |
+
"\n",
|
| 262 |
+
"model_params = recursive_munch(config['model_params'])\n",
|
| 263 |
+
"model = build_model(model_params, text_aligner, pitch_extractor, plbert)\n",
|
| 264 |
+
"_ = [model[key].eval() for key in model]\n",
|
| 265 |
+
"_ = [model[key].to(device) for key in model]"
|
| 266 |
+
],
|
| 267 |
+
"metadata": {
|
| 268 |
+
"id": "jIIAoDACXJL0"
|
| 269 |
+
},
|
| 270 |
+
"execution_count": null,
|
| 271 |
+
"outputs": []
|
| 272 |
+
},
|
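Note that `compute_style` above returns the acoustic style `ref_s` and the prosodic style `ref_p` concatenated into a single 256-dimensional vector (128 + 128, matching `style_dim` in the config), which is why the inference code later slices it at index 128. A quick sanity check, assuming the dataset from the download step is in place:

    ref = compute_style("Data/wavs/LJ001-0110.wav")  # any utterance works
    print(ref.shape)                                  # expected: torch.Size([1, 256])
    timbre, prosody = ref[:, :128], ref[:, 128:]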
| 273 |
+
{
|
| 274 |
+
"cell_type": "code",
|
| 275 |
+
"source": [
|
| 276 |
+
"files = [f for f in os.listdir(\"Models/LJSpeech/\") if f.endswith('.pth')]\n",
|
| 277 |
+
"sorted_files = sorted(files, key=lambda x: int(x.split('_')[-1].split('.')[0]))"
|
| 278 |
+
],
|
| 279 |
+
"metadata": {
|
| 280 |
+
"id": "eKXRAyyzcMpQ"
|
| 281 |
+
},
|
| 282 |
+
"execution_count": null,
|
| 283 |
+
"outputs": []
|
| 284 |
+
},
|
| 285 |
+
{
|
| 286 |
+
"cell_type": "code",
|
| 287 |
+
"source": [
|
| 288 |
+
"params_whole = torch.load(\"Models/LJSpeech/\" + sorted_files[-1], map_location='cpu')\n",
|
| 289 |
+
"params = params_whole['net']"
|
| 290 |
+
],
|
| 291 |
+
"metadata": {
|
| 292 |
+
"id": "ULuU9-VDb9Pk"
|
| 293 |
+
},
|
| 294 |
+
"execution_count": null,
|
| 295 |
+
"outputs": []
|
| 296 |
+
},
|
| 297 |
+
{
|
| 298 |
+
"cell_type": "code",
|
| 299 |
+
"source": [
|
| 300 |
+
"for key in model:\n",
|
| 301 |
+
" if key in params:\n",
|
| 302 |
+
" print('%s loaded' % key)\n",
|
| 303 |
+
" try:\n",
|
| 304 |
+
" model[key].load_state_dict(params[key])\n",
|
| 305 |
+
" except:\n",
|
| 306 |
+
" from collections import OrderedDict\n",
|
| 307 |
+
" state_dict = params[key]\n",
|
| 308 |
+
" new_state_dict = OrderedDict()\n",
|
| 309 |
+
" for k, v in state_dict.items():\n",
|
| 310 |
+
" name = k[7:] # remove `module.`\n",
|
| 311 |
+
" new_state_dict[name] = v\n",
|
| 312 |
+
" # load params\n",
|
| 313 |
+
" model[key].load_state_dict(new_state_dict, strict=False)\n",
|
| 314 |
+
"# except:\n",
|
| 315 |
+
"# _load(params[key], model[key])\n",
|
| 316 |
+
"_ = [model[key].eval() for key in model]"
|
| 317 |
+
],
|
| 318 |
+
"metadata": {
|
| 319 |
+
"id": "J-U29yIYc2ea"
|
| 320 |
+
},
|
| 321 |
+
"execution_count": null,
|
| 322 |
+
"outputs": []
|
| 323 |
+
},
|
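The try/except above handles checkpoints saved from an `nn.DataParallel`-wrapped model, whose state-dict keys gain a `module.` prefix. The stripping step is equivalent to this one-liner:

    new_state_dict = {(k[len('module.'):] if k.startswith('module.') else k): v
                      for k, v in state_dict.items()}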
| 324 |
+
{
|
| 325 |
+
"cell_type": "code",
|
| 326 |
+
"source": [
|
| 327 |
+
"from Modules.diffusion.sampler import DiffusionSampler, ADPM2Sampler, KarrasSchedule"
|
| 328 |
+
],
|
| 329 |
+
"metadata": {
|
| 330 |
+
"id": "jrPQ_Yrwc3n6"
|
| 331 |
+
},
|
| 332 |
+
"execution_count": null,
|
| 333 |
+
"outputs": []
|
| 334 |
+
},
|
| 335 |
+
{
|
| 336 |
+
"cell_type": "code",
|
| 337 |
+
"source": [
|
| 338 |
+
"sampler = DiffusionSampler(\n",
|
| 339 |
+
" model.diffusion.diffusion,\n",
|
| 340 |
+
" sampler=ADPM2Sampler(),\n",
|
| 341 |
+
" sigma_schedule=KarrasSchedule(sigma_min=0.0001, sigma_max=3.0, rho=9.0), # empirical parameters\n",
|
| 342 |
+
" clamp=False\n",
|
| 343 |
+
")"
|
| 344 |
+
],
|
| 345 |
+
"metadata": {
|
| 346 |
+
"id": "n2CWYNoqc455"
|
| 347 |
+
},
|
| 348 |
+
"execution_count": null,
|
| 349 |
+
"outputs": []
|
| 350 |
+
},
|
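For reference, `KarrasSchedule` spaces the noise levels with the rho-warped interpolation from Karras et al. (2022); a sketch of that formula with the parameters used above:

    import numpy as np

    def karras_sigmas(n, sigma_min=0.0001, sigma_max=3.0, rho=9.0):
        ramp = np.linspace(0, 1, n)
        return (sigma_max ** (1 / rho)
                + ramp * (sigma_min ** (1 / rho) - sigma_max ** (1 / rho))) ** rho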
| 351 |
+
{
|
| 352 |
+
"cell_type": "code",
|
| 353 |
+
"source": [
|
| 354 |
+
"def inference(text, ref_s, alpha = 0.3, beta = 0.7, diffusion_steps=5, embedding_scale=1):\n",
|
| 355 |
+
" text = text.strip()\n",
|
| 356 |
+
" ps = global_phonemizer.phonemize([text])\n",
|
| 357 |
+
" ps = word_tokenize(ps[0])\n",
|
| 358 |
+
" ps = ' '.join(ps)\n",
|
| 359 |
+
" tokens = textclenaer(ps)\n",
|
| 360 |
+
" tokens.insert(0, 0)\n",
|
| 361 |
+
" tokens = torch.LongTensor(tokens).to(device).unsqueeze(0)\n",
|
| 362 |
+
"\n",
|
| 363 |
+
" with torch.no_grad():\n",
|
| 364 |
+
" input_lengths = torch.LongTensor([tokens.shape[-1]]).to(device)\n",
|
| 365 |
+
" text_mask = length_to_mask(input_lengths).to(device)\n",
|
| 366 |
+
"\n",
|
| 367 |
+
" t_en = model.text_encoder(tokens, input_lengths, text_mask)\n",
|
| 368 |
+
" bert_dur = model.bert(tokens, attention_mask=(~text_mask).int())\n",
|
| 369 |
+
" d_en = model.bert_encoder(bert_dur).transpose(-1, -2)\n",
|
| 370 |
+
"\n",
|
| 371 |
+
" s_pred = sampler(noise = torch.randn((1, 256)).unsqueeze(1).to(device),\n",
|
| 372 |
+
" embedding=bert_dur,\n",
|
| 373 |
+
" embedding_scale=embedding_scale,\n",
|
| 374 |
+
" features=ref_s, # reference from the same speaker as the embedding\n",
|
| 375 |
+
" num_steps=diffusion_steps).squeeze(1)\n",
|
| 376 |
+
"\n",
|
| 377 |
+
"\n",
|
| 378 |
+
" s = s_pred[:, 128:]\n",
|
| 379 |
+
" ref = s_pred[:, :128]\n",
|
| 380 |
+
"\n",
|
| 381 |
+
" ref = alpha * ref + (1 - alpha) * ref_s[:, :128]\n",
|
| 382 |
+
" s = beta * s + (1 - beta) * ref_s[:, 128:]\n",
|
| 383 |
+
"\n",
|
| 384 |
+
" d = model.predictor.text_encoder(d_en,\n",
|
| 385 |
+
" s, input_lengths, text_mask)\n",
|
| 386 |
+
"\n",
|
| 387 |
+
" x, _ = model.predictor.lstm(d)\n",
|
| 388 |
+
" duration = model.predictor.duration_proj(x)\n",
|
| 389 |
+
"\n",
|
| 390 |
+
" duration = torch.sigmoid(duration).sum(axis=-1)\n",
|
| 391 |
+
" pred_dur = torch.round(duration.squeeze()).clamp(min=1)\n",
|
| 392 |
+
"\n",
|
| 393 |
+
" pred_aln_trg = torch.zeros(input_lengths, int(pred_dur.sum().data))\n",
|
| 394 |
+
" c_frame = 0\n",
|
| 395 |
+
" for i in range(pred_aln_trg.size(0)):\n",
|
| 396 |
+
" pred_aln_trg[i, c_frame:c_frame + int(pred_dur[i].data)] = 1\n",
|
| 397 |
+
" c_frame += int(pred_dur[i].data)\n",
|
| 398 |
+
"\n",
|
| 399 |
+
" # encode prosody\n",
|
| 400 |
+
" en = (d.transpose(-1, -2) @ pred_aln_trg.unsqueeze(0).to(device))\n",
|
| 401 |
+
" if model_params.decoder.type == \"hifigan\":\n",
|
| 402 |
+
" asr_new = torch.zeros_like(en)\n",
|
| 403 |
+
" asr_new[:, :, 0] = en[:, :, 0]\n",
|
| 404 |
+
" asr_new[:, :, 1:] = en[:, :, 0:-1]\n",
|
| 405 |
+
" en = asr_new\n",
|
| 406 |
+
"\n",
|
| 407 |
+
" F0_pred, N_pred = model.predictor.F0Ntrain(en, s)\n",
|
| 408 |
+
"\n",
|
| 409 |
+
" asr = (t_en @ pred_aln_trg.unsqueeze(0).to(device))\n",
|
| 410 |
+
" if model_params.decoder.type == \"hifigan\":\n",
|
| 411 |
+
" asr_new = torch.zeros_like(asr)\n",
|
| 412 |
+
" asr_new[:, :, 0] = asr[:, :, 0]\n",
|
| 413 |
+
" asr_new[:, :, 1:] = asr[:, :, 0:-1]\n",
|
| 414 |
+
" asr = asr_new\n",
|
| 415 |
+
"\n",
|
| 416 |
+
" out = model.decoder(asr,\n",
|
| 417 |
+
" F0_pred, N_pred, ref.squeeze().unsqueeze(0))\n",
|
| 418 |
+
"\n",
|
| 419 |
+
"\n",
|
| 420 |
+
" return out.squeeze().cpu().numpy()[..., :-50] # weird pulse at the end of the model, need to be fixed later"
|
| 421 |
+
],
|
| 422 |
+
"metadata": {
|
| 423 |
+
"id": "2x5kVb3nc_eY"
|
| 424 |
+
},
|
| 425 |
+
"execution_count": null,
|
| 426 |
+
"outputs": []
|
| 427 |
+
},
|
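The `pred_aln_trg` loop in the function above expands the rounded per-phoneme durations into a hard 0/1 alignment matrix (phonemes x frames) that upsamples the text encodings to frame rate. For example, durations `[2, 3, 1]` yield:

    import torch
    pred_dur = torch.tensor([2, 3, 1])
    aln = torch.zeros(len(pred_dur), int(pred_dur.sum()))
    frame = 0
    for i, d in enumerate(pred_dur):
        aln[i, frame:frame + int(d)] = 1
        frame += int(d)
    # aln:
    # [[1, 1, 0, 0, 0, 0],
    #  [0, 0, 1, 1, 1, 0],
    #  [0, 0, 0, 0, 0, 1]]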
| 428 |
+
{
|
| 429 |
+
"cell_type": "markdown",
|
| 430 |
+
"source": [
|
| 431 |
+
"### Synthesize speech"
|
| 432 |
+
],
|
| 433 |
+
"metadata": {
|
| 434 |
+
"id": "O159JnwCc6CC"
|
| 435 |
+
}
|
| 436 |
+
},
|
| 437 |
+
{
|
| 438 |
+
"cell_type": "code",
|
| 439 |
+
"source": [
|
| 440 |
+
"text = '''Maltby and Company would issue warrants on them deliverable to the importer, and the goods were then passed to be stored in neighboring warehouses.\n",
|
| 441 |
+
"'''"
|
| 442 |
+
],
|
| 443 |
+
"metadata": {
|
| 444 |
+
"id": "ThciXQ6rc9Eq"
|
| 445 |
+
},
|
| 446 |
+
"execution_count": null,
|
| 447 |
+
"outputs": []
|
| 448 |
+
},
|
| 449 |
+
{
|
| 450 |
+
"cell_type": "code",
|
| 451 |
+
"source": [
|
| 452 |
+
"# get a random reference in the training set, note that it doesn't matter which one you use\n",
|
| 453 |
+
"path = \"Data/wavs/LJ001-0110.wav\"\n",
|
| 454 |
+
"# this style vector ref_s can be saved as a parameter together with the model weights\n",
|
| 455 |
+
"ref_s = compute_style(path)"
|
| 456 |
+
],
|
| 457 |
+
"metadata": {
|
| 458 |
+
"id": "jldPkJyCc83a"
|
| 459 |
+
},
|
| 460 |
+
"execution_count": null,
|
| 461 |
+
"outputs": []
|
| 462 |
+
},
|
| 463 |
+
{
|
| 464 |
+
"cell_type": "code",
|
| 465 |
+
"source": [
|
| 466 |
+
"start = time.time()\n",
|
| 467 |
+
"wav = inference(text, ref_s, alpha=0.9, beta=0.9, diffusion_steps=10, embedding_scale=1)\n",
|
| 468 |
+
"rtf = (time.time() - start) / (len(wav) / 24000)\n",
|
| 469 |
+
"print(f\"RTF = {rtf:5f}\")\n",
|
| 470 |
+
"import IPython.display as ipd\n",
|
| 471 |
+
"display(ipd.Audio(wav, rate=24000, normalize=False))"
|
| 472 |
+
],
|
| 473 |
+
"metadata": {
|
| 474 |
+
"id": "_mIU0jqDdQ-c"
|
| 475 |
+
},
|
| 476 |
+
"execution_count": null,
|
| 477 |
+
"outputs": []
|
| 478 |
+
}
|
| 479 |
+
]
|
| 480 |
+
}
|
Configs/config.yml
ADDED
|
@@ -0,0 +1,116 @@
|
| 1 |
+
log_dir: "Models/LJSpeech"
|
| 2 |
+
first_stage_path: "first_stage.pth"
|
| 3 |
+
save_freq: 2
|
| 4 |
+
log_interval: 10
|
| 5 |
+
device: "cuda"
|
| 6 |
+
epochs_1st: 200 # number of epochs for first stage training (pre-training)
|
| 7 |
+
epochs_2nd: 100 # number of epochs for second stage training (joint training)
|
| 8 |
+
batch_size: 16
|
| 9 |
+
max_len: 400 # maximum number of frames
|
| 10 |
+
pretrained_model: ""
|
| 11 |
+
second_stage_load_pretrained: true # set to true if the pre-trained model is for 2nd stage
|
| 12 |
+
load_only_params: false # set to true if you do not want to load epoch numbers and optimizer parameters
|
| 13 |
+
|
| 14 |
+
F0_path: "Utils/JDC/bst.t7"
|
| 15 |
+
ASR_config: "Utils/ASR/config.yml"
|
| 16 |
+
ASR_path: "Utils/ASR/epoch_00080.pth"
|
| 17 |
+
PLBERT_dir: 'Utils/PLBERT/'
|
| 18 |
+
|
| 19 |
+
data_params:
|
| 20 |
+
train_data: "Data/train_list.txt"
|
| 21 |
+
val_data: "Data/val_list.txt"
|
| 22 |
+
root_path: "/local/LJSpeech-1.1/wavs"
|
| 23 |
+
OOD_data: "Data/OOD_texts.txt"
|
| 24 |
+
min_length: 50 # sample OOD texts until a text of at least this length is obtained
|
| 25 |
+
|
| 26 |
+
preprocess_params:
|
| 27 |
+
sr: 24000
|
| 28 |
+
spect_params:
|
| 29 |
+
n_fft: 2048
|
| 30 |
+
win_length: 1200
|
| 31 |
+
hop_length: 300
|
| 32 |
+
|
| 33 |
+
model_params:
|
| 34 |
+
multispeaker: false
|
| 35 |
+
|
| 36 |
+
dim_in: 64
|
| 37 |
+
hidden_dim: 512
|
| 38 |
+
max_conv_dim: 512
|
| 39 |
+
n_layer: 3
|
| 40 |
+
n_mels: 80
|
| 41 |
+
|
| 42 |
+
n_token: 178 # number of phoneme tokens
|
| 43 |
+
max_dur: 50 # maximum duration of a single phoneme
|
| 44 |
+
style_dim: 128 # style vector size
|
| 45 |
+
|
| 46 |
+
dropout: 0.2
|
| 47 |
+
|
| 48 |
+
# config for decoder
|
| 49 |
+
decoder:
|
| 50 |
+
type: 'istftnet' # either hifigan or istftnet
|
| 51 |
+
resblock_kernel_sizes: [3,7,11]
|
| 52 |
+
upsample_rates : [10, 6]
|
| 53 |
+
upsample_initial_channel: 512
|
| 54 |
+
resblock_dilation_sizes: [[1,3,5], [1,3,5], [1,3,5]]
|
| 55 |
+
upsample_kernel_sizes: [20, 12]
|
| 56 |
+
gen_istft_n_fft: 20
|
| 57 |
+
gen_istft_hop_size: 5
|
| 58 |
+
|
| 59 |
+
# speech language model config
|
| 60 |
+
slm:
|
| 61 |
+
model: 'microsoft/wavlm-base-plus'
|
| 62 |
+
sr: 16000 # sampling rate of SLM
|
| 63 |
+
hidden: 768 # hidden size of SLM
|
| 64 |
+
nlayers: 13 # number of layers of SLM
|
| 65 |
+
initial_channel: 64 # initial channels of SLM discriminator head
|
| 66 |
+
|
| 67 |
+
# style diffusion model config
|
| 68 |
+
diffusion:
|
| 69 |
+
embedding_mask_proba: 0.1
|
| 70 |
+
# transformer config
|
| 71 |
+
transformer:
|
| 72 |
+
num_layers: 3
|
| 73 |
+
num_heads: 8
|
| 74 |
+
head_features: 64
|
| 75 |
+
multiplier: 2
|
| 76 |
+
|
| 77 |
+
# diffusion distribution config
|
| 78 |
+
dist:
|
| 79 |
+
sigma_data: 0.2 # placeholder, used when estimate_sigma_data is set to false
|
| 80 |
+
estimate_sigma_data: true # estimate sigma_data from the current batch if set to true
|
| 81 |
+
mean: -3.0
|
| 82 |
+
std: 1.0
|
| 83 |
+
|
| 84 |
+
loss_params:
|
| 85 |
+
lambda_mel: 5. # mel reconstruction loss
|
| 86 |
+
lambda_gen: 1. # generator loss
|
| 87 |
+
lambda_slm: 1. # slm feature matching loss
|
| 88 |
+
|
| 89 |
+
lambda_mono: 1. # monotonic alignment loss (1st stage, TMA)
|
| 90 |
+
lambda_s2s: 1. # sequence-to-sequence loss (1st stage, TMA)
|
| 91 |
+
TMA_epoch: 50 # TMA starting epoch (1st stage)
|
| 92 |
+
|
| 93 |
+
lambda_F0: 1. # F0 reconstruction loss (2nd stage)
|
| 94 |
+
lambda_norm: 1. # norm reconstruction loss (2nd stage)
|
| 95 |
+
lambda_dur: 1. # duration loss (2nd stage)
|
| 96 |
+
lambda_ce: 20. # duration predictor probability output CE loss (2nd stage)
|
| 97 |
+
lambda_sty: 1. # style reconstruction loss (2nd stage)
|
| 98 |
+
lambda_diff: 1. # score matching loss (2nd stage)
|
| 99 |
+
|
| 100 |
+
diff_epoch: 20 # style diffusion starting epoch (2nd stage)
|
| 101 |
+
joint_epoch: 50 # joint training starting epoch (2nd stage)
|
| 102 |
+
|
| 103 |
+
optimizer_params:
|
| 104 |
+
lr: 0.0001 # general learning rate
|
| 105 |
+
bert_lr: 0.00001 # learning rate for PLBERT
|
| 106 |
+
ft_lr: 0.00001 # learning rate for acoustic modules
|
| 107 |
+
|
| 108 |
+
slmadv_params:
|
| 109 |
+
min_len: 400 # minimum length of samples
|
| 110 |
+
max_len: 500 # maximum length of samples
|
| 111 |
+
batch_percentage: 0.5 # to prevent out of memory, only use half of the original batch size
|
| 112 |
+
iter: 10 # update the discriminator every this iterations of generator update
|
| 113 |
+
thresh: 5 # gradient norm above which the gradient is scaled
|
| 114 |
+
scale: 0.01 # gradient scaling factor for predictors from SLM discriminators
|
| 115 |
+
sig: 1.5 # sigma for differentiable duration modeling
|
| 116 |
+
|
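A quick sanity check on the numbers above: the mel front end implied by preprocess_params is the same torchaudio transform the demo notebooks later in this diff construct verbatim, and the istftnet decoder's total upsampling must multiply back out to the 300-sample hop length so that each mel frame maps to exactly 12.5 ms of 24 kHz audio. A minimal sketch (sample_rate is left at torchaudio's default, exactly as the notebooks do):

import torchaudio

# Mel front end from preprocess_params (n_mels comes from model_params).
to_mel = torchaudio.transforms.MelSpectrogram(
    n_mels=80, n_fft=2048, win_length=1200, hop_length=300)

# istftnet decoder: learned upsampling stages times the iSTFT hop
# must reproduce the analysis hop length.
assert 10 * 6 * 5 == 300   # upsample_rates x gen_istft_hop_size == hop_length
assert 24000 / 300 == 80   # mel frames per second at sr=24000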
Configs/config_ft.yml
ADDED
@@ -0,0 +1,111 @@
log_dir: "Models/LJSpeech"
save_freq: 5
log_interval: 10
device: "cuda"
epochs: 50 # number of finetuning epochs (assuming 1 hour of data)
batch_size: 8
max_len: 400 # maximum number of frames
pretrained_model: "Models/LibriTTS/epochs_2nd_00020.pth"
second_stage_load_pretrained: true # set to true if the pre-trained model is for the 2nd stage
load_only_params: true # set to true if you do not want to load epoch numbers and optimizer parameters

F0_path: "Utils/JDC/bst.t7"
ASR_config: "Utils/ASR/config.yml"
ASR_path: "Utils/ASR/epoch_00080.pth"
PLBERT_dir: 'Utils/PLBERT/'

data_params:
  train_data: "Data/train_list.txt"
  val_data: "Data/val_list.txt"
  root_path: "/local/LJSpeech-1.1/wavs"
  OOD_data: "Data/OOD_texts.txt"
  min_length: 50 # resample OOD texts until one of at least this length is obtained

preprocess_params:
  sr: 24000
  spect_params:
    n_fft: 2048
    win_length: 1200
    hop_length: 300

model_params:
  multispeaker: true

  dim_in: 64
  hidden_dim: 512
  max_conv_dim: 512
  n_layer: 3
  n_mels: 80

  n_token: 178 # number of phoneme tokens
  max_dur: 50 # maximum duration of a single phoneme
  style_dim: 128 # style vector size

  dropout: 0.2

  # config for decoder
  decoder:
    type: 'hifigan' # either hifigan or istftnet
    resblock_kernel_sizes: [3, 7, 11]
    upsample_rates: [10, 5, 3, 2]
    upsample_initial_channel: 512
    resblock_dilation_sizes: [[1, 3, 5], [1, 3, 5], [1, 3, 5]]
    upsample_kernel_sizes: [20, 10, 6, 4]

  # speech language model config
  slm:
    model: 'microsoft/wavlm-base-plus'
    sr: 16000 # sampling rate of SLM
    hidden: 768 # hidden size of SLM
    nlayers: 13 # number of layers of SLM
    initial_channel: 64 # initial channels of SLM discriminator head

  # style diffusion model config
  diffusion:
    embedding_mask_proba: 0.1
    # transformer config
    transformer:
      num_layers: 3
      num_heads: 8
      head_features: 64
      multiplier: 2

    # diffusion distribution config
    dist:
      sigma_data: 0.2 # placeholder; only used when estimate_sigma_data is false
      estimate_sigma_data: true # estimate sigma_data from the current batch if set to true
      mean: -3.0
      std: 1.0

loss_params:
  lambda_mel: 5. # mel reconstruction loss
  lambda_gen: 1. # generator loss
  lambda_slm: 1. # SLM feature matching loss

  lambda_mono: 1. # monotonic alignment loss (TMA)
  lambda_s2s: 1. # sequence-to-sequence loss (TMA)

  lambda_F0: 1. # F0 reconstruction loss
  lambda_norm: 1. # norm reconstruction loss
  lambda_dur: 1. # duration loss
  lambda_ce: 20. # duration predictor probability output CE loss
  lambda_sty: 1. # style reconstruction loss
  lambda_diff: 1. # score matching loss

  diff_epoch: 10 # style diffusion starting epoch
  joint_epoch: 30 # joint training starting epoch

optimizer_params:
  lr: 0.0001 # general learning rate
  bert_lr: 0.00001 # learning rate for PLBERT
  ft_lr: 0.0001 # learning rate for acoustic modules

slmadv_params:
  min_len: 400 # minimum length of samples
  max_len: 500 # maximum length of samples
  batch_percentage: 0.5 # use only half of the original batch size to prevent running out of memory
  iter: 10 # update the discriminator once every this many generator updates
  thresh: 5 # gradient norm above which the gradient is scaled down
  scale: 0.01 # gradient scaling factor for predictors from SLM discriminators
  sig: 1.5 # sigma for differentiable duration modeling
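For orientation, this is roughly how such a config is consumed downstream; the demo notebooks later in this diff read their configs the same way (yaml.safe_load plus recursive_munch from the repo's utils). A minimal sketch, not the fine-tuning entry point itself, which is not part of this diff:

import yaml

# Sketch: read the fine-tuning config the way the demo notebooks read theirs.
config = yaml.safe_load(open("Configs/config_ft.yml"))
assert config["load_only_params"]                  # resume weights, not optimizer state
print(config["pretrained_model"])                  # the LibriTTS 2nd-stage checkpoint
print(config["model_params"]["decoder"]["type"])   # 'hifigan'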
Configs/config_libritts.yml
ADDED
@@ -0,0 +1,113 @@
log_dir: "Models/LibriTTS"
first_stage_path: "first_stage.pth"
save_freq: 1
log_interval: 10
device: "cuda"
epochs_1st: 5 # number of epochs for first stage training (pre-training)
epochs_2nd: 2 # number of epochs for second stage training (joint training)
batch_size: 8
max_len: 400 # maximum number of frames
pretrained_model: ""
second_stage_load_pretrained: true # set to true if the pre-trained model is for the 2nd stage
load_only_params: false # set to true if you do not want to load epoch numbers and optimizer parameters

F0_path: "Utils/JDC/bst.t7"
ASR_config: "Utils/ASR/config.yml"
ASR_path: "Utils/ASR/epoch_00080_191_full.pth"
PLBERT_dir: 'Utils/PLBERT/'

data_params:
  train_data: "/workspace/train.txt"
  val_data: "/workspace/val.txt"
  root_path: ""
  OOD_data: "Data/OOD_texts.txt"
  min_length: 50 # resample OOD texts until one of at least this length is obtained

preprocess_params:
  sr: 24000
  spect_params:
    n_fft: 2048
    win_length: 1200
    hop_length: 300

model_params:
  multispeaker: true

  dim_in: 64
  hidden_dim: 512
  max_conv_dim: 512
  n_layer: 3
  n_mels: 80

  n_token: 191 # number of phoneme tokens
  max_dur: 50 # maximum duration of a single phoneme
  style_dim: 128 # style vector size

  dropout: 0.2

  # config for decoder
  decoder:
    type: 'hifigan' # either hifigan or istftnet
    resblock_kernel_sizes: [3, 7, 11]
    upsample_rates: [10, 5, 3, 2]
    upsample_initial_channel: 512
    resblock_dilation_sizes: [[1, 3, 5], [1, 3, 5], [1, 3, 5]]
    upsample_kernel_sizes: [20, 10, 6, 4]

  # speech language model config
  slm:
    model: 'microsoft/wavlm-base-plus'
    sr: 16000 # sampling rate of SLM
    hidden: 768 # hidden size of SLM
    nlayers: 13 # number of layers of SLM
    initial_channel: 64 # initial channels of SLM discriminator head

  # style diffusion model config
  diffusion:
    embedding_mask_proba: 0.1
    # transformer config
    transformer:
      num_layers: 3
      num_heads: 8
      head_features: 64
      multiplier: 2

    # diffusion distribution config
    dist:
      sigma_data: 0.2 # placeholder; only used when estimate_sigma_data is false
      estimate_sigma_data: true # estimate sigma_data from the current batch if set to true
      mean: -3.0
      std: 1.0

loss_params:
  lambda_mel: 5. # mel reconstruction loss
  lambda_gen: 1. # generator loss
  lambda_slm: 1. # SLM feature matching loss

  lambda_mono: 1. # monotonic alignment loss (1st stage, TMA)
  lambda_s2s: 1. # sequence-to-sequence loss (1st stage, TMA)
  TMA_epoch: 2 # TMA starting epoch (1st stage)

  lambda_F0: 1. # F0 reconstruction loss (2nd stage)
  lambda_norm: 1. # norm reconstruction loss (2nd stage)
  lambda_dur: 1. # duration loss (2nd stage)
  lambda_ce: 20. # duration predictor probability output CE loss (2nd stage)
  lambda_sty: 1. # style reconstruction loss (2nd stage)
  lambda_diff: 1. # score matching loss (2nd stage)

  diff_epoch: 1 # style diffusion starting epoch (2nd stage)
  joint_epoch: 2 # joint training starting epoch (2nd stage)

optimizer_params:
  lr: 0.0001 # general learning rate
  bert_lr: 0.00001 # learning rate for PLBERT
  ft_lr: 0.00001 # learning rate for acoustic modules

slmadv_params:
  min_len: 300 # minimum length of samples
  max_len: 400 # maximum length of samples
  batch_percentage: 0.5 # use only half of the original batch size to prevent running out of memory
  iter: 20 # update the discriminator once every this many generator updates
  thresh: 5 # gradient norm above which the gradient is scaled down
  scale: 0.01 # gradient scaling factor for predictors from SLM discriminators
  sig: 1.5 # sigma for differentiable duration modeling
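The comments above imply a staged schedule: TMA alignment joins the first stage at epoch 2 of 5, and within the 2-epoch second stage, style diffusion starts at epoch 1 and joint SLM training at epoch 2. A schematic sketch of that gating, assuming the trainer compares the current epoch against these thresholds (the training loop itself is not part of this diff, so the exact indexing is an assumption):

# Schematic only: epoch-threshold gating implied by the config comments.
# Whether the trainer counts epochs from 0 or 1 is an assumption here.
TMA_epoch, diff_epoch, joint_epoch = 2, 1, 2

def active_losses(stage, epoch):
    if stage == 1:
        return {"tma": epoch >= TMA_epoch}                 # lambda_mono / lambda_s2s
    return {"style_diffusion": epoch >= diff_epoch,        # lambda_diff
            "joint_slm": epoch >= joint_epoch}             # SLM adversarial phase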
Data/OOD_texts.txt
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:e0989ef6a9873b711befefcbe60660ced7a65532359277f766f4db504c558a72
size 31758898
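Data/OOD_texts.txt is checked in as a Git LFS pointer, so only the three metadata lines above live in the repository; the ~31.8 MB text payload is materialized by `git lfs pull`. A small sketch of reading such a pointer file:

# Parse a Git LFS pointer (three "key value" lines: version, oid, size).
def read_lfs_pointer(path):
    with open(path) as f:
        return dict(line.rstrip("\n").split(" ", 1) for line in f if line.strip())

meta = read_lfs_pointer("Data/OOD_texts.txt")
print(meta["oid"], meta["size"])  # sha256:e098... 31758898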
Data/train_list.txt
ADDED
The diff for this file is too large to render.
See raw diff
Data/val_list.txt
ADDED
@@ -0,0 +1,100 @@
LJ022-0023.wav|ðɪ ˌoʊvɚwˈɛlmɪŋ mədʒˈɔːɹᵻɾi ʌv pˈiːpəl ɪn ðɪs kˈʌntɹi nˈoʊ hˌaʊ tə sˈɪft ðə wˈiːt fɹʌmðə tʃˈæf ɪn wʌt ðeɪ hˈɪɹ ænd wʌt ðeɪ ɹˈiːd .|0
LJ043-0030.wav|ɪf sˈʌmbɑːdi dˈɪd ðˈæt tə mˌiː , ɐ lˈaʊsi tɹˈɪk lˈaɪk ðˈæt , tə tˈeɪk maɪ wˈaɪf ɐwˈeɪ , ænd ˈɔːl ðə fˈɜːnɪtʃɚ , aɪ wʊd biː mˈæd æz hˈɛl , tˈuː .|0
LJ005-0201.wav|ˌæzˌɪz ʃˈoʊn baɪ ðə ɹᵻpˈoːɹt ʌvðə kəmˈɪʃənɚz tʊ ɪŋkwˈaɪɚɹ ˌɪntʊ ðə stˈeɪt ʌvðə mjuːnˈɪsɪpəl kˌɔːɹpɚɹˈeɪʃənz ɪn ˈeɪtiːn θˈɜːɾi fˈaɪv .|0
LJ001-0110.wav|ˈiːvən ðə kˈæslɑːn tˈaɪp wɛn ɛnlˈɑːɹdʒd ʃˈoʊz ɡɹˈeɪt ʃˈɔːɹtkʌmɪŋz ɪn ðɪs ɹᵻspˈɛkt :|0
LJ003-0345.wav|ˈɔːl ðə kəmˈɪɾi kʊd dˈuː ɪn ðɪs ɹᵻspˈɛkt wʌz tə θɹˈoʊ ðə ɹᵻspˌɑːnsəbˈɪlɪɾi ˌɔn ˈʌðɚz .|0
LJ007-0154.wav|ðiːz pˈʌndʒənt ænd wˈɛl ɡɹˈaʊndᵻd stɹˈɪktʃɚz ɐplˈaɪd wɪð stˈɪl ɡɹˈeɪɾɚ fˈoːɹs tə ðɪ ʌŋkənvˈɪktᵻd pɹˈɪzənɚ , ðə mˈæn hˌuː kˈeɪm tə ðə pɹˈɪzən ˈɪnəsənt , ænd stˈɪl ʌŋkəntˈæmᵻnˌeɪɾᵻd ,|0
LJ018-0098.wav|ænd ɹˈɛkəɡnˌaɪzd æz wˈʌn ʌvðə fɹˈiːkwɛntɚz ʌvðə bˈoʊɡəs lˈɔː stˈeɪʃənɚz . hɪz ɚɹˈɛst lˈɛd tə ðæt ʌv ˈʌðɚz .|0
LJ047-0044.wav|ˈɑːswəld wʌz , haʊˈɛvɚ , wˈɪlɪŋ tə dɪskˈʌs hɪz kˈɑːntækts wɪð sˈoʊviət ɐθˈɔːɹɪɾiz . hiː dᵻnˈaɪd hˌævɪŋ ˌɛni ɪnvˈɑːlvmənt wɪð sˈoʊviət ɪntˈɛlɪdʒəns ˈeɪdʒənsiz|0
LJ031-0038.wav|ðə fˈɜːst fɪzˈɪʃən tə sˈiː ðə pɹˈɛzɪdənt æt pˈɑːɹklənd hˈɑːspɪɾəl wʌz dˈɑːktɚ . tʃˈɑːɹlz dʒˈeɪ . kˈæɹɪkˌoʊ , ɐ ɹˈɛzᵻdənt ɪn dʒˈɛnɚɹəl sˈɜːdʒɚɹi .|0
LJ048-0194.wav|dˈʊɹɹɪŋ ðə mˈɔːɹnɪŋ ʌv noʊvˈɛmbɚ twˈɛnti tˈuː pɹˈaɪɚ tə ðə mˈoʊɾɚkˌeɪd .|0
LJ049-0026.wav|ˌɔn əkˈeɪʒən ðə sˈiːkɹᵻt sˈɜːvɪs hɐzbɪn pɚmˈɪɾᵻd tə hæv ɐn ˈeɪdʒənt ɹˈaɪdɪŋ ɪnðə pˈæsɪndʒɚ kəmpˈɑːɹtmənt wɪððə pɹˈɛzɪdənt .|0
LJ004-0152.wav|ɔːlðˈoʊ æt mˈɪstɚ . bˈʌkstənz vˈɪzɪt ɐ nˈuː dʒˈeɪl wʌz ɪn pɹˈɑːsɛs ʌv ɪɹˈɛkʃən , ðə fˈɜːst stˈɛp təwˈɔːɹdz ɹᵻfˈɔːɹm sˈɪns hˈaʊɚdz vˌɪzɪtˈeɪʃən ɪn sˈɛvəntˌiːn sˈɛvənti fˈoːɹ .|0
LJ008-0278.wav|ɔːɹ ðˈɛɹz mˌaɪt biː wˈʌn ʌv mˈɛni , ænd ɪt mˌaɪt biː kənsˈɪdɚd nˈɛsᵻsɚɹi tə dˈɑːlɚ mˌeɪk ɐn ɛɡzˈæmpəl.dˈɑːlɚ|0
LJ043-0002.wav|ðə wˈɔːɹəŋ kəmˈɪʃən ɹᵻpˈoːɹt . baɪ ðə pɹˈɛzɪdənts kəmˈɪʃən ɔnðɪ ɐsˌæsᵻnˈeɪʃən ʌv pɹˈɛzɪdənt kˈɛnədi . tʃˈæptɚ sˈɛvən . lˈiː hˈɑːɹvi ˈɑːswəld :|0
LJ009-0114.wav|mˈɪstɚ . wˈeɪkfiːld wˈaɪndz ˈʌp hɪz ɡɹˈæfɪk bˌʌt sˈʌmwʌt sɛnsˈeɪʃənəl ɐkˈaʊnt baɪ dᵻskɹˈaɪbɪŋ ɐnˈʌðɚ ɹᵻlˈɪdʒəs sˈɜːvɪs , wˌɪtʃ mˈeɪ ɐpɹˈoʊpɹɪˌeɪtli biː ɪnsˈɜːɾᵻd hˈɪɹ .|0
LJ028-0506.wav|ɐ mˈɑːdɚn ˈɑːɹɾɪst wʊdhɐv dˈɪfɪkˌʌlti ɪn dˌuːɪŋ sˈʌtʃ ˈækjʊɹət wˈɜːk .|0
LJ050-0168.wav|wɪððə pɚtˈɪkjʊlɚ pˈɜːpəsᵻz ʌvðɪ ˈeɪdʒənsi ɪnvˈɑːlvd . ðə kəmˈɪʃən ɹˈɛkəɡnˌaɪzᵻz ðæt ðɪs ɪz ɐ kˌɑːntɹəvˈɜːʃəl ˈɛɹiə|0
LJ039-0223.wav|ˈɑːswəldz mɚɹˈiːn tɹˈeɪnɪŋ ɪn mˈɑːɹksmənʃˌɪp , hɪz ˈʌðɚ ɹˈaɪfəl ɛkspˈiəɹɪəns ænd hɪz ɪstˈæblɪʃt fəmˌɪliˈæɹɪɾi wɪð ðɪs pɚtˈɪkjʊlɚ wˈɛpən|0
LJ029-0032.wav|ɐkˈoːɹdɪŋ tʊ oʊdˈɑːnəl , kwˈoʊt , wiː hæd ɐ mˈoʊɾɚkˌeɪd wɛɹˈɛvɚ kplˈʌsplʌs wˌɪtʃ hɐdbɪn bˌɪn hˈeɪstili sˈʌmənd fɚðə ðə pˈɜːpəs wiː wˈɛnt , ˈɛnd kwˈoʊt .|0
LJ031-0070.wav|dˈɑːktɚ . klˈɑːɹk , hˌuː mˈoʊst klˈoʊsli əbzˈɜːvd ðə hˈɛd wˈuːnd ,|0
LJ034-0198.wav|jˈuːɪnz , hˌuː wʌz ɔnðə saʊθwˈɛst kˈɔːɹnɚɹ ʌv ˈɛlm ænd hjˈuːstən stɹˈiːts tˈɛstᵻfˌaɪd ðæt hiː kʊd nˌɑːt dᵻskɹˈaɪb ðə mˈæn hiː sˈɔː ɪnðə wˈɪndoʊ .|0
LJ026-0068.wav|ˈɛnɚdʒi ˈɛntɚz ðə plˈænt , tʊ ɐ smˈɔːl ɛkstˈɛnt ,|0
LJ039-0075.wav|wˈʌns juː nˈoʊ ðæt juː mˈʌst pˌʊt ðə kɹˈɔshɛɹz ɔnðə tˈɑːɹɡɪt ænd ðæt ɪz ˈɔːl ðæt ɪz nˈɛsᵻsɚɹi .|0
LJ004-0096.wav|ðə fˈeɪɾəl kˈɑːnsɪkwənsᵻz wˈɛɹɑːf mˌaɪt biː pɹɪvˈɛntᵻd ɪf ðə dʒˈʌstɪsᵻz ʌvðə pˈiːs wɜː djˈuːli ˈɔːθɚɹˌaɪzd|0
LJ005-0014.wav|spˈiːkɪŋ ˌɔn ɐ dᵻbˈeɪt ˌɔn pɹˈɪzən mˈæɾɚz , hiː dᵻklˈɛɹd ðˈæt|0
LJ012-0161.wav|hiː wʌz ɹᵻpˈoːɹɾᵻd tə hæv fˈɔːlən ɐwˈeɪ tʊ ɐ ʃˈædoʊ .|0
LJ018-0239.wav|hɪz dˌɪsɐpˈɪɹəns ɡˈeɪv kˈʌlɚ ænd sˈʌbstəns tʊ ˈiːvəl ɹᵻpˈoːɹts ɔːlɹˌɛdi ɪn sˌɜːkjʊlˈeɪʃən ðætðə wɪl ænd kənvˈeɪəns əbˌʌv ɹᵻfˈɜːd tuː|0
LJ019-0257.wav|hˈɪɹ ðə tɹˈɛd wˈiːl wʌz ɪn jˈuːs , ðɛɹ sˈɛljʊlɚ kɹˈæŋks , ɔːɹ hˈɑːɹd lˈeɪbɚ məʃˈiːnz .|0
LJ028-0008.wav|juː tˈæp dʒˈɛntli wɪð jʊɹ hˈiːl əpˌɑːn ðə ʃˈoʊldɚɹ ʌvðə dɹˈoʊmdɚɹi tʊ ˈɜːdʒ hɜːɹ ˈɔn .|0
LJ024-0083.wav|ðɪs plˈæn ʌv mˈaɪn ɪz nˈoʊ ɐtˈæk ɔnðə kˈoːɹt ;|0
LJ042-0129.wav|nˈoʊ nˈaɪt klˈʌbz ɔːɹ bˈoʊlɪŋ ˈælɪz , nˈoʊ plˈeɪsᵻz ʌv ɹˌɛkɹiːˈeɪʃən ɛksˈɛpt ðə tɹˈeɪd jˈuːniən dˈænsᵻz . aɪ hæv hæd ɪnˈʌf .|0
LJ036-0103.wav|ðə pəlˈiːs ˈæskt hˌɪm wˈɛðɚ hiː kʊd pˈɪk ˈaʊt hɪz pˈæsɪndʒɚ fɹʌmðə lˈaɪnʌp .|0
LJ046-0058.wav|dˈʊɹɹɪŋ hɪz pɹˈɛzɪdənsi , fɹˈæŋklɪn dˈiː . ɹˈoʊzəvˌɛlt mˌeɪd ˈɔːlmoʊst fˈoːɹ hˈʌndɹɪd dʒˈɜːniz ænd tɹˈævəld mˈoːɹ ðɐn θɹˈiː hˈʌndɹɪd fˈɪfti θˈaʊzənd mˈaɪlz .|0
LJ014-0076.wav|hiː wʌz sˈiːn ˈæftɚwɚdz smˈoʊkɪŋ ænd tˈɔːkɪŋ wɪð hɪz hˈoʊsts ɪn ðɛɹ bˈæk pˈɑːɹlɚ , ænd nˈɛvɚ sˈiːn ɐɡˈɛn ɐlˈaɪv .|0
LJ002-0043.wav|lˈɔŋ nˈæɹoʊ ɹˈuːmz wˈʌn θˈɜːɾi sˈɪks fˈiːt , sˈɪks twˈɛnti θɹˈiː fˈiːt , ænd ðɪ ˈeɪtθ ˈeɪtiːn ,|0
LJ009-0076.wav|wiː kˈʌm tə ðə sˈɜːmən .|0
LJ017-0131.wav|ˈiːvən wɛn ðə hˈaɪ ʃˈɛɹɪf hæd tˈoʊld hˌɪm ðɛɹwˌʌz nˈoʊ pˌɑːsəbˈɪlɪɾi əvɚ ɹᵻpɹˈiːv , ænd wɪðˌɪn ɐ fjˈuː ˈaʊɚz ʌv ˌɛksɪkjˈuːʃən .|0
LJ046-0184.wav|bˌʌt ðɛɹ ɪz ɐ sˈɪstəm fɚðɪ ɪmˈiːdɪət nˌoʊɾɪfɪkˈeɪʃən ʌvðə sˈiːkɹᵻt sˈɜːvɪs baɪ ðə kənfˈaɪnɪŋ ˌɪnstɪtˈuːʃən wɛn ɐ sˈʌbdʒɛkt ɪz ɹᵻlˈiːst ɔːɹ ɛskˈeɪps .|0
LJ014-0263.wav|wˌɛn ˈʌðɚ plˈɛʒɚz pˈɔːld hiː tˈʊk ɐ θˈiəɾɚ , ænd pˈoʊzd æz ɐ mjuːnˈɪfɪsənt pˈeɪtɹən ʌvðə dɹəmˈæɾɪk ˈɑːɹt .|0
LJ042-0096.wav|ˈoʊld ɛkstʃˈeɪndʒ ɹˈeɪt ɪn ɐdˈɪʃən tə hɪz fˈæktɚɹi sˈælɚɹi ʌv ɐpɹˈɑːksɪmətli ˈiːkwəl ɐmˈaʊnt|0
LJ049-0050.wav|hˈɪl hæd bˈoʊθ fˈiːt ɔnðə kˈɑːɹ ænd wʌz klˈaɪmɪŋ ɐbˈoːɹd tʊ ɐsˈɪst pɹˈɛzɪdənt ænd mˈɪsɪz . kˈɛnədi .|0
LJ019-0186.wav|sˈiːɪŋ ðæt sˈɪns ðɪ ɪstˈæblɪʃmənt ʌvðə sˈɛntɹəl kɹˈɪmɪnəl kˈoːɹt , nˈuːɡeɪt ɹᵻsˈiːvd pɹˈɪzənɚz fɔːɹ tɹˈaɪəl fɹʌm sˈɛvɹəl kˈaʊntiz ,|0
LJ028-0307.wav|ðˈɛn lˈɛt twˈɛnti dˈeɪz pˈæs , ænd æt ðɪ ˈɛnd ʌv ðæt tˈaɪm stˈeɪʃən nˌɪɹ ðə tʃˈældæsəŋ ɡˈeɪts ɐ bˈɑːdi ʌv fˈoːɹ θˈaʊzənd .|0
LJ012-0235.wav|wˌaɪl ðeɪ wɜːɹ ɪn ɐ stˈeɪt ʌv ɪnsˌɛnsəbˈɪlɪɾi ðə mˈɜːdɚ wʌz kəmˈɪɾᵻd .|0
LJ034-0053.wav|ɹˈiːtʃt ðə sˈeɪm kəŋklˈuːʒən æz lætˈoʊnə ðætðə pɹˈɪnts fˈaʊnd ɔnðə kˈɑːɹtənz wɜː ðoʊz ʌv lˈiː hˈɑːɹvi ˈɑːswəld .|0
LJ014-0030.wav|ðiːz wɜː dˈæmnətˌoːɹi fˈækts wˌɪtʃ wˈɛl səpˈoːɹɾᵻd ðə pɹˌɑːsɪkjˈuːʃən .|0
LJ015-0203.wav|bˌʌt wɜː ðə pɹɪkˈɔːʃənz tˈuː mˈɪnɪt , ðə vˈɪdʒɪləns tˈuː klˈoʊs təbi ᵻlˈuːdᵻd ɔːɹ ˌoʊvɚkˈʌm ?|0
LJ028-0093.wav|bˌʌt hɪz skɹˈaɪb ɹˈoʊt ɪɾ ɪnðə mˈænɚ kˈʌstəmˌɛɹi fɚðə skɹˈaɪbz ʌv ðoʊz dˈeɪz tə ɹˈaɪt ʌv ðɛɹ ɹˈɔɪəl mˈæstɚz .|0
LJ002-0018.wav|ðɪ ɪnˈædɪkwəsi ʌvðə dʒˈeɪl wʌz nˈoʊɾɪst ænd ɹᵻpˈoːɹɾᵻd əpˌɑːn ɐɡˈɛn ænd ɐɡˈɛn baɪ ðə ɡɹˈænd dʒˈʊɹɹiz ʌvðə sˈɪɾi ʌv lˈʌndən ,|0
LJ028-0275.wav|æt lˈæst , ɪnðə twˈɛntiəθ mˈʌnθ ,|0
LJ012-0042.wav|wˌɪtʃ hiː kˈɛpt kənsˈiːld ɪn ɐ hˈaɪdɪŋ plˈeɪs wɪð ɐ tɹˈæp dˈoːɹ dʒˈʌst ˌʌndɚ hɪz bˈɛd .|0
LJ011-0096.wav|hiː mˈæɹid ɐ lˈeɪdi ˈɔːlsoʊ bᵻlˈɔŋɪŋ tə ðə səsˈaɪəɾi ʌv fɹˈɛndz , hˌuː bɹˈɔːt hˌɪm ɐ lˈɑːɹdʒ fˈɔːɹtʃʊn , wˈɪtʃ , ænd hɪz ˈoʊn mˈʌni , hiː pˌʊt ˌɪntʊ ɐ sˈɪɾi fˈɜːm ,|0
LJ036-0077.wav|ɹˈɑːdʒɚ dˈiː . kɹˈeɪɡ , ɐ dˈɛpjuːɾi ʃˈɛɹɪf ʌv dˈæləs kˈaʊnti ,|0
LJ016-0318.wav|ˈʌðɚɹ əfˈɪʃəlz , ɡɹˈeɪt lˈɔɪɚz , ɡˈʌvɚnɚz ʌv pɹˈɪzənz , ænd tʃˈæplɪnz səpˈoːɹɾᵻd ðɪs vjˈuː .|0
LJ013-0164.wav|hˌuː kˈeɪm fɹʌm hɪz ɹˈuːm ɹˈɛdi dɹˈɛst , ɐ səspˈɪʃəs sˈɜːkəmstˌæns , æz hiː wʌz ˈɔːlweɪz lˈeɪt ɪnðə mˈɔːɹnɪŋ .|0
LJ027-0141.wav|ɪz klˈoʊsli ɹᵻpɹədˈuːst ɪnðə lˈaɪf hˈɪstɚɹi ʌv ɛɡzˈɪstɪŋ dˈɪɹ . ɔːɹ , ɪn ˈʌðɚ wˈɜːdz ,|0
LJ028-0335.wav|ɐkˈoːɹdɪŋli ðeɪ kəmˈɪɾᵻd tə hˌɪm ðə kəmˈænd ʌv ðɛɹ hˈoʊl ˈɑːɹmi , ænd pˌʊt ðə kˈiːz ʌv ðɛɹ sˈɪɾi ˌɪntʊ hɪz hˈændz .|0
LJ031-0202.wav|mˈɪsɪz . kˈɛnədi tʃˈoʊz ðə hˈɑːspɪɾəl ɪn bəθˈɛzdə fɚðɪ ˈɔːtɑːpsi bɪkˈʌz ðə pɹˈɛzɪdənt hæd sˈɜːvd ɪnðə nˈeɪvi .|0
LJ021-0145.wav|fɹʌm ðoʊz wˈɪlɪŋ tə dʒˈɔɪn ɪn ɪstˈæblɪʃɪŋ ðɪs hˈoʊpt fɔːɹ pˈiəɹɪəd ʌv pˈiːs ,|0
LJ016-0288.wav|dˈɑːlɚ mˈuːlɚ , mˈuːlɚ , hiːz ðə mˈæn , dˈɑːlɚ tˈɪl ɐ daɪvˈɜːʒən wʌz kɹiːˈeɪɾᵻd baɪ ðɪ ɐpˈɪɹəns ʌvðə ɡˈæloʊz , wˌɪtʃ wʌz ɹᵻsˈiːvd wɪð kəntˈɪnjuːəs jˈɛlz .|0
LJ028-0081.wav|jˈɪɹz lˈeɪɾɚ , wˌɛn ðɪ ˌɑːɹkiːˈɑːlədʒˌɪsts kʊd ɹˈɛdili dɪstˈɪŋɡwɪʃ ðə fˈɔls fɹʌmðə tɹˈuː ,|0
LJ018-0081.wav|hɪz dᵻfˈɛns bˌiːɪŋ ðæt hiː hæd ɪntˈɛndᵻd tə kəmˈɪt sˈuːɪsˌaɪd , bˌʌt ðˈæt , ɔnðɪ ɐpˈɪɹəns ʌv ðɪs ˈɑːfɪsɚ hˌuː hæd ɹˈɔŋd hˌɪm ,|0
LJ021-0066.wav|təɡˌɛðɚ wɪð ɐ ɡɹˈeɪt ˈɪŋkɹiːs ɪnðə pˈeɪɹoʊlz , ðɛɹ hɐz kˈʌm ɐ səbstˈænʃəl ɹˈaɪz ɪnðə tˈoʊɾəl ʌv ɪndˈʌstɹɪəl pɹˈɑːfɪts|0
LJ009-0238.wav|ˈæftɚ ðɪs ðə ʃˈɛɹɪfs sˈɛnt fɔːɹ ɐnˈʌðɚ ɹˈoʊp , bˌʌt ðə spɛktˈeɪɾɚz ˌɪntəfˈɪɹd , ænd ðə mˈæn wʌz kˈæɹid bˈæk tə dʒˈeɪl .|0
LJ005-0079.wav|ænd ɪmpɹˈuːv ðə mˈɔːɹəlz ʌvðə pɹˈɪzənɚz , ænd ʃˌæl ɪnʃˈʊɹ ðə pɹˈɑːpɚ mˈɛʒɚɹ ʌv pˈʌnɪʃmənt tə kənvˈɪktᵻd əfˈɛndɚz .|0
LJ035-0019.wav|dɹˈoʊv tə ðə nɔːɹθwˈɛst kˈɔːɹnɚɹ ʌv ˈɛlm ænd hjˈuːstən , ænd pˈɑːɹkt ɐpɹˈɑːksɪmətli tˈɛn fˈiːt fɹʌmðə tɹˈæfɪk sˈɪɡnəl .|0
LJ036-0174.wav|ðɪs ɪz ðɪ ɐpɹˈɑːksɪmət tˈaɪm hiː ˈɛntɚd ðə ɹˈuːmɪŋhˌaʊs , ɐkˈoːɹdɪŋ tʊ ˈɜːliːn ɹˈɑːbɚts , ðə hˈaʊskiːpɚ ðˈɛɹ .|0
LJ046-0146.wav|ðə kɹaɪtˈiəɹɪə ɪn ɪfˈɛkt pɹˈaɪɚ tə noʊvˈɛmbɚ twˈɛnti tˈuː , nˈaɪntiːn sˈɪksti θɹˈiː , fɔːɹ dɪtˈɜːmɪnɪŋ wˈɛðɚ tʊ ɐksˈɛpt mətˈɪɹiəl fɚðə pˌiːˌɑːɹɹˈɛs dʒˈɛnɚɹəl fˈaɪlz|0
LJ017-0044.wav|ænd ðə dˈiːpɪst æŋzˈaɪəɾi wʌz fˈɛlt ðætðə kɹˈaɪm , ɪf kɹˈaɪm ðˈɛɹ hɐdbɪn , ʃˌʊd biː bɹˈɔːt hˈoʊm tʊ ɪts pˈɜːpɪtɹˌeɪɾɚ .|0
LJ017-0070.wav|bˌʌt hɪz spˈoːɹɾɪŋ ˌɑːpɚɹˈeɪʃənz dɪdnˌɑːt pɹˈɑːspɚ , ænd hiː bɪkˌeɪm ɐ nˈiːdi mˈæn , ˈɔːlweɪz dɹˈɪvən tə dˈɛspɚɹət stɹˈeɪts fɔːɹ kˈæʃ .|0
LJ014-0020.wav|hiː wʌz sˈuːn ˈæftɚwɚdz ɚɹˈɛstᵻd ˌɔn səspˈɪʃən , ænd ɐ sˈɜːtʃ ʌv hɪz lˈɑːdʒɪŋz bɹˈɔːt tə lˈaɪt sˈɛvɹəl ɡˈɑːɹmənts sˈætʃɚɹˌeɪɾᵻd wɪð blˈʌd ;|0
LJ016-0020.wav|hiː nˈɛvɚ ɹˈiːtʃt ðə sˈɪstɚn , bˌʌt fˈɛl bˈæk ˌɪntʊ ðə jˈɑːɹd , ˈɪndʒɚɹɪŋ hɪz lˈɛɡz sᵻvˈɪɹli .|0
LJ045-0230.wav|wˌɛn hiː wʌz fˈaɪnəli ˌæpɹihˈɛndᵻd ɪnðə tˈɛksəs θˈiəɾɚ . ɔːlðˈoʊ ɪɾ ɪz nˌɑːt fˈʊli kɚɹˈɑːbɚɹˌeɪɾᵻd baɪ ˈʌðɚz hˌuː wɜː pɹˈɛzənt ,|0
LJ035-0129.wav|ænd ʃiː mˈʌstɐv ɹˈʌn dˌaʊn ðə stˈɛɹz ɐhˈɛd ʌv ˈɑːswəld ænd wʊd pɹˈɑːbəbli hæv sˈiːn ɔːɹ hˈɜːd hˌɪm .|0
LJ008-0307.wav|ˈæftɚwɚdz ɛkspɹˈɛs ɐ wˈɪʃ tə mˈɜːdɚ ðə ɹᵻkˈoːɹdɚ fɔːɹ hˌævɪŋ kˈɛpt ðˌɛm sˌoʊ lˈɔŋ ɪn səspˈɛns .|0
LJ008-0294.wav|nˌɪɹli ɪndˈɛfɪnətli dᵻfˈɜːd .|0
LJ047-0148.wav|ˌɔn ɑːktˈoʊbɚ twˈɛnti fˈaɪv ,|0
LJ008-0111.wav|ðeɪ ˈɛntɚd ɐ dˈɑːlɚ stˈoʊŋ kˈoʊld ɹˈuːm , dˈɑːlɚɹ ænd wɜː pɹˈɛzəntli dʒˈɔɪnd baɪ ðə pɹˈɪzənɚ .|0
LJ034-0042.wav|ðæt hiː kʊd ˈoʊnli tˈɛstᵻfˌaɪ wɪð sˈɜːtənti ðætðə pɹˈɪnt wʌz lˈɛs ðɐn θɹˈiː dˈeɪz ˈoʊld .|0
LJ037-0234.wav|mˈɪsɪz . mˈɛɹi bɹˈɑːk , ðə wˈaɪf əvə mɪkˈænɪk hˌuː wˈɜːkt æt ðə stˈeɪʃən , wʌz ðɛɹ æt ðə tˈaɪm ænd ʃiː sˈɔː ɐ wˈaɪt mˈeɪl ,|0
LJ040-0002.wav|tʃˈæptɚ sˈɛvən . lˈiː hˈɑːɹvi ˈɑːswəld : bˈækɡɹaʊnd ænd pˈɑːsᵻbəl mˈoʊɾɪvz , pˈɑːɹt wˌʌn .|0
LJ045-0140.wav|ðɪ ˈɑːɹɡjuːmənts hiː jˈuːzd tə dʒˈʌstᵻfˌaɪ hɪz jˈuːs ʌvðɪ ˈeɪliəs sədʒˈɛst ðæt ˈɑːswəld mˌeɪhɐv kˈʌm tə θˈɪŋk ðætðə hˈoʊl wˈɜːld wʌz bᵻkˈʌmɪŋ ɪnvˈɑːlvd|0
LJ012-0035.wav|ðə nˈʌmbɚ ænd nˈeɪmz ˌɔn wˈɑːtʃᵻz , wɜː kˈɛɹfəli ɹᵻmˈuːvd ɔːɹ əblˈɪɾɚɹˌeɪɾᵻd ˈæftɚ ðə ɡˈʊdz pˈæst ˌaʊɾəv hɪz hˈændz .|0
LJ012-0250.wav|ɔnðə sˈɛvənθ dʒuːlˈaɪ , ˈeɪtiːn θˈɜːɾi sˈɛvən ,|0
LJ016-0179.wav|kəntɹˈæktᵻd wɪð ʃˈɛɹɪfs ænd kənvˈiːnɚz tə wˈɜːk baɪ ðə dʒˈɑːb .|0
LJ016-0138.wav|æɾə dˈɪstəns fɹʌmðə pɹˈɪzən .|0
LJ027-0052.wav|ðiːz pɹˈɪnsɪpəlz ʌv həmˈɑːlədʒi ɑːɹ ᵻsˈɛnʃəl tʊ ɐ kɚɹˈɛkt ɪntˌɜːpɹɪtˈeɪʃən ʌvðə fˈækts ʌv mɔːɹfˈɑːlədʒi .|0
LJ031-0134.wav|ˌɔn wˈʌn əkˈeɪʒən mˈɪsɪz . dʒˈɑːnsən , ɐkˈʌmpənid baɪ tˈuː sˈiːkɹᵻt sˈɜːvɪs ˈeɪdʒənts , lˈɛft ðə ɹˈuːm tə sˈiː mˈɪsɪz . kˈɛnədi ænd mˈɪsɪz . kˈɑːnæli .|0
LJ019-0273.wav|wˌɪtʃ sˌɜː dʒˈɑːʃjuːə dʒˈɛb tˈoʊld ðə kəmˈɪɾi hiː kənsˈɪdɚd ðə pɹˈɑːpɚɹ ˈɛlɪmənts ʌv pˈiːnəl dˈɪsɪplˌɪn .|0
LJ014-0110.wav|æt ðə fˈɜːst ðə bˈɑːksᵻz wɜːɹ ɪmpˈaʊndᵻd , ˈoʊpənd , ænd fˈaʊnd tə kəntˈeɪn mˈɛnɪəv oʊkˈɑːnɚz ɪfˈɛkts .|0
LJ034-0160.wav|ˌɔn bɹˈɛnənz sˈʌbsᵻkwənt sˈɜːʔn̩ aɪdˈɛntɪfɪkˈeɪʃən ʌv lˈiː hˈɑːɹvi ˈɑːswəld æz ðə mˈæn hiː sˈɔː fˈaɪɚ ðə ɹˈaɪfəl .|0
LJ038-0199.wav|ᵻlˈɛvən . ɪf aɪɐm ɐlˈaɪv ænd tˈeɪkən pɹˈɪzənɚ ,|0
LJ014-0010.wav|jˈɛt hiː kʊd nˌɑːt ˌoʊvɚkˈʌm ðə stɹˈeɪndʒ fˌæsᵻnˈeɪʃən ɪt hˈæd fɔːɹ hˌɪm , ænd ɹᵻmˈeɪnd baɪ ðə sˈaɪd ʌvðə kˈɔːɹps tˈɪl ðə stɹˈɛtʃɚ kˈeɪm .|0
LJ033-0047.wav|aɪ nˈoʊɾɪst wɛn aɪ wɛnt ˈaʊt ðætðə lˈaɪt wʌz ˈɔn , ˈɛnd kwˈoʊt ,|0
LJ040-0027.wav|hiː wʌz nˈɛvɚ sˈæɾɪsfˌaɪd wɪð ˈɛnɪθˌɪŋ .|0
LJ048-0228.wav|ænd ˈʌðɚz hˌuː wɜː pɹˈɛzənt sˈeɪ ðæt nˈoʊ ˈeɪdʒənt wʌz ɪnˈiːbɹɪˌeɪɾᵻd ɔːɹ ˈæktᵻd ɪmpɹˈɑːpɚli .|0
LJ003-0111.wav|hiː wʌz ɪŋ kˈɑːnsɪkwəns pˌʊt ˌaʊɾəv ðə pɹətˈɛkʃən ʌv ðɛɹ ɪntˈɜːnəl lˈɔː , ˈɛnd kwˈoʊt . ðɛɹ kˈoʊd wʌzɐ sˈʌbdʒɛkt ʌv sˌʌm kjˌʊɹɹɪˈɔsɪɾi .|0
LJ008-0258.wav|lˈɛt mˌiː ɹᵻtɹˈeɪs maɪ stˈɛps , ænd spˈiːk mˈoːɹ ɪn diːtˈeɪl ʌvðə tɹˈiːtmənt ʌvðə kəndˈɛmd ɪn ðoʊz blˈʌdθɜːsti ænd bɹˈuːɾəli ɪndˈɪfɹənt dˈeɪz ,|0
LJ029-0022.wav|ðɪ ɚɹˈɪdʒɪnəl plˈæŋ kˈɔːld fɚðə pɹˈɛzɪdənt tə spˈɛnd ˈoʊnli wˈʌn dˈeɪ ɪnðə stˈeɪt , mˌeɪkɪŋ wˈɜːlwɪnd vˈɪzɪts tə dˈæləs , fˈɔːɹt wˈɜːθ , sˌæn æntˈoʊnɪˌoʊ , ænd hjˈuːstən .|0
LJ004-0045.wav|mˈɪstɚ . stˈɜːdʒᵻz bˈoːɹn , sˌɜː dʒˈeɪmz mˈækɪntˌɑːʃ , sˌɜː dʒˈeɪmz skˈɑːɹlɪt , ænd wˈɪljəm wˈɪlbɚfˌoːɹs .|0
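Each line is pipe-delimited: a wav filename resolved against data_params.root_path, the phonemized transcription in IPA (the same representation the phonemizer produces in the demo notebooks), and a speaker index, which is always 0 for single-speaker LJSpeech. A minimal sketch of a loader for this format:

# Parse list lines of the form: <wav filename>|<IPA phoneme string>|<speaker id>
def load_file_list(path):
    with open(path, encoding="utf-8") as f:
        return [tuple(line.rstrip("\n").split("|")) for line in f if line.strip()]

val = load_file_list("Data/val_list.txt")
assert len(val) == 100
wav_name, phonemes, speaker = val[0]   # ('LJ022-0023.wav', 'ðɪ ...', '0')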
Demo/Inference_LJSpeech.ipynb
ADDED
@@ -0,0 +1,554 @@
{
 "cells": [
  {
   "cell_type": "markdown",
   "id": "9adb7bd1",
   "metadata": {},
   "source": [
    "# StyleTTS 2 Demo (LJSpeech)\n"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "6108384d",
   "metadata": {},
   "source": [
    "### Utils"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "96e173bf",
   "metadata": {},
   "outputs": [],
   "source": [
    "import torch\n",
    "torch.manual_seed(0)\n",
    "torch.backends.cudnn.benchmark = False\n",
    "torch.backends.cudnn.deterministic = True\n",
    "\n",
    "import random\n",
    "random.seed(0)\n",
    "\n",
    "import numpy as np\n",
    "np.random.seed(0)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "da84c60f",
   "metadata": {},
   "outputs": [],
   "source": [
    "%cd .."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "5a3ddcc8",
   "metadata": {},
   "outputs": [],
   "source": [
    "# load packages\n",
    "import time\n",
    "import random\n",
    "import yaml\n",
    "from munch import Munch\n",
    "import numpy as np\n",
    "import torch\n",
    "from torch import nn\n",
    "import torch.nn.functional as F\n",
    "import torchaudio\n",
    "import librosa\n",
    "from nltk.tokenize import word_tokenize\n",
    "\n",
    "from models import *\n",
    "from utils import *\n",
    "from text_utils import TextCleaner\n",
    "textcleaner = TextCleaner()\n",
    "\n",
    "%matplotlib inline"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "bbdc04c0",
   "metadata": {},
   "outputs": [],
   "source": [
    "device = 'cuda' if torch.cuda.is_available() else 'cpu'"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "00ee05e1",
   "metadata": {},
   "outputs": [],
   "source": [
    "to_mel = torchaudio.transforms.MelSpectrogram(\n",
    "    n_mels=80, n_fft=2048, win_length=1200, hop_length=300)\n",
    "mean, std = -4, 4\n",
    "\n",
    "def length_to_mask(lengths):\n",
    "    mask = torch.arange(lengths.max()).unsqueeze(0).expand(lengths.shape[0], -1).type_as(lengths)\n",
    "    mask = torch.gt(mask+1, lengths.unsqueeze(1))\n",
    "    return mask\n",
    "\n",
    "def preprocess(wave):\n",
    "    wave_tensor = torch.from_numpy(wave).float()\n",
    "    mel_tensor = to_mel(wave_tensor)\n",
    "    mel_tensor = (torch.log(1e-5 + mel_tensor.unsqueeze(0)) - mean) / std\n",
    "    return mel_tensor\n",
    "\n",
    "def compute_style(ref_dicts):\n",
    "    reference_embeddings = {}\n",
    "    for key, path in ref_dicts.items():\n",
    "        wave, sr = librosa.load(path, sr=24000)\n",
    "        audio, index = librosa.effects.trim(wave, top_db=30)\n",
    "        if sr != 24000:\n",
    "            audio = librosa.resample(audio, orig_sr=sr, target_sr=24000)  # keyword args required by recent librosa\n",
    "        mel_tensor = preprocess(audio).to(device)\n",
    "\n",
    "        with torch.no_grad():\n",
    "            ref = model.style_encoder(mel_tensor.unsqueeze(1))\n",
    "        reference_embeddings[key] = (ref.squeeze(1), audio)\n",
    "\n",
    "    return reference_embeddings"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "7b9cecbe",
   "metadata": {},
   "source": [
    "### Load models"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "64fc4c0f",
   "metadata": {},
   "outputs": [],
   "source": [
    "# load phonemizer\n",
    "import phonemizer\n",
    "global_phonemizer = phonemizer.backend.EspeakBackend(language='en-us', preserve_punctuation=True, with_stress=True)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "48e7b644",
   "metadata": {},
   "outputs": [],
   "source": [
    "config = yaml.safe_load(open(\"Models/LJSpeech/config.yml\"))\n",
    "\n",
    "# load pretrained ASR model\n",
    "ASR_config = config.get('ASR_config', False)\n",
    "ASR_path = config.get('ASR_path', False)\n",
    "text_aligner = load_ASR_models(ASR_path, ASR_config)\n",
    "\n",
    "# load pretrained F0 model\n",
    "F0_path = config.get('F0_path', False)\n",
    "pitch_extractor = load_F0_models(F0_path)\n",
    "\n",
    "# load BERT model\n",
    "from Utils.PLBERT.util import load_plbert\n",
    "BERT_path = config.get('PLBERT_dir', False)\n",
    "plbert = load_plbert(BERT_path)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "ffc18cf7",
   "metadata": {},
   "outputs": [],
   "source": [
    "model = build_model(recursive_munch(config['model_params']), text_aligner, pitch_extractor, plbert)\n",
    "_ = [model[key].eval() for key in model]\n",
    "_ = [model[key].to(device) for key in model]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "64529d5c",
   "metadata": {},
   "outputs": [],
   "source": [
    "params_whole = torch.load(\"Models/LJSpeech/epoch_2nd_00100.pth\", map_location='cpu')\n",
    "params = params_whole['net']"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "895d9706",
   "metadata": {},
   "outputs": [],
   "source": [
    "for key in model:\n",
    "    if key in params:\n",
    "        print('%s loaded' % key)\n",
    "        try:\n",
    "            model[key].load_state_dict(params[key])\n",
    "        except:\n",
    "            from collections import OrderedDict\n",
    "            state_dict = params[key]\n",
    "            new_state_dict = OrderedDict()\n",
    "            for k, v in state_dict.items():\n",
    "                name = k[7:] # remove `module.`\n",
    "                new_state_dict[name] = v\n",
    "            # load params\n",
    "            model[key].load_state_dict(new_state_dict, strict=False)\n",
    "# except:\n",
    "#     _load(params[key], model[key])\n",
    "_ = [model[key].eval() for key in model]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "c1a59db2",
   "metadata": {},
   "outputs": [],
   "source": [
    "from Modules.diffusion.sampler import DiffusionSampler, ADPM2Sampler, KarrasSchedule"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "e30985ab",
   "metadata": {},
   "outputs": [],
   "source": [
    "sampler = DiffusionSampler(\n",
    "    model.diffusion.diffusion,\n",
    "    sampler=ADPM2Sampler(),\n",
    "    sigma_schedule=KarrasSchedule(sigma_min=0.0001, sigma_max=3.0, rho=9.0), # empirical parameters\n",
    "    clamp=False\n",
    ")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "b803110e",
   "metadata": {},
   "source": [
    "### Synthesize speech"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "24655f46",
   "metadata": {},
   "outputs": [],
   "source": [
    "# synthesize a text\n",
    "text = ''' StyleTTS 2 is a text-to-speech model that leverages style diffusion and adversarial training with large speech language models to achieve human-level text-to-speech synthesis. '''"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "ca57469c",
   "metadata": {},
   "outputs": [],
   "source": [
    "def inference(text, noise, diffusion_steps=5, embedding_scale=1):\n",
    "    text = text.strip()\n",
    "    text = text.replace('\"', '')\n",
    "    ps = global_phonemizer.phonemize([text])\n",
    "    ps = word_tokenize(ps[0])\n",
    "    ps = ' '.join(ps)\n",
    "\n",
    "    tokens = textcleaner(ps)\n",
    "    tokens.insert(0, 0)\n",
    "    tokens = torch.LongTensor(tokens).to(device).unsqueeze(0)\n",
    "\n",
    "    with torch.no_grad():\n",
    "        input_lengths = torch.LongTensor([tokens.shape[-1]]).to(tokens.device)\n",
    "        text_mask = length_to_mask(input_lengths).to(tokens.device)\n",
    "\n",
    "        t_en = model.text_encoder(tokens, input_lengths, text_mask)\n",
    "        bert_dur = model.bert(tokens, attention_mask=(~text_mask).int())\n",
    "        d_en = model.bert_encoder(bert_dur).transpose(-1, -2)\n",
    "\n",
    "        s_pred = sampler(noise,\n",
    "                         embedding=bert_dur[0].unsqueeze(0), num_steps=diffusion_steps,\n",
    "                         embedding_scale=embedding_scale).squeeze(0)\n",
    "\n",
    "        s = s_pred[:, 128:]\n",
    "        ref = s_pred[:, :128]\n",
    "\n",
    "        d = model.predictor.text_encoder(d_en, s, input_lengths, text_mask)\n",
    "\n",
    "        x, _ = model.predictor.lstm(d)\n",
    "        duration = model.predictor.duration_proj(x)\n",
    "        duration = torch.sigmoid(duration).sum(axis=-1)\n",
    "        pred_dur = torch.round(duration.squeeze()).clamp(min=1)\n",
    "\n",
    "        pred_dur[-1] += 5\n",
    "\n",
    "        pred_aln_trg = torch.zeros(input_lengths, int(pred_dur.sum().data))\n",
    "        c_frame = 0\n",
    "        for i in range(pred_aln_trg.size(0)):\n",
    "            pred_aln_trg[i, c_frame:c_frame + int(pred_dur[i].data)] = 1\n",
    "            c_frame += int(pred_dur[i].data)\n",
    "\n",
    "        # encode prosody\n",
    "        en = (d.transpose(-1, -2) @ pred_aln_trg.unsqueeze(0).to(device))\n",
    "        F0_pred, N_pred = model.predictor.F0Ntrain(en, s)\n",
    "        out = model.decoder((t_en @ pred_aln_trg.unsqueeze(0).to(device)),\n",
    "                            F0_pred, N_pred, ref.squeeze().unsqueeze(0))\n",
    "\n",
    "    return out.squeeze().cpu().numpy()"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "d438ef4f",
   "metadata": {},
   "source": [
    "#### Basic synthesis (5 diffusion steps)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "d3d7f7d5",
   "metadata": {
    "scrolled": true
   },
   "outputs": [],
   "source": [
    "start = time.time()\n",
    "noise = torch.randn(1,1,256).to(device)\n",
    "wav = inference(text, noise, diffusion_steps=5, embedding_scale=1)\n",
    "rtf = (time.time() - start) / (len(wav) / 24000)\n",
    "print(f\"RTF = {rtf:5f}\")\n",
    "import IPython.display as ipd\n",
    "display(ipd.Audio(wav, rate=24000))"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "2d5d9df0",
   "metadata": {},
   "source": [
    "#### With more diffusion steps (more diverse)\n",
    "Since the sampler is ancestral, the more steps, the more diverse the samples are, at the cost of slower synthesis."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "a10129fd",
   "metadata": {},
   "outputs": [],
   "source": [
    "start = time.time()\n",
    "noise = torch.randn(1,1,256).to(device)\n",
    "wav = inference(text, noise, diffusion_steps=10, embedding_scale=1)\n",
    "rtf = (time.time() - start) / (len(wav) / 24000)\n",
    "print(f\"RTF = {rtf:5f}\")\n",
    "import IPython.display as ipd\n",
    "display(ipd.Audio(wav, rate=24000))"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "1877ea15",
   "metadata": {},
   "source": [
    "### Speech expressiveness\n",
    "The following section recreates the samples shown in [Section 6](https://styletts2.github.io/#emo) of the demo page."
   ]
  },
  {
   "cell_type": "markdown",
   "id": "4c4777b7",
   "metadata": {},
   "source": [
    "#### With embedding_scale=1\n",
    "This is the classifier-free guidance scale. The higher the scale, the more the style is conditioned on the input text, and hence the more emotional the speech."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "c29ea2f0",
   "metadata": {},
   "outputs": [],
   "source": [
    "texts = {}\n",
    "texts['Happy'] = \"We are happy to invite you to join us on a journey to the past, where we will visit the most amazing monuments ever built by human hands.\"\n",
    "texts['Sad'] = \"I am sorry to say that we have suffered a severe setback in our efforts to restore prosperity and confidence.\"\n",
    "texts['Angry'] = \"The field of astronomy is a joke! Its theories are based on flawed observations and biased interpretations!\"\n",
    "texts['Surprised'] = \"I can't believe it! You mean to tell me that you have discovered a new species of bacteria in this pond?\"\n",
    "\n",
    "for k,v in texts.items():\n",
    "    noise = torch.randn(1,1,256).to(device)\n",
    "    wav = inference(v, noise, diffusion_steps=10, embedding_scale=1)\n",
    "    print(k + \": \")\n",
    "    display(ipd.Audio(wav, rate=24000, normalize=False))"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "3c89499f",
   "metadata": {},
   "source": [
    "#### With embedding_scale=2"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "f73be3aa",
   "metadata": {},
   "outputs": [],
   "source": [
    "texts = {}\n",
    "texts['Happy'] = \"We are happy to invite you to join us on a journey to the past, where we will visit the most amazing monuments ever built by human hands.\"\n",
    "texts['Sad'] = \"I am sorry to say that we have suffered a severe setback in our efforts to restore prosperity and confidence.\"\n",
    "texts['Angry'] = \"The field of astronomy is a joke! Its theories are based on flawed observations and biased interpretations!\"\n",
    "texts['Surprised'] = \"I can't believe it! You mean to tell me that you have discovered a new species of bacteria in this pond?\"\n",
    "\n",
    "for k,v in texts.items():\n",
    "    noise = torch.randn(1,1,256).to(device)\n",
    "    wav = inference(v, noise, diffusion_steps=10, embedding_scale=2) # embedding_scale=2 for more pronounced emotion\n",
    "    print(k + \": \")\n",
    "    display(ipd.Audio(wav, rate=24000, normalize=False))"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "9320da63",
   "metadata": {},
   "source": [
    "### Long-form generation\n",
    "This section includes a basic implementation of Algorithm 1 in the paper for consistent long-form audio generation. The example passage is taken from [Section 5](https://styletts2.github.io/#long) of the demo page."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "cdd4db51",
   "metadata": {},
   "outputs": [],
   "source": [
    "passage = '''If the supply of fruit is greater than the family needs, it may be made a source of income by sending the fresh fruit to the market if there is one near enough, or by preserving, canning, and making jelly for sale. To make such an enterprise a success the fruit and work must be first class. There is magic in the word \"Homemade,\" when the product appeals to the eye and the palate; but many careless and incompetent people have found to their sorrow that this word has not magic enough to float inferior goods on the market. As a rule large canning and preserving establishments are clean and have the best appliances, and they employ chemists and skilled labor. The home product must be very good to compete with the attractive goods that are sent out from such establishments. Yet for first-class homemade products there is a market in all large cities. All first-class grocers have customers who purchase such goods.'''"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "ebb941c8",
   "metadata": {},
   "outputs": [],
   "source": [
    "def LFinference(text, s_prev, noise, alpha=0.7, diffusion_steps=5, embedding_scale=1):\n",
    "    text = text.strip()\n",
    "    text = text.replace('\"', '')\n",
    "    ps = global_phonemizer.phonemize([text])\n",
    "    ps = word_tokenize(ps[0])\n",
    "    ps = ' '.join(ps)\n",
    "\n",
    "    tokens = textcleaner(ps)\n",
    "    tokens.insert(0, 0)\n",
    "    tokens = torch.LongTensor(tokens).to(device).unsqueeze(0)\n",
    "\n",
    "    with torch.no_grad():\n",
    "        input_lengths = torch.LongTensor([tokens.shape[-1]]).to(tokens.device)\n",
    "        text_mask = length_to_mask(input_lengths).to(tokens.device)\n",
    "\n",
    "        t_en = model.text_encoder(tokens, input_lengths, text_mask)\n",
    "        bert_dur = model.bert(tokens, attention_mask=(~text_mask).int())\n",
    "        d_en = model.bert_encoder(bert_dur).transpose(-1, -2)\n",
    "\n",
    "        s_pred = sampler(noise,\n",
    "                         embedding=bert_dur[0].unsqueeze(0), num_steps=diffusion_steps,\n",
    "                         embedding_scale=embedding_scale).squeeze(0)\n",
    "\n",
    "        if s_prev is not None:\n",
    "            # convex combination of previous and current style\n",
    "            s_pred = alpha * s_prev + (1 - alpha) * s_pred\n",
    "\n",
    "        s = s_pred[:, 128:]\n",
    "        ref = s_pred[:, :128]\n",
    "\n",
    "        d = model.predictor.text_encoder(d_en, s, input_lengths, text_mask)\n",
    "\n",
    "        x, _ = model.predictor.lstm(d)\n",
    "        duration = model.predictor.duration_proj(x)\n",
    "        duration = torch.sigmoid(duration).sum(axis=-1)\n",
    "        pred_dur = torch.round(duration.squeeze()).clamp(min=1)\n",
    "\n",
    "        pred_aln_trg = torch.zeros(input_lengths, int(pred_dur.sum().data))\n",
    "        c_frame = 0\n",
    "        for i in range(pred_aln_trg.size(0)):\n",
    "            pred_aln_trg[i, c_frame:c_frame + int(pred_dur[i].data)] = 1\n",
    "            c_frame += int(pred_dur[i].data)\n",
    "\n",
    "        # encode prosody\n",
    "        en = (d.transpose(-1, -2) @ pred_aln_trg.unsqueeze(0).to(device))\n",
    "        F0_pred, N_pred = model.predictor.F0Ntrain(en, s)\n",
    "        out = model.decoder((t_en @ pred_aln_trg.unsqueeze(0).to(device)),\n",
    "                            F0_pred, N_pred, ref.squeeze().unsqueeze(0))\n",
    "\n",
    "    return out.squeeze().cpu().numpy(), s_pred"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "7ca0ef2e",
   "metadata": {},
   "outputs": [],
   "source": [
    "sentences = passage.split('.') # simple split by period\n",
    "wavs = []\n",
    "s_prev = None\n",
    "for text in sentences:\n",
    "    if text.strip() == \"\": continue\n",
    "    text += '.' # add the period back\n",
    "    noise = torch.randn(1,1,256).to(device)\n",
    "    wav, s_prev = LFinference(text, s_prev, noise, alpha=0.7, diffusion_steps=10, embedding_scale=1.5)\n",
    "    wavs.append(wav)\n",
    "display(ipd.Audio(np.concatenate(wavs), rate=24000, normalize=False))"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "NLP",
   "language": "python",
   "name": "nlp"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.9.7"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
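The notebook only plays results inline through IPython.display; inference() returns a plain float numpy array at 24 kHz, so persisting a sample is one extra call. A sketch using the soundfile package, which the notebook itself does not import:

import soundfile as sf

# wav: the float32 waveform returned by inference(); StyleTTS 2 outputs 24 kHz audio.
sf.write("styletts2_sample.wav", wav, 24000)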
Demo/Inference_LibriTTS.ipynb
ADDED
|
@@ -0,0 +1,1155 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"cells": [
|
| 3 |
+
{
|
| 4 |
+
"cell_type": "markdown",
|
| 5 |
+
"id": "9adb7bd1",
|
| 6 |
+
"metadata": {},
|
| 7 |
+
"source": [
|
| 8 |
+
"# StyleTTS 2 Demo (LibriTTS)\n",
|
| 9 |
+
"\n",
|
| 10 |
+
"Before you run the following cells, please make sure you have downloaded [reference_audio.zip](https://huggingface.co/yl4579/StyleTTS2-LibriTTS/resolve/main/reference_audio.zip) and unzipped it under the `demo` folder."
|
| 11 |
+
]
|
| 12 |
+
},
|
| 13 |
+
{
|
| 14 |
+
"cell_type": "markdown",
|
| 15 |
+
"id": "6108384d",
|
| 16 |
+
"metadata": {},
|
| 17 |
+
"source": [
|
| 18 |
+
"### Utils"
|
| 19 |
+
]
|
| 20 |
+
},
|
| 21 |
+
{
|
| 22 |
+
"cell_type": "code",
|
| 23 |
+
"execution_count": null,
|
| 24 |
+
"id": "96e173bf",
|
| 25 |
+
"metadata": {},
|
| 26 |
+
"outputs": [],
|
| 27 |
+
"source": [
|
| 28 |
+
"import torch\n",
|
| 29 |
+
"torch.manual_seed(0)\n",
|
| 30 |
+
"torch.backends.cudnn.benchmark = False\n",
|
| 31 |
+
"torch.backends.cudnn.deterministic = True\n",
|
| 32 |
+
"\n",
|
| 33 |
+
"import random\n",
|
| 34 |
+
"random.seed(0)\n",
|
| 35 |
+
"\n",
|
| 36 |
+
"import numpy as np\n",
|
| 37 |
+
"np.random.seed(0)"
|
| 38 |
+
]
|
| 39 |
+
},
|
| 40 |
+
{
|
| 41 |
+
"cell_type": "code",
|
| 42 |
+
"execution_count": null,
|
| 43 |
+
"id": "da84c60f",
|
| 44 |
+
"metadata": {},
|
| 45 |
+
"outputs": [],
|
| 46 |
+
"source": [
|
| 47 |
+
"%cd .."
|
| 48 |
+
]
|
| 49 |
+
},
|
| 50 |
+
{
|
| 51 |
+
"cell_type": "code",
|
| 52 |
+
"execution_count": null,
|
| 53 |
+
"id": "5a3ddcc8",
|
| 54 |
+
"metadata": {},
|
| 55 |
+
"outputs": [],
|
| 56 |
+
"source": [
|
| 57 |
+
"# load packages\n",
|
| 58 |
+
"import time\n",
|
| 59 |
+
"import random\n",
|
| 60 |
+
"import yaml\n",
|
| 61 |
+
"from munch import Munch\n",
|
| 62 |
+
"import numpy as np\n",
|
| 63 |
+
"import torch\n",
|
| 64 |
+
"from torch import nn\n",
|
| 65 |
+
"import torch.nn.functional as F\n",
|
| 66 |
+
"import torchaudio\n",
|
| 67 |
+
"import librosa\n",
|
| 68 |
+
"from nltk.tokenize import word_tokenize\n",
|
| 69 |
+
"\n",
|
| 70 |
+
"from models import *\n",
|
| 71 |
+
"from utils import *\n",
|
| 72 |
+
"from text_utils import TextCleaner\n",
|
| 73 |
+
"textclenaer = TextCleaner()\n",
|
| 74 |
+
"\n",
|
| 75 |
+
"%matplotlib inline"
|
| 76 |
+
]
|
| 77 |
+
},
|
| 78 |
+
{
|
| 79 |
+
"cell_type": "code",
|
| 80 |
+
"execution_count": null,
|
| 81 |
+
"id": "00ee05e1",
|
| 82 |
+
"metadata": {},
|
| 83 |
+
"outputs": [],
|
| 84 |
+
"source": [
|
| 85 |
+
"to_mel = torchaudio.transforms.MelSpectrogram(\n",
|
| 86 |
+
" n_mels=80, n_fft=2048, win_length=1200, hop_length=300)\n",
|
| 87 |
+
"mean, std = -4, 4\n",
|
| 88 |
+
"\n",
|
| 89 |
+
"def length_to_mask(lengths):\n",
|
| 90 |
+
" mask = torch.arange(lengths.max()).unsqueeze(0).expand(lengths.shape[0], -1).type_as(lengths)\n",
|
| 91 |
+
" mask = torch.gt(mask+1, lengths.unsqueeze(1))\n",
|
| 92 |
+
" return mask\n",
|
| 93 |
+
"\n",
|
| 94 |
+
"def preprocess(wave):\n",
|
| 95 |
+
" wave_tensor = torch.from_numpy(wave).float()\n",
|
| 96 |
+
" mel_tensor = to_mel(wave_tensor)\n",
|
| 97 |
+
" mel_tensor = (torch.log(1e-5 + mel_tensor.unsqueeze(0)) - mean) / std\n",
|
| 98 |
+
" return mel_tensor\n",
|
| 99 |
+
"\n",
|
| 100 |
+
"def compute_style(path):\n",
|
| 101 |
+
" wave, sr = librosa.load(path, sr=24000)\n",
|
| 102 |
+
" audio, index = librosa.effects.trim(wave, top_db=30)\n",
|
| 103 |
+
" if sr != 24000:\n",
|
| 104 |
+
" audio = librosa.resample(audio, sr, 24000)\n",
|
| 105 |
+
" mel_tensor = preprocess(audio).to(device)\n",
|
| 106 |
+
"\n",
|
| 107 |
+
" with torch.no_grad():\n",
|
| 108 |
+
" ref_s = model.style_encoder(mel_tensor.unsqueeze(1))\n",
|
| 109 |
+
" ref_p = model.predictor_encoder(mel_tensor.unsqueeze(1))\n",
|
| 110 |
+
"\n",
|
| 111 |
+
" return torch.cat([ref_s, ref_p], dim=1)"
|
| 112 |
+
]
|
| 113 |
+
},
|
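The cell above defines an 80-bin log-mel front-end (50 ms window, 12.5 ms hop at 24 kHz) with a fixed normalization of mean −4 and std 4. A minimal sanity check of the resulting frame rate, sketched using only values already shown above:

```python
# Sanity check of the mel front-end above: at 24 kHz, hop_length=300
# yields 24000 / 300 = 80 mel frames per second of audio.
import torch
import torchaudio

to_mel = torchaudio.transforms.MelSpectrogram(
    n_mels=80, n_fft=2048, win_length=1200, hop_length=300)

wave = torch.randn(24000)                          # 1 s of dummy audio at 24 kHz
mel = (torch.log(1e-5 + to_mel(wave)) - (-4)) / 4  # same normalization as above
print(mel.shape)                                   # torch.Size([80, 81]), ~80 fps
```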
| 114 |
+
{
|
| 115 |
+
"cell_type": "code",
|
| 116 |
+
"execution_count": null,
|
| 117 |
+
"id": "bbdc04c0",
|
| 118 |
+
"metadata": {},
|
| 119 |
+
"outputs": [],
|
| 120 |
+
"source": [
|
| 121 |
+
"device = 'cuda' if torch.cuda.is_available() else 'cpu'"
|
| 122 |
+
]
|
| 123 |
+
},
|
| 124 |
+
{
|
| 125 |
+
"cell_type": "markdown",
|
| 126 |
+
"id": "7b9cecbe",
|
| 127 |
+
"metadata": {},
|
| 128 |
+
"source": [
|
| 129 |
+
"### Load models"
|
| 130 |
+
]
|
| 131 |
+
},
|
| 132 |
+
{
|
| 133 |
+
"cell_type": "code",
|
| 134 |
+
"execution_count": null,
|
| 135 |
+
"id": "64fc4c0f",
|
| 136 |
+
"metadata": {},
|
| 137 |
+
"outputs": [],
|
| 138 |
+
"source": [
|
| 139 |
+
"# load phonemizer\n",
|
| 140 |
+
"import phonemizer\n",
|
| 141 |
+
"global_phonemizer = phonemizer.backend.EspeakBackend(language='en-us', preserve_punctuation=True, with_stress=True)"
|
| 142 |
+
]
|
| 143 |
+
},
|
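For orientation, here is the phonemize-then-tokenize front-end that the inference functions below apply to input text, isolated as a sketch. It assumes `espeak-ng` and the NLTK `punkt` data are installed, and the exact phoneme string varies with the eSpeak version:

```python
# Sketch of the text front-end used by the inference functions below:
# eSpeak phonemization followed by NLTK word tokenization.
from nltk.tokenize import word_tokenize

ps = global_phonemizer.phonemize(["How much variation is there?"])
ps = ' '.join(word_tokenize(ps[0]))
print(ps)  # e.g. something like 'haʊ mʌtʃ vˌɛɹɪˈeɪʃən ɪz ðɛɹ ?'
```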
| 144 |
+
{
|
| 145 |
+
"cell_type": "code",
|
| 146 |
+
"execution_count": null,
|
| 147 |
+
"id": "48e7b644",
|
| 148 |
+
"metadata": {},
|
| 149 |
+
"outputs": [],
|
| 150 |
+
"source": [
|
| 151 |
+
"config = yaml.safe_load(open(\"Models/LibriTTS/config.yml\"))\n",
|
| 152 |
+
"\n",
|
| 153 |
+
"# load pretrained ASR model\n",
|
| 154 |
+
"ASR_config = config.get('ASR_config', False)\n",
|
| 155 |
+
"ASR_path = config.get('ASR_path', False)\n",
|
| 156 |
+
"text_aligner = load_ASR_models(ASR_path, ASR_config)\n",
|
| 157 |
+
"\n",
|
| 158 |
+
"# load pretrained F0 model\n",
|
| 159 |
+
"F0_path = config.get('F0_path', False)\n",
|
| 160 |
+
"pitch_extractor = load_F0_models(F0_path)\n",
|
| 161 |
+
"\n",
|
| 162 |
+
"# load BERT model\n",
|
| 163 |
+
"from Utils.PLBERT.util import load_plbert\n",
|
| 164 |
+
"BERT_path = config.get('PLBERT_dir', False)\n",
|
| 165 |
+
"plbert = load_plbert(BERT_path)"
|
| 166 |
+
]
|
| 167 |
+
},
|
| 168 |
+
{
|
| 169 |
+
"cell_type": "code",
|
| 170 |
+
"execution_count": null,
|
| 171 |
+
"id": "ffc18cf7",
|
| 172 |
+
"metadata": {},
|
| 173 |
+
"outputs": [],
|
| 174 |
+
"source": [
|
| 175 |
+
"model_params = recursive_munch(config['model_params'])\n",
|
| 176 |
+
"model = build_model(model_params, text_aligner, pitch_extractor, plbert)\n",
|
| 177 |
+
"_ = [model[key].eval() for key in model]\n",
|
| 178 |
+
"_ = [model[key].to(device) for key in model]"
|
| 179 |
+
]
|
| 180 |
+
},
|
| 181 |
+
{
|
| 182 |
+
"cell_type": "code",
|
| 183 |
+
"execution_count": null,
|
| 184 |
+
"id": "64529d5c",
|
| 185 |
+
"metadata": {},
|
| 186 |
+
"outputs": [],
|
| 187 |
+
"source": [
|
| 188 |
+
"params_whole = torch.load(\"Models/LibriTTS/epochs_2nd_00020.pth\", map_location='cpu')\n",
|
| 189 |
+
"params = params_whole['net']"
|
| 190 |
+
]
|
| 191 |
+
},
|
| 192 |
+
{
|
| 193 |
+
"cell_type": "code",
|
| 194 |
+
"execution_count": null,
|
| 195 |
+
"id": "895d9706",
|
| 196 |
+
"metadata": {},
|
| 197 |
+
"outputs": [],
|
| 198 |
+
"source": [
|
| 199 |
+
"for key in model:\n",
|
| 200 |
+
" if key in params:\n",
|
| 201 |
+
" print('%s loaded' % key)\n",
|
| 202 |
+
" try:\n",
|
| 203 |
+
" model[key].load_state_dict(params[key])\n",
|
| 204 |
+
" except:\n",
|
| 205 |
+
" from collections import OrderedDict\n",
|
| 206 |
+
" state_dict = params[key]\n",
|
| 207 |
+
" new_state_dict = OrderedDict()\n",
|
| 208 |
+
" for k, v in state_dict.items():\n",
|
| 209 |
+
" name = k[7:] # remove `module.`\n",
|
| 210 |
+
" new_state_dict[name] = v\n",
|
| 211 |
+
" # load params\n",
|
| 212 |
+
" model[key].load_state_dict(new_state_dict, strict=False)\n",
|
| 213 |
+
"# except:\n",
|
| 214 |
+
"# _load(params[key], model[key])\n",
|
| 215 |
+
"_ = [model[key].eval() for key in model]"
|
| 216 |
+
]
|
| 217 |
+
},
|
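The `try/except` above exists because checkpoints saved from an `nn.DataParallel` wrapper prefix every parameter key with `module.`. The same fix-up as a standalone sketch:

```python
# Standalone version of the prefix handling above: strip the "module."
# prefix that nn.DataParallel prepends to every key of a saved state_dict.
from collections import OrderedDict

def strip_module_prefix(state_dict):
    return OrderedDict(
        (k[len("module."):] if k.startswith("module.") else k, v)
        for k, v in state_dict.items())
```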
| 218 |
+
{
|
| 219 |
+
"cell_type": "code",
|
| 220 |
+
"execution_count": null,
|
| 221 |
+
"id": "c1a59db2",
|
| 222 |
+
"metadata": {},
|
| 223 |
+
"outputs": [],
|
| 224 |
+
"source": [
|
| 225 |
+
"from Modules.diffusion.sampler import DiffusionSampler, ADPM2Sampler, KarrasSchedule"
|
| 226 |
+
]
|
| 227 |
+
},
|
| 228 |
+
{
|
| 229 |
+
"cell_type": "code",
|
| 230 |
+
"execution_count": null,
|
| 231 |
+
"id": "e30985ab",
|
| 232 |
+
"metadata": {},
|
| 233 |
+
"outputs": [],
|
| 234 |
+
"source": [
|
| 235 |
+
"sampler = DiffusionSampler(\n",
|
| 236 |
+
" model.diffusion.diffusion,\n",
|
| 237 |
+
" sampler=ADPM2Sampler(),\n",
|
| 238 |
+
" sigma_schedule=KarrasSchedule(sigma_min=0.0001, sigma_max=3.0, rho=9.0), # empirical parameters\n",
|
| 239 |
+
" clamp=False\n",
|
| 240 |
+
")"
|
| 241 |
+
]
|
| 242 |
+
},
|
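`KarrasSchedule` presumably follows the rho-warped noise schedule of Karras et al. (2022); whether the bundled implementation matches this exactly is an assumption, but the usual formula with the parameters above is:

```python
# Karras et al. (2022) sigma schedule with the parameters used above:
# interpolate between sigma_max and sigma_min in 1/rho-space, then warp back.
import torch

def karras_sigmas(num_steps, sigma_min=0.0001, sigma_max=3.0, rho=9.0):
    ramp = torch.linspace(0, 1, num_steps)
    min_inv, max_inv = sigma_min ** (1 / rho), sigma_max ** (1 / rho)
    return (max_inv + ramp * (min_inv - max_inv)) ** rho

print(karras_sigmas(5))  # five decreasing noise levels, from 3.0 down to 0.0001
```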
| 243 |
+
{
|
| 244 |
+
"cell_type": "markdown",
|
| 245 |
+
"id": "b803110e",
|
| 246 |
+
"metadata": {},
|
| 247 |
+
"source": [
|
| 248 |
+
"### Synthesize speech"
|
| 249 |
+
]
|
| 250 |
+
},
|
| 251 |
+
{
|
| 252 |
+
"cell_type": "code",
|
| 253 |
+
"execution_count": null,
|
| 254 |
+
"id": "ca57469c",
|
| 255 |
+
"metadata": {},
|
| 256 |
+
"outputs": [],
|
| 257 |
+
"source": [
|
| 258 |
+
"def inference(text, ref_s, alpha = 0.3, beta = 0.7, diffusion_steps=5, embedding_scale=1):\n",
|
| 259 |
+
" text = text.strip()\n",
|
| 260 |
+
" ps = global_phonemizer.phonemize([text])\n",
|
| 261 |
+
" ps = word_tokenize(ps[0])\n",
|
| 262 |
+
" ps = ' '.join(ps)\n",
|
| 263 |
+
" tokens = textclenaer(ps)\n",
|
| 264 |
+
" tokens.insert(0, 0)\n",
|
| 265 |
+
" tokens = torch.LongTensor(tokens).to(device).unsqueeze(0)\n",
|
| 266 |
+
" \n",
|
| 267 |
+
" with torch.no_grad():\n",
|
| 268 |
+
" input_lengths = torch.LongTensor([tokens.shape[-1]]).to(device)\n",
|
| 269 |
+
" text_mask = length_to_mask(input_lengths).to(device)\n",
|
| 270 |
+
"\n",
|
| 271 |
+
" t_en = model.text_encoder(tokens, input_lengths, text_mask)\n",
|
| 272 |
+
" bert_dur = model.bert(tokens, attention_mask=(~text_mask).int())\n",
|
| 273 |
+
" d_en = model.bert_encoder(bert_dur).transpose(-1, -2) \n",
|
| 274 |
+
"\n",
|
| 275 |
+
" s_pred = sampler(noise = torch.randn((1, 256)).unsqueeze(1).to(device), \n",
|
| 276 |
+
" embedding=bert_dur,\n",
|
| 277 |
+
" embedding_scale=embedding_scale,\n",
|
| 278 |
+
" features=ref_s, # reference from the same speaker as the embedding\n",
|
| 279 |
+
" num_steps=diffusion_steps).squeeze(1)\n",
|
| 280 |
+
"\n",
|
| 281 |
+
"\n",
|
| 282 |
+
" s = s_pred[:, 128:]\n",
|
| 283 |
+
" ref = s_pred[:, :128]\n",
|
| 284 |
+
"\n",
|
| 285 |
+
" ref = alpha * ref + (1 - alpha) * ref_s[:, :128]\n",
|
| 286 |
+
" s = beta * s + (1 - beta) * ref_s[:, 128:]\n",
|
| 287 |
+
"\n",
|
| 288 |
+
" d = model.predictor.text_encoder(d_en, \n",
|
| 289 |
+
" s, input_lengths, text_mask)\n",
|
| 290 |
+
"\n",
|
| 291 |
+
" x, _ = model.predictor.lstm(d)\n",
|
| 292 |
+
" duration = model.predictor.duration_proj(x)\n",
|
| 293 |
+
"\n",
|
| 294 |
+
" duration = torch.sigmoid(duration).sum(axis=-1)\n",
|
| 295 |
+
" pred_dur = torch.round(duration.squeeze()).clamp(min=1)\n",
|
| 296 |
+
"\n",
|
| 297 |
+
"\n",
|
| 298 |
+
" pred_aln_trg = torch.zeros(input_lengths, int(pred_dur.sum().data))\n",
|
| 299 |
+
" c_frame = 0\n",
|
| 300 |
+
" for i in range(pred_aln_trg.size(0)):\n",
|
| 301 |
+
" pred_aln_trg[i, c_frame:c_frame + int(pred_dur[i].data)] = 1\n",
|
| 302 |
+
" c_frame += int(pred_dur[i].data)\n",
|
| 303 |
+
"\n",
|
| 304 |
+
" # encode prosody\n",
|
| 305 |
+
" en = (d.transpose(-1, -2) @ pred_aln_trg.unsqueeze(0).to(device))\n",
|
| 306 |
+
" if model_params.decoder.type == \"hifigan\":\n",
|
| 307 |
+
" asr_new = torch.zeros_like(en)\n",
|
| 308 |
+
" asr_new[:, :, 0] = en[:, :, 0]\n",
|
| 309 |
+
" asr_new[:, :, 1:] = en[:, :, 0:-1]\n",
|
| 310 |
+
" en = asr_new\n",
|
| 311 |
+
"\n",
|
| 312 |
+
" F0_pred, N_pred = model.predictor.F0Ntrain(en, s)\n",
|
| 313 |
+
"\n",
|
| 314 |
+
" asr = (t_en @ pred_aln_trg.unsqueeze(0).to(device))\n",
|
| 315 |
+
" if model_params.decoder.type == \"hifigan\":\n",
|
| 316 |
+
" asr_new = torch.zeros_like(asr)\n",
|
| 317 |
+
" asr_new[:, :, 0] = asr[:, :, 0]\n",
|
| 318 |
+
" asr_new[:, :, 1:] = asr[:, :, 0:-1]\n",
|
| 319 |
+
" asr = asr_new\n",
|
| 320 |
+
"\n",
|
| 321 |
+
" out = model.decoder(asr, \n",
|
| 322 |
+
" F0_pred, N_pred, ref.squeeze().unsqueeze(0))\n",
|
| 323 |
+
" \n",
|
| 324 |
+
" \n",
|
| 325 |
+
" return out.squeeze().cpu().numpy()[..., :-50] # weird pulse at the end of the model, need to be fixed later"
|
| 326 |
+
]
|
| 327 |
+
},
|
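One step inside `inference` worth calling out: the predicted per-phoneme durations are expanded into a hard 0/1 alignment matrix, with phoneme `i` occupying `pred_dur[i]` consecutive frames. A toy version with made-up durations:

```python
# Toy version of the duration-to-alignment expansion used in `inference`.
import torch

pred_dur = torch.tensor([2, 3, 1])                  # hypothetical durations
aln = torch.zeros(len(pred_dur), int(pred_dur.sum()))
frame = 0
for i, d in enumerate(pred_dur.tolist()):
    aln[i, frame:frame + d] = 1                     # phoneme i spans d frames
    frame += d
print(aln)
# tensor([[1., 1., 0., 0., 0., 0.],
#         [0., 0., 1., 1., 1., 0.],
#         [0., 0., 0., 0., 0., 1.]])
```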
| 328 |
+
{
|
| 329 |
+
"cell_type": "markdown",
|
| 330 |
+
"id": "d438ef4f",
|
| 331 |
+
"metadata": {},
|
| 332 |
+
"source": [
|
| 333 |
+
"#### Basic synthesis (5 diffusion steps, seen speakers)"
|
| 334 |
+
]
|
| 335 |
+
},
|
| 336 |
+
{
|
| 337 |
+
"cell_type": "code",
|
| 338 |
+
"execution_count": null,
|
| 339 |
+
"id": "cace9787",
|
| 340 |
+
"metadata": {},
|
| 341 |
+
"outputs": [],
|
| 342 |
+
"source": [
|
| 343 |
+
"text = ''' StyleTTS 2 is a text to speech model that leverages style diffusion and adversarial training with large speech language models to achieve human level text to speech synthesis. '''"
|
| 344 |
+
]
|
| 345 |
+
},
|
| 346 |
+
{
|
| 347 |
+
"cell_type": "code",
|
| 348 |
+
"execution_count": null,
|
| 349 |
+
"id": "7c88f461",
|
| 350 |
+
"metadata": {},
|
| 351 |
+
"outputs": [],
|
| 352 |
+
"source": [
|
| 353 |
+
"reference_dicts = {}\n",
|
| 354 |
+
"reference_dicts['696_92939'] = \"Demo/reference_audio/696_92939_000016_000006.wav\"\n",
|
| 355 |
+
"reference_dicts['1789_142896'] = \"Demo/reference_audio/1789_142896_000022_000005.wav\""
|
| 356 |
+
]
|
| 357 |
+
},
|
| 358 |
+
{
|
| 359 |
+
"cell_type": "code",
|
| 360 |
+
"execution_count": null,
|
| 361 |
+
"id": "16e8ac60",
|
| 362 |
+
"metadata": {},
|
| 363 |
+
"outputs": [],
|
| 364 |
+
"source": [
|
| 365 |
+
"start = time.time()\n",
|
| 366 |
+
"noise = torch.randn(1,1,256).to(device)\n",
|
| 367 |
+
"for k, path in reference_dicts.items():\n",
|
| 368 |
+
" ref_s = compute_style(path)\n",
|
| 369 |
+
" \n",
|
| 370 |
+
" wav = inference(text, ref_s, alpha=0.3, beta=0.7, diffusion_steps=5, embedding_scale=1)\n",
|
| 371 |
+
" rtf = (time.time() - start) / (len(wav) / 24000)\n",
|
| 372 |
+
" print(f\"RTF = {rtf:5f}\")\n",
|
| 373 |
+
" import IPython.display as ipd\n",
|
| 374 |
+
" print(k + ' Synthesized:')\n",
|
| 375 |
+
" display(ipd.Audio(wav, rate=24000, normalize=False))\n",
|
| 376 |
+
" print('Reference:')\n",
|
| 377 |
+
" display(ipd.Audio(path, rate=24000, normalize=False))"
|
| 378 |
+
]
|
| 379 |
+
},
|
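The real-time factor printed above is wall-clock synthesis time divided by the duration of the generated audio; values below 1 mean faster-than-real-time synthesis. With hypothetical numbers:

```python
# Real-time factor with hypothetical numbers: 0.8 s of compute for
# 120000 samples at 24 kHz (5 s of audio) gives RTF = 0.16.
elapsed, num_samples, sr = 0.8, 120000, 24000
rtf = elapsed / (num_samples / sr)
print(rtf)  # 0.16 < 1, i.e., faster than real time
```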
| 380 |
+
{
|
| 381 |
+
"cell_type": "markdown",
|
| 382 |
+
"id": "14838708",
|
| 383 |
+
"metadata": {},
|
| 384 |
+
"source": [
|
| 385 |
+
"#### With higher diffusion steps (more diverse)\n",
|
| 386 |
+
"\n",
|
| 387 |
+
"Since the sampler is ancestral, the higher the stpes, the more diverse the samples are, with the cost of slower synthesis speed."
|
| 388 |
+
]
|
| 389 |
+
},
|
| 390 |
+
{
|
| 391 |
+
"cell_type": "code",
|
| 392 |
+
"execution_count": null,
|
| 393 |
+
"id": "6fbff03b",
|
| 394 |
+
"metadata": {},
|
| 395 |
+
"outputs": [],
|
| 396 |
+
"source": [
|
| 397 |
+
"noise = torch.randn(1,1,256).to(device)\n",
|
| 398 |
+
"for k, path in reference_dicts.items():\n",
|
| 399 |
+
" ref_s = compute_style(path)\n",
|
| 400 |
+
" start = time.time()\n",
|
| 401 |
+
" wav = inference(text, ref_s, alpha=0.3, beta=0.7, diffusion_steps=10, embedding_scale=1)\n",
|
| 402 |
+
" rtf = (time.time() - start) / (len(wav) / 24000)\n",
|
| 403 |
+
" print(f\"RTF = {rtf:5f}\")\n",
|
| 404 |
+
" import IPython.display as ipd\n",
|
| 405 |
+
" print(k + ' Synthesized:')\n",
|
| 406 |
+
" display(ipd.Audio(wav, rate=24000, normalize=False))\n",
|
| 407 |
+
" print(k + ' Reference:')\n",
|
| 408 |
+
" display(ipd.Audio(path, rate=24000, normalize=False))"
|
| 409 |
+
]
|
| 410 |
+
},
|
| 411 |
+
{
|
| 412 |
+
"cell_type": "markdown",
|
| 413 |
+
"id": "7e6867fd",
|
| 414 |
+
"metadata": {},
|
| 415 |
+
"source": [
|
| 416 |
+
"#### Basic synthesis (5 diffusion steps, umseen speakers)\n",
|
| 417 |
+
"The following samples are to reproduce samples in [Section 4](https://styletts2.github.io/#libri) of the demo page. All spsakers are unseen during training. You can compare the generated samples to popular zero-shot TTS models like Vall-E and NaturalSpeech 2."
|
| 418 |
+
]
|
| 419 |
+
},
|
| 420 |
+
{
|
| 421 |
+
"cell_type": "code",
|
| 422 |
+
"execution_count": null,
|
| 423 |
+
"id": "f4e8faa0",
|
| 424 |
+
"metadata": {},
|
| 425 |
+
"outputs": [],
|
| 426 |
+
"source": [
|
| 427 |
+
"reference_dicts = {}\n",
|
| 428 |
+
"# format: (path, text)\n",
|
| 429 |
+
"reference_dicts['1221-135767'] = (\"Demo/reference_audio/1221-135767-0014.wav\", \"Yea, his honourable worship is within, but he hath a godly minister or two with him, and likewise a leech.\")\n",
|
| 430 |
+
"reference_dicts['5639-40744'] = (\"Demo/reference_audio/5639-40744-0020.wav\", \"Thus did this humane and right minded father comfort his unhappy daughter, and her mother embracing her again, did all she could to soothe her feelings.\")\n",
|
| 431 |
+
"reference_dicts['908-157963'] = (\"Demo/reference_audio/908-157963-0027.wav\", \"And lay me down in my cold bed and leave my shining lot.\")\n",
|
| 432 |
+
"reference_dicts['4077-13754'] = (\"Demo/reference_audio/4077-13754-0000.wav\", \"The army found the people in poverty and left them in comparative wealth.\")"
|
| 433 |
+
]
|
| 434 |
+
},
|
| 435 |
+
{
|
| 436 |
+
"cell_type": "code",
|
| 437 |
+
"execution_count": null,
|
| 438 |
+
"id": "653f1406",
|
| 439 |
+
"metadata": {},
|
| 440 |
+
"outputs": [],
|
| 441 |
+
"source": [
|
| 442 |
+
"noise = torch.randn(1,1,256).to(device)\n",
|
| 443 |
+
"for k, v in reference_dicts.items():\n",
|
| 444 |
+
" path, text = v\n",
|
| 445 |
+
" ref_s = compute_style(path)\n",
|
| 446 |
+
" start = time.time()\n",
|
| 447 |
+
" wav = inference(text, ref_s, alpha=0.3, beta=0.7, diffusion_steps=5, embedding_scale=1)\n",
|
| 448 |
+
" rtf = (time.time() - start) / (len(wav) / 24000)\n",
|
| 449 |
+
" print(f\"RTF = {rtf:5f}\")\n",
|
| 450 |
+
" import IPython.display as ipd\n",
|
| 451 |
+
" print(k + ' Synthesized: ' + text)\n",
|
| 452 |
+
" display(ipd.Audio(wav, rate=24000, normalize=False))\n",
|
| 453 |
+
" print(k + ' Reference:')\n",
|
| 454 |
+
" display(ipd.Audio(path, rate=24000, normalize=False))"
|
| 455 |
+
]
|
| 456 |
+
},
|
| 457 |
+
{
|
| 458 |
+
"cell_type": "markdown",
|
| 459 |
+
"id": "141e91b3",
|
| 460 |
+
"metadata": {},
|
| 461 |
+
"source": [
|
| 462 |
+
"### Speech expressiveness\n",
|
| 463 |
+
"\n",
|
| 464 |
+
"The following section recreates the samples shown in [Section 6](https://styletts2.github.io/#emo) of the demo page. The speaker reference used is `1221-135767-0014.wav`, which is unseen during training. \n",
|
| 465 |
+
"\n",
|
| 466 |
+
"#### With `embedding_scale=1`\n",
|
| 467 |
+
"This is the classifier-free guidance scale. The higher the scale, the more conditional the style is to the input text and hence more emotional.\n",
|
| 468 |
+
"\n"
|
| 469 |
+
]
|
| 470 |
+
},
|
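A rough sketch of how classifier-free guidance is typically applied; whether the bundled sampler combines the two branches exactly this way is an assumption:

```python
# Classifier-free guidance as commonly implemented: blend the unconditional
# and text-conditional predictions; embedding_scale > 1 overshoots toward
# the text-conditioned direction, giving more expressive styles.
import torch

def cfg_combine(pred_uncond, pred_cond, scale):
    return pred_uncond + scale * (pred_cond - pred_uncond)

u, c = torch.zeros(1, 256), torch.ones(1, 256)
print(cfg_combine(u, c, 2.0)[0, 0].item())  # 2.0: past the conditional value
```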
| 471 |
+
{
|
| 472 |
+
"cell_type": "code",
|
| 473 |
+
"execution_count": null,
|
| 474 |
+
"id": "81addda4",
|
| 475 |
+
"metadata": {},
|
| 476 |
+
"outputs": [],
|
| 477 |
+
"source": [
|
| 478 |
+
"ref_s = compute_style(\"Demo/reference_audio/1221-135767-0014.wav\")"
|
| 479 |
+
]
|
| 480 |
+
},
|
| 481 |
+
{
|
| 482 |
+
"cell_type": "code",
|
| 483 |
+
"execution_count": null,
|
| 484 |
+
"id": "be1b2a11",
|
| 485 |
+
"metadata": {},
|
| 486 |
+
"outputs": [],
|
| 487 |
+
"source": [
|
| 488 |
+
"texts = {}\n",
|
| 489 |
+
"texts['Happy'] = \"We are happy to invite you to join us on a journey to the past, where we will visit the most amazing monuments ever built by human hands.\"\n",
|
| 490 |
+
"texts['Sad'] = \"I am sorry to say that we have suffered a severe setback in our efforts to restore prosperity and confidence.\"\n",
|
| 491 |
+
"texts['Angry'] = \"The field of astronomy is a joke! Its theories are based on flawed observations and biased interpretations!\"\n",
|
| 492 |
+
"texts['Surprised'] = \"I can't believe it! You mean to tell me that you have discovered a new species of bacteria in this pond?\"\n",
|
| 493 |
+
"\n",
|
| 494 |
+
"for k,v in texts.items():\n",
|
| 495 |
+
" wav = inference(v, ref_s, diffusion_steps=10, alpha=0.3, beta=0.7, embedding_scale=1)\n",
|
| 496 |
+
" print(k + \": \")\n",
|
| 497 |
+
" display(ipd.Audio(wav, rate=24000, normalize=False))"
|
| 498 |
+
]
|
| 499 |
+
},
|
| 500 |
+
{
|
| 501 |
+
"cell_type": "markdown",
|
| 502 |
+
"id": "96d262b8",
|
| 503 |
+
"metadata": {},
|
| 504 |
+
"source": [
|
| 505 |
+
"#### With `embedding_scale=2`"
|
| 506 |
+
]
|
| 507 |
+
},
|
| 508 |
+
{
|
| 509 |
+
"cell_type": "code",
|
| 510 |
+
"execution_count": null,
|
| 511 |
+
"id": "3e7d40b4",
|
| 512 |
+
"metadata": {},
|
| 513 |
+
"outputs": [],
|
| 514 |
+
"source": [
|
| 515 |
+
"texts = {}\n",
|
| 516 |
+
"texts['Happy'] = \"We are happy to invite you to join us on a journey to the past, where we will visit the most amazing monuments ever built by human hands.\"\n",
|
| 517 |
+
"texts['Sad'] = \"I am sorry to say that we have suffered a severe setback in our efforts to restore prosperity and confidence.\"\n",
|
| 518 |
+
"texts['Angry'] = \"The field of astronomy is a joke! Its theories are based on flawed observations and biased interpretations!\"\n",
|
| 519 |
+
"texts['Surprised'] = \"I can't believe it! You mean to tell me that you have discovered a new species of bacteria in this pond?\"\n",
|
| 520 |
+
"\n",
|
| 521 |
+
"for k,v in texts.items():\n",
|
| 522 |
+
" noise = torch.randn(1,1,256).to(device)\n",
|
| 523 |
+
" wav = inference(v, ref_s, diffusion_steps=10, alpha=0.3, beta=0.7, embedding_scale=2)\n",
|
| 524 |
+
" print(k + \": \")\n",
|
| 525 |
+
" display(ipd.Audio(wav, rate=24000, normalize=False))"
|
| 526 |
+
]
|
| 527 |
+
},
|
| 528 |
+
{
|
| 529 |
+
"cell_type": "markdown",
|
| 530 |
+
"id": "402b2bd6",
|
| 531 |
+
"metadata": {},
|
| 532 |
+
"source": [
|
| 533 |
+
"#### With `embedding_scale=2, alpha = 0.5, beta = 0.9`\n",
|
| 534 |
+
"`alpha` and `beta` is the factor to determine much we use the style sampled based on the text instead of the reference. The higher the value of `alpha` and `beta`, the more suitable the style it is to the text but less similar to the reference. Using higher beta makes the synthesized speech more emotional, at the cost of lower similarity to the reference. `alpha` determines the timbre of the speaker while `beta` determines the prosody. "
|
| 535 |
+
]
|
| 536 |
+
},
|
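Concretely, this is the mixing already performed inside `inference`: the 256-dim style vector splits into a timbre half (first 128 dims, mixed with weight `alpha`) and a prosody half (last 128 dims, mixed with weight `beta`):

```python
# The alpha/beta mixing described above, isolated from `inference`:
# dims [:128] carry timbre (acoustic style), dims [128:] carry prosody.
import torch

alpha, beta = 0.5, 0.9
s_pred = torch.randn(1, 256)  # style sampled by the diffusion model
ref_s = torch.randn(1, 256)   # style encoded from the reference audio

ref = alpha * s_pred[:, :128] + (1 - alpha) * ref_s[:, :128]  # timbre
s = beta * s_pred[:, 128:] + (1 - beta) * ref_s[:, 128:]      # prosody
```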
| 537 |
+
{
|
| 538 |
+
"cell_type": "code",
|
| 539 |
+
"execution_count": null,
|
| 540 |
+
"id": "599de5d5",
|
| 541 |
+
"metadata": {},
|
| 542 |
+
"outputs": [],
|
| 543 |
+
"source": [
|
| 544 |
+
"texts = {}\n",
|
| 545 |
+
"texts['Happy'] = \"We are happy to invite you to join us on a journey to the past, where we will visit the most amazing monuments ever built by human hands.\"\n",
|
| 546 |
+
"texts['Sad'] = \"I am sorry to say that we have suffered a severe setback in our efforts to restore prosperity and confidence.\"\n",
|
| 547 |
+
"texts['Angry'] = \"The field of astronomy is a joke! Its theories are based on flawed observations and biased interpretations!\"\n",
|
| 548 |
+
"texts['Surprised'] = \"I can't believe it! You mean to tell me that you have discovered a new species of bacteria in this pond?\"\n",
|
| 549 |
+
"\n",
|
| 550 |
+
"for k,v in texts.items():\n",
|
| 551 |
+
" noise = torch.randn(1,1,256).to(device)\n",
|
| 552 |
+
" wav = inference(v, ref_s, diffusion_steps=10, alpha=0.5, beta=0.9, embedding_scale=2)\n",
|
| 553 |
+
" print(k + \": \")\n",
|
| 554 |
+
" display(ipd.Audio(wav, rate=24000, normalize=False))"
|
| 555 |
+
]
|
| 556 |
+
},
|
| 557 |
+
{
|
| 558 |
+
"cell_type": "markdown",
|
| 559 |
+
"id": "48548866",
|
| 560 |
+
"metadata": {},
|
| 561 |
+
"source": [
|
| 562 |
+
"### Zero-shot speaker adaptation\n",
|
| 563 |
+
"This section recreates the \"Acoustic Environment Maintenance\" and \"Speaker’s Emotion Maintenance\" demo in [Section 4](https://styletts2.github.io/#libri) of the demo page. You can compare the generated samples to popular zero-shot TTS models like Vall-E. Note that the model was trained only on LibriTTS, which is about 250 times fewer data compared to those used to trian Vall-E with similar or better effect for these maintainance. "
|
| 564 |
+
]
|
| 565 |
+
},
|
| 566 |
+
{
|
| 567 |
+
"cell_type": "markdown",
|
| 568 |
+
"id": "23e81572",
|
| 569 |
+
"metadata": {},
|
| 570 |
+
"source": [
|
| 571 |
+
"#### Acoustic Environment Maintenance\n",
|
| 572 |
+
"\n",
|
| 573 |
+
"Since we want to maintain the acoustic environment in the speaker (timbre), we set `alpha = 0` to make the speaker as closer to the reference as possible while only changing the prosody according to the text. "
|
| 574 |
+
]
|
| 575 |
+
},
|
| 576 |
+
{
|
| 577 |
+
"cell_type": "code",
|
| 578 |
+
"execution_count": null,
|
| 579 |
+
"id": "8087bccb",
|
| 580 |
+
"metadata": {},
|
| 581 |
+
"outputs": [],
|
| 582 |
+
"source": [
|
| 583 |
+
"reference_dicts = {}\n",
|
| 584 |
+
"# format: (path, text)\n",
|
| 585 |
+
"reference_dicts['3'] = (\"Demo/reference_audio/3.wav\", \"As friends thing I definitely I've got more male friends.\")\n",
|
| 586 |
+
"reference_dicts['4'] = (\"Demo/reference_audio/4.wav\", \"Everything is run by computer but you got to know how to think before you can do a computer.\")\n",
|
| 587 |
+
"reference_dicts['5'] = (\"Demo/reference_audio/5.wav\", \"Then out in LA you guys got a whole another ball game within California to worry about.\")"
|
| 588 |
+
]
|
| 589 |
+
},
|
| 590 |
+
{
|
| 591 |
+
"cell_type": "code",
|
| 592 |
+
"execution_count": null,
|
| 593 |
+
"id": "1e99c200",
|
| 594 |
+
"metadata": {},
|
| 595 |
+
"outputs": [],
|
| 596 |
+
"source": [
|
| 597 |
+
"noise = torch.randn(1,1,256).to(device)\n",
|
| 598 |
+
"for k, v in reference_dicts.items():\n",
|
| 599 |
+
" path, text = v\n",
|
| 600 |
+
" ref_s = compute_style(path)\n",
|
| 601 |
+
" start = time.time()\n",
|
| 602 |
+
" wav = inference(text, ref_s, alpha=0.0, beta=0.5, diffusion_steps=5, embedding_scale=1)\n",
|
| 603 |
+
" rtf = (time.time() - start) / (len(wav) / 24000)\n",
|
| 604 |
+
" print(f\"RTF = {rtf:5f}\")\n",
|
| 605 |
+
" import IPython.display as ipd\n",
|
| 606 |
+
" print('Synthesized: ' + text)\n",
|
| 607 |
+
" display(ipd.Audio(wav, rate=24000, normalize=False))\n",
|
| 608 |
+
" print('Reference:')\n",
|
| 609 |
+
" display(ipd.Audio(path, rate=24000, normalize=False))"
|
| 610 |
+
]
|
| 611 |
+
},
|
| 612 |
+
{
|
| 613 |
+
"cell_type": "markdown",
|
| 614 |
+
"id": "7d56505d",
|
| 615 |
+
"metadata": {},
|
| 616 |
+
"source": [
|
| 617 |
+
"#### Speaker’s Emotion Maintenance\n",
|
| 618 |
+
"\n",
|
| 619 |
+
"Since we want to maintain the emotion in the speaker (prosody), we set `beta = 0.1` to make the speaker as closer to the reference as possible while having some diversity thruogh the slight timbre change."
|
| 620 |
+
]
|
| 621 |
+
},
|
| 622 |
+
{
|
| 623 |
+
"cell_type": "code",
|
| 624 |
+
"execution_count": null,
|
| 625 |
+
"id": "f90179e7",
|
| 626 |
+
"metadata": {},
|
| 627 |
+
"outputs": [],
|
| 628 |
+
"source": [
|
| 629 |
+
"reference_dicts = {}\n",
|
| 630 |
+
"# format: (path, text)\n",
|
| 631 |
+
"reference_dicts['Anger'] = (\"Demo/reference_audio/anger.wav\", \"We have to reduce the number of plastic bags.\")\n",
|
| 632 |
+
"reference_dicts['Sleepy'] = (\"Demo/reference_audio/sleepy.wav\", \"We have to reduce the number of plastic bags.\")\n",
|
| 633 |
+
"reference_dicts['Amused'] = (\"Demo/reference_audio/amused.wav\", \"We have to reduce the number of plastic bags.\")\n",
|
| 634 |
+
"reference_dicts['Disgusted'] = (\"Demo/reference_audio/disgusted.wav\", \"We have to reduce the number of plastic bags.\")"
|
| 635 |
+
]
|
| 636 |
+
},
|
| 637 |
+
{
|
| 638 |
+
"cell_type": "code",
|
| 639 |
+
"execution_count": null,
|
| 640 |
+
"id": "2e6bdfed",
|
| 641 |
+
"metadata": {},
|
| 642 |
+
"outputs": [],
|
| 643 |
+
"source": [
|
| 644 |
+
"noise = torch.randn(1,1,256).to(device)\n",
|
| 645 |
+
"for k, v in reference_dicts.items():\n",
|
| 646 |
+
" path, text = v\n",
|
| 647 |
+
" ref_s = compute_style(path)\n",
|
| 648 |
+
" start = time.time()\n",
|
| 649 |
+
" wav = inference(text, ref_s, alpha=0.3, beta=0.1, diffusion_steps=10, embedding_scale=1)\n",
|
| 650 |
+
" rtf = (time.time() - start) / (len(wav) / 24000)\n",
|
| 651 |
+
" print(f\"RTF = {rtf:5f}\")\n",
|
| 652 |
+
" import IPython.display as ipd\n",
|
| 653 |
+
" print(k + ' Synthesized: ' + text)\n",
|
| 654 |
+
" display(ipd.Audio(wav, rate=24000, normalize=False))\n",
|
| 655 |
+
" print(k + ' Reference:')\n",
|
| 656 |
+
" display(ipd.Audio(path, rate=24000, normalize=False))"
|
| 657 |
+
]
|
| 658 |
+
},
|
| 659 |
+
{
|
| 660 |
+
"cell_type": "markdown",
|
| 661 |
+
"id": "37ae3963",
|
| 662 |
+
"metadata": {},
|
| 663 |
+
"source": [
|
| 664 |
+
"### Longform Narration\n",
|
| 665 |
+
"\n",
|
| 666 |
+
"This section includes basic implementation of Algorithm 1 in the paper for consistent longform audio generation. The example passage is taken from [Section 5](https://styletts2.github.io/#long) of the demo page."
|
| 667 |
+
]
|
| 668 |
+
},
|
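The key idea, stripped to a skeleton (the full version is `LFinference` below): each sentence's style is a convex combination of the previous sentence's style (weight `t`) and a freshly sampled one, so prosody drifts smoothly instead of jumping between sentences:

```python
# Skeleton of the longform style smoothing implemented by LFinference below:
# carry the style forward with weight t, refresh it with weight (1 - t).
import torch

t = 0.7
s_prev = None
for _ in range(3):                # one iteration per sentence
    s_cur = torch.randn(1, 256)   # stands in for the diffusion-sampled style
    if s_prev is not None:
        s_cur = t * s_prev + (1 - t) * s_cur
    s_prev = s_cur                # reused when synthesizing the next sentence
```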
| 669 |
+
{
|
| 670 |
+
"cell_type": "code",
|
| 671 |
+
"execution_count": null,
|
| 672 |
+
"id": "f12a716b",
|
| 673 |
+
"metadata": {},
|
| 674 |
+
"outputs": [],
|
| 675 |
+
"source": [
|
| 676 |
+
"passage = '''If the supply of fruit is greater than the family needs, it may be made a source of income by sending the fresh fruit to the market if there is one near enough, or by preserving, canning, and making jelly for sale. To make such an enterprise a success the fruit and work must be first class. There is magic in the word \"Homemade,\" when the product appeals to the eye and the palate; but many careless and incompetent people have found to their sorrow that this word has not magic enough to float inferior goods on the market. As a rule large canning and preserving establishments are clean and have the best appliances, and they employ chemists and skilled labor. The home product must be very good to compete with the attractive goods that are sent out from such establishments. Yet for first class home made products there is a market in all large cities. All first-class grocers have customers who purchase such goods.'''"
|
| 677 |
+
]
|
| 678 |
+
},
|
| 679 |
+
{
|
| 680 |
+
"cell_type": "code",
|
| 681 |
+
"execution_count": null,
|
| 682 |
+
"id": "a1a38079",
|
| 683 |
+
"metadata": {},
|
| 684 |
+
"outputs": [],
|
| 685 |
+
"source": [
|
| 686 |
+
"def LFinference(text, s_prev, ref_s, alpha = 0.3, beta = 0.7, t = 0.7, diffusion_steps=5, embedding_scale=1):\n",
|
| 687 |
+
" text = text.strip()\n",
|
| 688 |
+
" ps = global_phonemizer.phonemize([text])\n",
|
| 689 |
+
" ps = word_tokenize(ps[0])\n",
|
| 690 |
+
" ps = ' '.join(ps)\n",
|
| 691 |
+
" ps = ps.replace('``', '\"')\n",
|
| 692 |
+
" ps = ps.replace(\"''\", '\"')\n",
|
| 693 |
+
"\n",
|
| 694 |
+
" tokens = textclenaer(ps)\n",
|
| 695 |
+
" tokens.insert(0, 0)\n",
|
| 696 |
+
" tokens = torch.LongTensor(tokens).to(device).unsqueeze(0)\n",
|
| 697 |
+
" \n",
|
| 698 |
+
" with torch.no_grad():\n",
|
| 699 |
+
" input_lengths = torch.LongTensor([tokens.shape[-1]]).to(device)\n",
|
| 700 |
+
" text_mask = length_to_mask(input_lengths).to(device)\n",
|
| 701 |
+
"\n",
|
| 702 |
+
" t_en = model.text_encoder(tokens, input_lengths, text_mask)\n",
|
| 703 |
+
" bert_dur = model.bert(tokens, attention_mask=(~text_mask).int())\n",
|
| 704 |
+
" d_en = model.bert_encoder(bert_dur).transpose(-1, -2) \n",
|
| 705 |
+
"\n",
|
| 706 |
+
" s_pred = sampler(noise = torch.randn((1, 256)).unsqueeze(1).to(device), \n",
|
| 707 |
+
" embedding=bert_dur,\n",
|
| 708 |
+
" embedding_scale=embedding_scale,\n",
|
| 709 |
+
" features=ref_s, # reference from the same speaker as the embedding\n",
|
| 710 |
+
" num_steps=diffusion_steps).squeeze(1)\n",
|
| 711 |
+
" \n",
|
| 712 |
+
" if s_prev is not None:\n",
|
| 713 |
+
" # convex combination of previous and current style\n",
|
| 714 |
+
" s_pred = t * s_prev + (1 - t) * s_pred\n",
|
| 715 |
+
" \n",
|
| 716 |
+
" s = s_pred[:, 128:]\n",
|
| 717 |
+
" ref = s_pred[:, :128]\n",
|
| 718 |
+
" \n",
|
| 719 |
+
" ref = alpha * ref + (1 - alpha) * ref_s[:, :128]\n",
|
| 720 |
+
" s = beta * s + (1 - beta) * ref_s[:, 128:]\n",
|
| 721 |
+
"\n",
|
| 722 |
+
" s_pred = torch.cat([ref, s], dim=-1)\n",
|
| 723 |
+
"\n",
|
| 724 |
+
" d = model.predictor.text_encoder(d_en, \n",
|
| 725 |
+
" s, input_lengths, text_mask)\n",
|
| 726 |
+
"\n",
|
| 727 |
+
" x, _ = model.predictor.lstm(d)\n",
|
| 728 |
+
" duration = model.predictor.duration_proj(x)\n",
|
| 729 |
+
"\n",
|
| 730 |
+
" duration = torch.sigmoid(duration).sum(axis=-1)\n",
|
| 731 |
+
" pred_dur = torch.round(duration.squeeze()).clamp(min=1)\n",
|
| 732 |
+
"\n",
|
| 733 |
+
"\n",
|
| 734 |
+
" pred_aln_trg = torch.zeros(input_lengths, int(pred_dur.sum().data))\n",
|
| 735 |
+
" c_frame = 0\n",
|
| 736 |
+
" for i in range(pred_aln_trg.size(0)):\n",
|
| 737 |
+
" pred_aln_trg[i, c_frame:c_frame + int(pred_dur[i].data)] = 1\n",
|
| 738 |
+
" c_frame += int(pred_dur[i].data)\n",
|
| 739 |
+
"\n",
|
| 740 |
+
" # encode prosody\n",
|
| 741 |
+
" en = (d.transpose(-1, -2) @ pred_aln_trg.unsqueeze(0).to(device))\n",
|
| 742 |
+
" if model_params.decoder.type == \"hifigan\":\n",
|
| 743 |
+
" asr_new = torch.zeros_like(en)\n",
|
| 744 |
+
" asr_new[:, :, 0] = en[:, :, 0]\n",
|
| 745 |
+
" asr_new[:, :, 1:] = en[:, :, 0:-1]\n",
|
| 746 |
+
" en = asr_new\n",
|
| 747 |
+
"\n",
|
| 748 |
+
" F0_pred, N_pred = model.predictor.F0Ntrain(en, s)\n",
|
| 749 |
+
"\n",
|
| 750 |
+
" asr = (t_en @ pred_aln_trg.unsqueeze(0).to(device))\n",
|
| 751 |
+
" if model_params.decoder.type == \"hifigan\":\n",
|
| 752 |
+
" asr_new = torch.zeros_like(asr)\n",
|
| 753 |
+
" asr_new[:, :, 0] = asr[:, :, 0]\n",
|
| 754 |
+
" asr_new[:, :, 1:] = asr[:, :, 0:-1]\n",
|
| 755 |
+
" asr = asr_new\n",
|
| 756 |
+
"\n",
|
| 757 |
+
" out = model.decoder(asr, \n",
|
| 758 |
+
" F0_pred, N_pred, ref.squeeze().unsqueeze(0))\n",
|
| 759 |
+
" \n",
|
| 760 |
+
" \n",
|
| 761 |
+
" return out.squeeze().cpu().numpy()[..., :-100], s_pred # weird pulse at the end of the model, need to be fixed later"
|
| 762 |
+
]
|
| 763 |
+
},
|
| 764 |
+
{
|
| 765 |
+
"cell_type": "code",
|
| 766 |
+
"execution_count": null,
|
| 767 |
+
"id": "e9088f7a",
|
| 768 |
+
"metadata": {},
|
| 769 |
+
"outputs": [],
|
| 770 |
+
"source": [
|
| 771 |
+
"# unseen speaker\n",
|
| 772 |
+
"path = \"Demo/reference_audio/1221-135767-0014.wav\"\n",
|
| 773 |
+
"s_ref = compute_style(path)\n",
|
| 774 |
+
"sentences = passage.split('.') # simple split by comma\n",
|
| 775 |
+
"wavs = []\n",
|
| 776 |
+
"s_prev = None\n",
|
| 777 |
+
"for text in sentences:\n",
|
| 778 |
+
" if text.strip() == \"\": continue\n",
|
| 779 |
+
" text += '.' # add it back\n",
|
| 780 |
+
" \n",
|
| 781 |
+
" wav, s_prev = LFinference(text, \n",
|
| 782 |
+
" s_prev, \n",
|
| 783 |
+
" s_ref, \n",
|
| 784 |
+
" alpha = 0.3, \n",
|
| 785 |
+
" beta = 0.9, # make it more suitable for the text\n",
|
| 786 |
+
" t = 0.7, \n",
|
| 787 |
+
" diffusion_steps=10, embedding_scale=1.5)\n",
|
| 788 |
+
" wavs.append(wav)\n",
|
| 789 |
+
"print('Synthesized: ')\n",
|
| 790 |
+
"display(ipd.Audio(np.concatenate(wavs), rate=24000, normalize=False))\n",
|
| 791 |
+
"print('Reference: ')\n",
|
| 792 |
+
"display(ipd.Audio(path, rate=24000, normalize=False))"
|
| 793 |
+
]
|
| 794 |
+
},
|
| 795 |
+
{
|
| 796 |
+
"cell_type": "markdown",
|
| 797 |
+
"id": "7517b657",
|
| 798 |
+
"metadata": {},
|
| 799 |
+
"source": [
|
| 800 |
+
"### Style Transfer\n",
|
| 801 |
+
"\n",
|
| 802 |
+
"The following section demostrates the style transfer capacity for unseen speakers in [Section 6](https://styletts2.github.io/#emo) of the demo page. For this, we set `alpha=0.5, beta = 0.9` for the most pronounced effects (mostly using the sampled style). "
|
| 803 |
+
]
|
| 804 |
+
},
|
| 805 |
+
{
|
| 806 |
+
"cell_type": "code",
|
| 807 |
+
"execution_count": null,
|
| 808 |
+
"id": "ed95d0f7",
|
| 809 |
+
"metadata": {},
|
| 810 |
+
"outputs": [],
|
| 811 |
+
"source": [
|
| 812 |
+
"def STinference(text, ref_s, ref_text, alpha = 0.3, beta = 0.7, diffusion_steps=5, embedding_scale=1):\n",
|
| 813 |
+
" text = text.strip()\n",
|
| 814 |
+
" ps = global_phonemizer.phonemize([text])\n",
|
| 815 |
+
" ps = word_tokenize(ps[0])\n",
|
| 816 |
+
" ps = ' '.join(ps)\n",
|
| 817 |
+
"\n",
|
| 818 |
+
" tokens = textclenaer(ps)\n",
|
| 819 |
+
" tokens.insert(0, 0)\n",
|
| 820 |
+
" tokens = torch.LongTensor(tokens).to(device).unsqueeze(0)\n",
|
| 821 |
+
" \n",
|
| 822 |
+
" ref_text = ref_text.strip()\n",
|
| 823 |
+
" ps = global_phonemizer.phonemize([ref_text])\n",
|
| 824 |
+
" ps = word_tokenize(ps[0])\n",
|
| 825 |
+
" ps = ' '.join(ps)\n",
|
| 826 |
+
"\n",
|
| 827 |
+
" ref_tokens = textclenaer(ps)\n",
|
| 828 |
+
" ref_tokens.insert(0, 0)\n",
|
| 829 |
+
" ref_tokens = torch.LongTensor(ref_tokens).to(device).unsqueeze(0)\n",
|
| 830 |
+
" \n",
|
| 831 |
+
" \n",
|
| 832 |
+
" with torch.no_grad():\n",
|
| 833 |
+
" input_lengths = torch.LongTensor([tokens.shape[-1]]).to(device)\n",
|
| 834 |
+
" text_mask = length_to_mask(input_lengths).to(device)\n",
|
| 835 |
+
"\n",
|
| 836 |
+
" t_en = model.text_encoder(tokens, input_lengths, text_mask)\n",
|
| 837 |
+
" bert_dur = model.bert(tokens, attention_mask=(~text_mask).int())\n",
|
| 838 |
+
" d_en = model.bert_encoder(bert_dur).transpose(-1, -2) \n",
|
| 839 |
+
" \n",
|
| 840 |
+
" ref_input_lengths = torch.LongTensor([ref_tokens.shape[-1]]).to(device)\n",
|
| 841 |
+
" ref_text_mask = length_to_mask(ref_input_lengths).to(device)\n",
|
| 842 |
+
" ref_bert_dur = model.bert(ref_tokens, attention_mask=(~ref_text_mask).int())\n",
|
| 843 |
+
" s_pred = sampler(noise = torch.randn((1, 256)).unsqueeze(1).to(device), \n",
|
| 844 |
+
" embedding=bert_dur,\n",
|
| 845 |
+
" embedding_scale=embedding_scale,\n",
|
| 846 |
+
" features=ref_s, # reference from the same speaker as the embedding\n",
|
| 847 |
+
" num_steps=diffusion_steps).squeeze(1)\n",
|
| 848 |
+
"\n",
|
| 849 |
+
"\n",
|
| 850 |
+
" s = s_pred[:, 128:]\n",
|
| 851 |
+
" ref = s_pred[:, :128]\n",
|
| 852 |
+
"\n",
|
| 853 |
+
" ref = alpha * ref + (1 - alpha) * ref_s[:, :128]\n",
|
| 854 |
+
" s = beta * s + (1 - beta) * ref_s[:, 128:]\n",
|
| 855 |
+
"\n",
|
| 856 |
+
" d = model.predictor.text_encoder(d_en, \n",
|
| 857 |
+
" s, input_lengths, text_mask)\n",
|
| 858 |
+
"\n",
|
| 859 |
+
" x, _ = model.predictor.lstm(d)\n",
|
| 860 |
+
" duration = model.predictor.duration_proj(x)\n",
|
| 861 |
+
"\n",
|
| 862 |
+
" duration = torch.sigmoid(duration).sum(axis=-1)\n",
|
| 863 |
+
" pred_dur = torch.round(duration.squeeze()).clamp(min=1)\n",
|
| 864 |
+
"\n",
|
| 865 |
+
"\n",
|
| 866 |
+
" pred_aln_trg = torch.zeros(input_lengths, int(pred_dur.sum().data))\n",
|
| 867 |
+
" c_frame = 0\n",
|
| 868 |
+
" for i in range(pred_aln_trg.size(0)):\n",
|
| 869 |
+
" pred_aln_trg[i, c_frame:c_frame + int(pred_dur[i].data)] = 1\n",
|
| 870 |
+
" c_frame += int(pred_dur[i].data)\n",
|
| 871 |
+
"\n",
|
| 872 |
+
" # encode prosody\n",
|
| 873 |
+
" en = (d.transpose(-1, -2) @ pred_aln_trg.unsqueeze(0).to(device))\n",
|
| 874 |
+
" if model_params.decoder.type == \"hifigan\":\n",
|
| 875 |
+
" asr_new = torch.zeros_like(en)\n",
|
| 876 |
+
" asr_new[:, :, 0] = en[:, :, 0]\n",
|
| 877 |
+
" asr_new[:, :, 1:] = en[:, :, 0:-1]\n",
|
| 878 |
+
" en = asr_new\n",
|
| 879 |
+
"\n",
|
| 880 |
+
" F0_pred, N_pred = model.predictor.F0Ntrain(en, s)\n",
|
| 881 |
+
"\n",
|
| 882 |
+
" asr = (t_en @ pred_aln_trg.unsqueeze(0).to(device))\n",
|
| 883 |
+
" if model_params.decoder.type == \"hifigan\":\n",
|
| 884 |
+
" asr_new = torch.zeros_like(asr)\n",
|
| 885 |
+
" asr_new[:, :, 0] = asr[:, :, 0]\n",
|
| 886 |
+
" asr_new[:, :, 1:] = asr[:, :, 0:-1]\n",
|
| 887 |
+
" asr = asr_new\n",
|
| 888 |
+
"\n",
|
| 889 |
+
" out = model.decoder(asr, \n",
|
| 890 |
+
" F0_pred, N_pred, ref.squeeze().unsqueeze(0))\n",
|
| 891 |
+
" \n",
|
| 892 |
+
" \n",
|
| 893 |
+
" return out.squeeze().cpu().numpy()[..., :-50] # weird pulse at the end of the model, need to be fixed later"
|
| 894 |
+
]
|
| 895 |
+
},
|
| 896 |
+
{
|
| 897 |
+
"cell_type": "code",
|
| 898 |
+
"execution_count": null,
|
| 899 |
+
"id": "ec3f0da4",
|
| 900 |
+
"metadata": {},
|
| 901 |
+
"outputs": [],
|
| 902 |
+
"source": [
|
| 903 |
+
"# reference texts to sample styles\n",
|
| 904 |
+
"\n",
|
| 905 |
+
"ref_texts = {}\n",
|
| 906 |
+
"ref_texts['Happy'] = \"We are happy to invite you to join us on a journey to the past, where we will visit the most amazing monuments ever built by human hands.\"\n",
|
| 907 |
+
"ref_texts['Sad'] = \"I am sorry to say that we have suffered a severe setback in our efforts to restore prosperity and confidence.\"\n",
|
| 908 |
+
"ref_texts['Angry'] = \"The field of astronomy is a joke! Its theories are based on flawed observations and biased interpretations!\"\n",
|
| 909 |
+
"ref_texts['Surprised'] = \"I can't believe it! You mean to tell me that you have discovered a new species of bacteria in this pond?\""
|
| 910 |
+
]
|
| 911 |
+
},
|
| 912 |
+
{
|
| 913 |
+
"cell_type": "code",
|
| 914 |
+
"execution_count": null,
|
| 915 |
+
"id": "6d0a3825",
|
| 916 |
+
"metadata": {
|
| 917 |
+
"scrolled": false
|
| 918 |
+
},
|
| 919 |
+
"outputs": [],
|
| 920 |
+
"source": [
|
| 921 |
+
"path = \"Demo/reference_audio/1221-135767-0014.wav\"\n",
|
| 922 |
+
"s_ref = compute_style(path)\n",
|
| 923 |
+
"\n",
|
| 924 |
+
"text = \"Yea, his honourable worship is within, but he hath a godly minister or two with him, and likewise a leech.\"\n",
|
| 925 |
+
"for k,v in ref_texts.items():\n",
|
| 926 |
+
" wav = STinference(text, s_ref, v, diffusion_steps=10, alpha=0.5, beta=0.9, embedding_scale=1.5)\n",
|
| 927 |
+
" print(k + \": \")\n",
|
| 928 |
+
" display(ipd.Audio(wav, rate=24000, normalize=False))"
|
| 929 |
+
]
|
| 930 |
+
},
|
| 931 |
+
{
|
| 932 |
+
"cell_type": "markdown",
|
| 933 |
+
"id": "6750aed9",
|
| 934 |
+
"metadata": {},
|
| 935 |
+
"source": [
|
| 936 |
+
"### Speech diversity\n",
|
| 937 |
+
"\n",
|
| 938 |
+
"This section reproduces samples in [Section 7](https://styletts2.github.io/#var) of the demo page. \n",
|
| 939 |
+
"\n",
|
| 940 |
+
"`alpha` and `beta` determine the diversity of the synthesized speech. There are two extreme cases:\n",
|
| 941 |
+
"- If `alpha = 1` and `beta = 1`, the synthesized speech sounds the most dissimilar to the reference speaker, but it is also the most diverse (each time you synthesize a speech it will be totally different). \n",
|
| 942 |
+
"- If `alpha = 0` and `beta = 0`, the synthesized speech sounds the most siimlar to the reference speaker, but it is deterministic (i.e., the sampled style is not used for speech synthesis). \n"
|
| 943 |
+
]
|
| 944 |
+
},
|
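One hypothetical way to put a number on this diversity (not part of the notebook) is to sample several styles and measure their mean pairwise cosine similarity, with lower similarity meaning more diverse samples:

```python
# Hypothetical diversity measure: mean pairwise cosine similarity between
# sampled style vectors (random stand-ins here); lower = more diverse.
import torch
import torch.nn.functional as F

styles = torch.randn(5, 256)  # stand-ins for five sampled s_pred vectors
sim = F.cosine_similarity(styles.unsqueeze(1), styles.unsqueeze(0), dim=-1)
off_diag = sim[~torch.eye(5, dtype=torch.bool)]
print(off_diag.mean().item())
```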
| 945 |
+
{
|
| 946 |
+
"cell_type": "markdown",
|
| 947 |
+
"id": "f6ae0aa5",
|
| 948 |
+
"metadata": {},
|
| 949 |
+
"source": [
|
| 950 |
+
"#### Default setting (`alpha = 0.3, beta=0.7`)\n",
|
| 951 |
+
"This setting uses 70% of the reference timbre and 30% of the reference prosody and use the diffusion model to sample them based on the text. "
|
| 952 |
+
]
|
| 953 |
+
},
|
| 954 |
+
{
|
| 955 |
+
"cell_type": "code",
|
| 956 |
+
"execution_count": null,
|
| 957 |
+
"id": "36dc0148",
|
| 958 |
+
"metadata": {},
|
| 959 |
+
"outputs": [],
|
| 960 |
+
"source": [
|
| 961 |
+
"# unseen speaker\n",
|
| 962 |
+
"path = \"Demo/reference_audio/1221-135767-0014.wav\"\n",
|
| 963 |
+
"ref_s = compute_style(path)\n",
|
| 964 |
+
"\n",
|
| 965 |
+
"text = \"How much variation is there?\"\n",
|
| 966 |
+
"for _ in range(5):\n",
|
| 967 |
+
" wav = inference(text, ref_s, diffusion_steps=10, alpha=0.3, beta=0.7, embedding_scale=1)\n",
|
| 968 |
+
" display(ipd.Audio(wav, rate=24000, normalize=False))"
|
| 969 |
+
]
|
| 970 |
+
},
|
| 971 |
+
{
|
| 972 |
+
"cell_type": "markdown",
|
| 973 |
+
"id": "bf9ef421",
|
| 974 |
+
"metadata": {},
|
| 975 |
+
"source": [
|
| 976 |
+
"#### Less diverse setting (`alpha = 0.1, beta=0.3`)\n",
|
| 977 |
+
"This setting uses 90% of the reference timbre and 70% of the reference prosody. This makes it more similar to the reference speaker at cost of less diverse samples. "
|
| 978 |
+
]
|
| 979 |
+
},
|
| 980 |
+
{
|
| 981 |
+
"cell_type": "code",
|
| 982 |
+
"execution_count": null,
|
| 983 |
+
"id": "9ba406bd",
|
| 984 |
+
"metadata": {},
|
| 985 |
+
"outputs": [],
|
| 986 |
+
"source": [
|
| 987 |
+
"# unseen speaker\n",
|
| 988 |
+
"path = \"Demo/reference_audio/1221-135767-0014.wav\"\n",
|
| 989 |
+
"ref_s = compute_style(path)\n",
|
| 990 |
+
"\n",
|
| 991 |
+
"text = \"How much variation is there?\"\n",
|
| 992 |
+
"for _ in range(5):\n",
|
| 993 |
+
" wav = inference(text, ref_s, diffusion_steps=10, alpha=0.1, beta=0.3, embedding_scale=1)\n",
|
| 994 |
+
" display(ipd.Audio(wav, rate=24000, normalize=False))"
|
| 995 |
+
]
|
| 996 |
+
},
|
| 997 |
+
{
|
| 998 |
+
"cell_type": "markdown",
|
| 999 |
+
"id": "a38fe464",
|
| 1000 |
+
"metadata": {},
|
| 1001 |
+
"source": [
|
| 1002 |
+
"#### More diverse setting (`alpha = 0.5, beta=0.95`)\n",
|
| 1003 |
+
"This setting uses 50% of the reference timbre and 5% of the reference prosody (so it uses 100% of the sampled prosody, which makes it more diverse), but this makes it more dissimilar to the reference speaker. "
|
| 1004 |
+
]
|
| 1005 |
+
},
|
| 1006 |
+
{
|
| 1007 |
+
"cell_type": "code",
|
| 1008 |
+
"execution_count": null,
|
| 1009 |
+
"id": "5f25bf94",
|
| 1010 |
+
"metadata": {},
|
| 1011 |
+
"outputs": [],
|
| 1012 |
+
"source": [
|
| 1013 |
+
"# unseen speaker\n",
|
| 1014 |
+
"path = \"Demo/reference_audio/1221-135767-0014.wav\"\n",
|
| 1015 |
+
"ref_s = compute_style(path)\n",
|
| 1016 |
+
"\n",
|
| 1017 |
+
"text = \"How much variation is there?\"\n",
|
| 1018 |
+
"for _ in range(5):\n",
|
| 1019 |
+
" wav = inference(text, ref_s, diffusion_steps=10, alpha=0.5, beta=0.95, embedding_scale=1)\n",
|
| 1020 |
+
" display(ipd.Audio(wav, rate=24000, normalize=False))"
|
| 1021 |
+
]
|
| 1022 |
+
},
|
| 1023 |
+
{
|
| 1024 |
+
"cell_type": "markdown",
|
| 1025 |
+
"id": "21c3a071",
|
| 1026 |
+
"metadata": {},
|
| 1027 |
+
"source": [
|
| 1028 |
+
"#### Extreme setting (`alpha = 1, beta=1`)\n",
|
| 1029 |
+
"This setting uses 0% of the reference timbre and prosody and use the diffusion model to sample the entire style. This makes the speaker very dissimilar to the reference speaker. "
|
| 1030 |
+
]
|
| 1031 |
+
},
|
| 1032 |
+
{
|
| 1033 |
+
"cell_type": "code",
|
| 1034 |
+
"execution_count": null,
|
| 1035 |
+
"id": "fff8bab1",
|
| 1036 |
+
"metadata": {},
|
| 1037 |
+
"outputs": [],
|
| 1038 |
+
"source": [
|
| 1039 |
+
"# unseen speaker\n",
|
| 1040 |
+
"path = \"Demo/reference_audio/1221-135767-0014.wav\"\n",
|
| 1041 |
+
"ref_s = compute_style(path)\n",
|
| 1042 |
+
"\n",
|
| 1043 |
+
"text = \"How much variation is there?\"\n",
|
| 1044 |
+
"for _ in range(5):\n",
|
| 1045 |
+
" wav = inference(text, ref_s, diffusion_steps=10, alpha=1, beta=1, embedding_scale=1)\n",
|
| 1046 |
+
" display(ipd.Audio(wav, rate=24000, normalize=False))"
|
| 1047 |
+
]
|
| 1048 |
+
},
|
| 1049 |
+
{
|
| 1050 |
+
"cell_type": "markdown",
|
| 1051 |
+
"id": "a8741e5a",
|
| 1052 |
+
"metadata": {},
|
| 1053 |
+
"source": [
|
| 1054 |
+
"#### No variation (`alpha = 0, beta=0`)\n",
|
| 1055 |
+
"This setting uses 0% of the reference timbre and prosody and use the diffusion model to sample the entire style. This makes the speaker very similar to the reference speaker, but there is no variation. "
|
| 1056 |
+
]
|
| 1057 |
+
},
|
| 1058 |
+
{
|
| 1059 |
+
"cell_type": "code",
|
| 1060 |
+
"execution_count": null,
|
| 1061 |
+
"id": "e55dd281",
|
| 1062 |
+
"metadata": {},
|
| 1063 |
+
"outputs": [],
|
| 1064 |
+
"source": [
|
| 1065 |
+
"# unseen speaker\n",
|
| 1066 |
+
"path = \"Demo/reference_audio/1221-135767-0014.wav\"\n",
|
| 1067 |
+
"ref_s = compute_style(path)\n",
|
| 1068 |
+
"\n",
|
| 1069 |
+
"text = \"How much variation is there?\"\n",
|
| 1070 |
+
"for _ in range(5):\n",
|
| 1071 |
+
" wav = inference(text, ref_s, diffusion_steps=10, alpha=0, beta=0, embedding_scale=1)\n",
|
| 1072 |
+
" display(ipd.Audio(wav, rate=24000, normalize=False))"
|
| 1073 |
+
]
|
| 1074 |
+
},
|
| 1075 |
+
{
|
| 1076 |
+
"cell_type": "markdown",
|
| 1077 |
+
"id": "d5e86423",
|
| 1078 |
+
"metadata": {},
|
| 1079 |
+
"source": [
|
| 1080 |
+
"### Extra fun!\n",
|
| 1081 |
+
"\n",
|
| 1082 |
+
"Here we clone some of the authors' voice of the StyleTTS 2 papers with a few seconds of the recording in the wild. None of the voices is in the dataset and all authors agreed to have their voices cloned here."
|
| 1083 |
+
]
|
| 1084 |
+
},
|
| 1085 |
+
{
|
| 1086 |
+
"cell_type": "code",
|
| 1087 |
+
"execution_count": null,
|
| 1088 |
+
"id": "6f558314",
|
| 1089 |
+
"metadata": {},
|
| 1090 |
+
"outputs": [],
|
| 1091 |
+
"source": [
|
| 1092 |
+
"text = ''' StyleTTS 2 is a text to speech model that leverages style diffusion and adversarial training with large speech language models to achieve human level text to speech synthesis. '''"
|
| 1093 |
+
]
|
| 1094 |
+
},
|
| 1095 |
+
{
|
| 1096 |
+
"cell_type": "code",
|
| 1097 |
+
"execution_count": null,
|
| 1098 |
+
"id": "caa5747c",
|
| 1099 |
+
"metadata": {},
|
| 1100 |
+
"outputs": [],
|
| 1101 |
+
"source": [
|
| 1102 |
+
"reference_dicts = {}\n",
|
| 1103 |
+
"reference_dicts['Yinghao'] = \"Demo/reference_audio/Yinghao.wav\"\n",
|
| 1104 |
+
"reference_dicts['Gavin'] = \"Demo/reference_audio/Gavin.wav\"\n",
|
| 1105 |
+
"reference_dicts['Vinay'] = \"Demo/reference_audio/Vinay.wav\"\n",
|
| 1106 |
+
"reference_dicts['Nima'] = \"Demo/reference_audio/Nima.wav\""
|
| 1107 |
+
]
|
| 1108 |
+
},
|
| 1109 |
+
{
|
| 1110 |
+
"cell_type": "code",
|
| 1111 |
+
"execution_count": null,
|
| 1112 |
+
"id": "44a4cea1",
|
| 1113 |
+
"metadata": {
|
| 1114 |
+
"scrolled": false
|
| 1115 |
+
},
|
| 1116 |
+
"outputs": [],
|
| 1117 |
+
"source": [
|
| 1118 |
+
"start = time.time()\n",
|
| 1119 |
+
"noise = torch.randn(1,1,256).to(device)\n",
|
| 1120 |
+
"for k, path in reference_dicts.items():\n",
|
| 1121 |
+
" ref_s = compute_style(path)\n",
|
| 1122 |
+
" \n",
|
| 1123 |
+
" wav = inference(text, ref_s, alpha=0.1, beta=0.5, diffusion_steps=5, embedding_scale=1)\n",
|
| 1124 |
+
" rtf = (time.time() - start) / (len(wav) / 24000)\n",
|
| 1125 |
+
" print('Speaker: ' + k)\n",
|
| 1126 |
+
" import IPython.display as ipd\n",
|
| 1127 |
+
" print('Synthesized:')\n",
|
| 1128 |
+
" display(ipd.Audio(wav, rate=24000, normalize=False))\n",
|
| 1129 |
+
" print('Reference:')\n",
|
| 1130 |
+
" display(ipd.Audio(path, rate=24000, normalize=False))"
|
| 1131 |
+
]
|
| 1132 |
+
}
|
| 1133 |
+
],
|
| 1134 |
+
"metadata": {
|
| 1135 |
+
"kernelspec": {
|
| 1136 |
+
"display_name": "NLP",
|
| 1137 |
+
"language": "python",
|
| 1138 |
+
"name": "nlp"
|
| 1139 |
+
},
|
| 1140 |
+
"language_info": {
|
| 1141 |
+
"codemirror_mode": {
|
| 1142 |
+
"name": "ipython",
|
| 1143 |
+
"version": 3
|
| 1144 |
+
},
|
| 1145 |
+
"file_extension": ".py",
|
| 1146 |
+
"mimetype": "text/x-python",
|
| 1147 |
+
"name": "python",
|
| 1148 |
+
"nbconvert_exporter": "python",
|
| 1149 |
+
"pygments_lexer": "ipython3",
|
| 1150 |
+
"version": "3.9.7"
|
| 1151 |
+
}
|
| 1152 |
+
},
|
| 1153 |
+
"nbformat": 4,
|
| 1154 |
+
"nbformat_minor": 5
|
| 1155 |
+
}
|
LICENSE
ADDED
|
@@ -0,0 +1,21 @@
|
| 1 |
+
MIT License
|
| 2 |
+
|
| 3 |
+
Copyright (c) 2023 Aaron (Yinghao) Li
|
| 4 |
+
|
| 5 |
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
| 6 |
+
of this software and associated documentation files (the "Software"), to deal
|
| 7 |
+
in the Software without restriction, including without limitation the rights
|
| 8 |
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
| 9 |
+
copies of the Software, and to permit persons to whom the Software is
|
| 10 |
+
furnished to do so, subject to the following conditions:
|
| 11 |
+
|
| 12 |
+
The above copyright notice and this permission notice shall be included in all
|
| 13 |
+
copies or substantial portions of the Software.
|
| 14 |
+
|
| 15 |
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
| 16 |
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
| 17 |
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
| 18 |
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
| 19 |
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
| 20 |
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
| 21 |
+
SOFTWARE.
|
Modules/__init__.py
ADDED
|
@@ -0,0 +1 @@
|
| 1 |
+
|
Modules/diffusion/__init__.py
ADDED
|
@@ -0,0 +1 @@
|
| 1 |
+
|
Modules/diffusion/diffusion.py
ADDED
|
@@ -0,0 +1,94 @@
|
from math import pi
from random import randint
from typing import Any, Optional, Sequence, Tuple, Union

import torch
from einops import rearrange
from torch import Tensor, nn
from tqdm import tqdm

from .utils import *
from .sampler import *

"""
Diffusion Classes (generic for 1d data)
"""


class Model1d(nn.Module):
    def __init__(self, unet_type: str = "base", **kwargs):
        super().__init__()
        diffusion_kwargs, kwargs = groupby("diffusion_", kwargs)
        # unet and diffusion are left unset here; they are expected to be
        # assigned externally before forward()/sample() are called.
        # The diffusion_kwargs split off above are not used by this class.
        self.unet = None
        self.diffusion = None

    def forward(self, x: Tensor, **kwargs) -> Tensor:
        return self.diffusion(x, **kwargs)

    def sample(self, *args, **kwargs) -> Tensor:
        return self.diffusion.sample(*args, **kwargs)


"""
Audio Diffusion Classes (specific for 1d audio data)
"""


def get_default_model_kwargs():
    return dict(
        channels=128,
        patch_size=16,
        multipliers=[1, 2, 4, 4, 4, 4, 4],
        factors=[4, 4, 4, 2, 2, 2],
        num_blocks=[2, 2, 2, 2, 2, 2],
        attentions=[0, 0, 0, 1, 1, 1, 1],
        attention_heads=8,
        attention_features=64,
        attention_multiplier=2,
        attention_use_rel_pos=False,
        diffusion_type="v",
        diffusion_sigma_distribution=UniformDistribution(),
    )


def get_default_sampling_kwargs():
    return dict(sigma_schedule=LinearSchedule(), sampler=VSampler(), clamp=True)


class AudioDiffusionModel(Model1d):
    def __init__(self, **kwargs):
        super().__init__(**{**get_default_model_kwargs(), **kwargs})

    def sample(self, *args, **kwargs):
        return super().sample(*args, **{**get_default_sampling_kwargs(), **kwargs})


class AudioDiffusionConditional(Model1d):
    def __init__(
        self,
        embedding_features: int,
        embedding_max_length: int,
        embedding_mask_proba: float = 0.1,
        **kwargs,
    ):
        self.embedding_mask_proba = embedding_mask_proba
        default_kwargs = dict(
            **get_default_model_kwargs(),
            unet_type="cfg",
            context_embedding_features=embedding_features,
            context_embedding_max_length=embedding_max_length,
        )
        super().__init__(**{**default_kwargs, **kwargs})

    def forward(self, *args, **kwargs):
        default_kwargs = dict(embedding_mask_proba=self.embedding_mask_proba)
        return super().forward(*args, **{**default_kwargs, **kwargs})

    def sample(self, *args, **kwargs):
        default_kwargs = dict(
            **get_default_sampling_kwargs(),
            embedding_scale=5.0,
        )
        return super().sample(*args, **{**default_kwargs, **kwargs})
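`AudioDiffusionConditional` layers its defaults under the caller's kwargs with `{**default_kwargs, **kwargs}`, so any explicitly passed value wins, and `Model1d` leaves `self.unet`/`self.diffusion` for the caller to fill in. A minimal sketch of that pattern (the `embedding_features` value and the commented-out calls are illustrative assumptions, not taken from this repo):

```python
# Sketch only: shows the kwargs-merging and external-wiring pattern above.
model = AudioDiffusionConditional(
    embedding_features=768,    # assumed size of the conditioning embeddings
    embedding_max_length=512,
    embedding_mask_proba=0.1,  # fraction of batches trained unconditionally
)
# model.diffusion is None until assigned, e.g. model.diffusion = KDiffusion(...).
# Because {**default_kwargs, **kwargs} lets the caller win, this call would
# sample with embedding_scale=2.0 instead of the class default of 5.0:
# audio = model.sample(noise, num_steps=50, embedding=emb, embedding_scale=2.0)
```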
Modules/diffusion/modules.py
ADDED
@@ -0,0 +1,693 @@
from math import floor, log, pi
from typing import Any, List, Optional, Sequence, Tuple, Union

from .utils import *

import torch
import torch.nn as nn
import torch.nn.functional as F  # explicit import for F.layer_norm below; F was not imported directly in the original
from einops import rearrange, reduce, repeat
from einops.layers.torch import Rearrange
from einops_exts import rearrange_many
from torch import Tensor, einsum


"""
Utils
"""

class AdaLayerNorm(nn.Module):
    def __init__(self, style_dim, channels, eps=1e-5):
        super().__init__()
        self.channels = channels
        self.eps = eps

        self.fc = nn.Linear(style_dim, channels * 2)

    def forward(self, x, s):
        x = x.transpose(-1, -2)
        x = x.transpose(1, -1)

        h = self.fc(s)
        h = h.view(h.size(0), h.size(1), 1)
        gamma, beta = torch.chunk(h, chunks=2, dim=1)
        gamma, beta = gamma.transpose(1, -1), beta.transpose(1, -1)

        x = F.layer_norm(x, (self.channels,), eps=self.eps)
        x = (1 + gamma) * x + beta
        return x.transpose(1, -1).transpose(-1, -2)

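`AdaLayerNorm` replaces LayerNorm's learned affine parameters with a per-example gamma and beta predicted from the style vector `s`; for 3-D input the two transpose pairs cancel, so tensors stay in `(batch, time, channels)` layout throughout. A quick shape check (a sketch, assuming the class is importable as defined above):

```python
import torch

norm = AdaLayerNorm(style_dim=128, channels=64)
x = torch.randn(2, 50, 64)   # (batch, time, channels); the channel axis is normalized
s = torch.randn(2, 128)      # one style vector per batch element
y = norm(x, s)
print(y.shape)               # torch.Size([2, 50, 64]) -- layout unchanged
```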

class StyleTransformer1d(nn.Module):
    def __init__(
        self,
        num_layers: int,
        channels: int,
        num_heads: int,
        head_features: int,
        multiplier: int,
        use_context_time: bool = True,
        use_rel_pos: bool = False,
        context_features_multiplier: int = 1,
        rel_pos_num_buckets: Optional[int] = None,
        rel_pos_max_distance: Optional[int] = None,
        context_features: Optional[int] = None,
        context_embedding_features: Optional[int] = None,
        embedding_max_length: int = 512,
    ):
        super().__init__()

        self.blocks = nn.ModuleList(
            [
                StyleTransformerBlock(
                    features=channels + context_embedding_features,
                    head_features=head_features,
                    num_heads=num_heads,
                    multiplier=multiplier,
                    style_dim=context_features,
                    use_rel_pos=use_rel_pos,
                    rel_pos_num_buckets=rel_pos_num_buckets,
                    rel_pos_max_distance=rel_pos_max_distance,
                )
                for i in range(num_layers)
            ]
        )

        self.to_out = nn.Sequential(
            Rearrange("b t c -> b c t"),
            nn.Conv1d(
                in_channels=channels + context_embedding_features,
                out_channels=channels,
                kernel_size=1,
            ),
        )

        use_context_features = exists(context_features)
        self.use_context_features = use_context_features
        self.use_context_time = use_context_time

        if use_context_time or use_context_features:
            context_mapping_features = channels + context_embedding_features

            self.to_mapping = nn.Sequential(
                nn.Linear(context_mapping_features, context_mapping_features),
                nn.GELU(),
                nn.Linear(context_mapping_features, context_mapping_features),
                nn.GELU(),
            )

        if use_context_time:
            assert exists(context_mapping_features)
            self.to_time = nn.Sequential(
                TimePositionalEmbedding(
                    dim=channels, out_features=context_mapping_features
                ),
                nn.GELU(),
            )

        if use_context_features:
            assert exists(context_features) and exists(context_mapping_features)
            self.to_features = nn.Sequential(
                nn.Linear(
                    in_features=context_features, out_features=context_mapping_features
                ),
                nn.GELU(),
            )

        self.fixed_embedding = FixedEmbedding(
            max_length=embedding_max_length, features=context_embedding_features
        )

    def get_mapping(
        self, time: Optional[Tensor] = None, features: Optional[Tensor] = None
    ) -> Optional[Tensor]:
        """Combines context time features and features into mapping"""
        items, mapping = [], None
        # Compute time features
        if self.use_context_time:
            assert_message = "use_context_time=True but no time features provided"
            assert exists(time), assert_message
            items += [self.to_time(time)]
        # Compute features
        if self.use_context_features:
            assert_message = "context_features exists but no features provided"
            assert exists(features), assert_message
            items += [self.to_features(features)]

        # Compute joint mapping
        if self.use_context_time or self.use_context_features:
            mapping = reduce(torch.stack(items), "n b m -> b m", "sum")
            mapping = self.to_mapping(mapping)

        return mapping

    def run(self, x, time, embedding, features):
        mapping = self.get_mapping(time, features)
        x = torch.cat([x.expand(-1, embedding.size(1), -1), embedding], axis=-1)
        mapping = mapping.unsqueeze(1).expand(-1, embedding.size(1), -1)

        for block in self.blocks:
            x = x + mapping
            x = block(x, features)

        x = x.mean(axis=1).unsqueeze(1)
        x = self.to_out(x)
        x = x.transpose(-1, -2)

        return x

    def forward(
        self,
        x: Tensor,
        time: Tensor,
        embedding_mask_proba: float = 0.0,
        embedding: Optional[Tensor] = None,
        features: Optional[Tensor] = None,
        embedding_scale: float = 1.0,
    ) -> Tensor:
        b, device = embedding.shape[0], embedding.device
        fixed_embedding = self.fixed_embedding(embedding)
        if embedding_mask_proba > 0.0:
            # Randomly mask embedding
            batch_mask = rand_bool(
                shape=(b, 1, 1), proba=embedding_mask_proba, device=device
            )
            embedding = torch.where(batch_mask, fixed_embedding, embedding)

        if embedding_scale != 1.0:
            # Compute both normal and fixed embedding outputs
            out = self.run(x, time, embedding=embedding, features=features)
            out_masked = self.run(x, time, embedding=fixed_embedding, features=features)
            # Scale conditional output using classifier-free guidance
            return out_masked + (out - out_masked) * embedding_scale
        else:
            return self.run(x, time, embedding=embedding, features=features)

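The guidance line in `forward` above, `out_masked + (out - out_masked) * embedding_scale`, is classifier-free guidance written as linear extrapolation: scale 0 returns the unconditional (fixed-embedding) output, scale 1 the conditional one, and scales above 1 push further away from the unconditional prediction. The arithmetic in isolation, with made-up numbers:

```python
import torch

out = torch.tensor([1.0])         # conditional prediction (illustrative value)
out_masked = torch.tensor([0.2])  # prediction under the fixed embedding
for scale in (0.0, 1.0, 5.0):
    guided = out_masked + (out - out_masked) * scale
    print(scale, guided.item())   # 0.2, then 1.0, then 4.2
```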

class StyleTransformerBlock(nn.Module):
    def __init__(
        self,
        features: int,
        num_heads: int,
        head_features: int,
        style_dim: int,
        multiplier: int,
        use_rel_pos: bool,
        rel_pos_num_buckets: Optional[int] = None,
        rel_pos_max_distance: Optional[int] = None,
        context_features: Optional[int] = None,
    ):
        super().__init__()

        self.use_cross_attention = exists(context_features) and context_features > 0

        self.attention = StyleAttention(
            features=features,
            style_dim=style_dim,
            num_heads=num_heads,
            head_features=head_features,
            use_rel_pos=use_rel_pos,
            rel_pos_num_buckets=rel_pos_num_buckets,
            rel_pos_max_distance=rel_pos_max_distance,
        )

        if self.use_cross_attention:
            self.cross_attention = StyleAttention(
                features=features,
                style_dim=style_dim,
                num_heads=num_heads,
                head_features=head_features,
                context_features=context_features,
                use_rel_pos=use_rel_pos,
                rel_pos_num_buckets=rel_pos_num_buckets,
                rel_pos_max_distance=rel_pos_max_distance,
            )

        self.feed_forward = FeedForward(features=features, multiplier=multiplier)

    def forward(self, x: Tensor, s: Tensor, *, context: Optional[Tensor] = None) -> Tensor:
        x = self.attention(x, s) + x
        if self.use_cross_attention:
            x = self.cross_attention(x, s, context=context) + x
        x = self.feed_forward(x) + x
        return x


class StyleAttention(nn.Module):
    def __init__(
        self,
        features: int,
        *,
        style_dim: int,
        head_features: int,
        num_heads: int,
        context_features: Optional[int] = None,
        use_rel_pos: bool,
        rel_pos_num_buckets: Optional[int] = None,
        rel_pos_max_distance: Optional[int] = None,
    ):
        super().__init__()
        self.context_features = context_features
        mid_features = head_features * num_heads
        context_features = default(context_features, features)

        self.norm = AdaLayerNorm(style_dim, features)
        self.norm_context = AdaLayerNorm(style_dim, context_features)
        self.to_q = nn.Linear(
            in_features=features, out_features=mid_features, bias=False
        )
        self.to_kv = nn.Linear(
            in_features=context_features, out_features=mid_features * 2, bias=False
        )
        self.attention = AttentionBase(
            features,
            num_heads=num_heads,
            head_features=head_features,
            use_rel_pos=use_rel_pos,
            rel_pos_num_buckets=rel_pos_num_buckets,
            rel_pos_max_distance=rel_pos_max_distance,
        )

    def forward(self, x: Tensor, s: Tensor, *, context: Optional[Tensor] = None) -> Tensor:
        assert_message = "You must provide a context when using context_features"
        assert not self.context_features or exists(context), assert_message
        # Use context if provided
        context = default(context, x)
        # Normalize then compute q from input and k,v from context
        x, context = self.norm(x, s), self.norm_context(context, s)

        q, k, v = (self.to_q(x), *torch.chunk(self.to_kv(context), chunks=2, dim=-1))
        # Compute and return attention
        return self.attention(q, k, v)


class Transformer1d(nn.Module):
    def __init__(
        self,
        num_layers: int,
        channels: int,
        num_heads: int,
        head_features: int,
        multiplier: int,
        use_context_time: bool = True,
        use_rel_pos: bool = False,
        context_features_multiplier: int = 1,
        rel_pos_num_buckets: Optional[int] = None,
        rel_pos_max_distance: Optional[int] = None,
        context_features: Optional[int] = None,
        context_embedding_features: Optional[int] = None,
        embedding_max_length: int = 512,
    ):
        super().__init__()

        self.blocks = nn.ModuleList(
            [
                TransformerBlock(
                    features=channels + context_embedding_features,
                    head_features=head_features,
                    num_heads=num_heads,
                    multiplier=multiplier,
                    use_rel_pos=use_rel_pos,
                    rel_pos_num_buckets=rel_pos_num_buckets,
                    rel_pos_max_distance=rel_pos_max_distance,
                )
                for i in range(num_layers)
            ]
        )

        self.to_out = nn.Sequential(
            Rearrange("b t c -> b c t"),
            nn.Conv1d(
                in_channels=channels + context_embedding_features,
                out_channels=channels,
                kernel_size=1,
            ),
        )

        use_context_features = exists(context_features)
        self.use_context_features = use_context_features
        self.use_context_time = use_context_time

        if use_context_time or use_context_features:
            context_mapping_features = channels + context_embedding_features

            self.to_mapping = nn.Sequential(
                nn.Linear(context_mapping_features, context_mapping_features),
                nn.GELU(),
                nn.Linear(context_mapping_features, context_mapping_features),
                nn.GELU(),
            )

        if use_context_time:
            assert exists(context_mapping_features)
            self.to_time = nn.Sequential(
                TimePositionalEmbedding(
                    dim=channels, out_features=context_mapping_features
                ),
                nn.GELU(),
            )

        if use_context_features:
            assert exists(context_features) and exists(context_mapping_features)
            self.to_features = nn.Sequential(
                nn.Linear(
                    in_features=context_features, out_features=context_mapping_features
                ),
                nn.GELU(),
            )

        self.fixed_embedding = FixedEmbedding(
            max_length=embedding_max_length, features=context_embedding_features
        )

    def get_mapping(
        self, time: Optional[Tensor] = None, features: Optional[Tensor] = None
    ) -> Optional[Tensor]:
        """Combines context time features and features into mapping"""
        items, mapping = [], None
        # Compute time features
        if self.use_context_time:
            assert_message = "use_context_time=True but no time features provided"
            assert exists(time), assert_message
            items += [self.to_time(time)]
        # Compute features
        if self.use_context_features:
            assert_message = "context_features exists but no features provided"
            assert exists(features), assert_message
            items += [self.to_features(features)]

        # Compute joint mapping
        if self.use_context_time or self.use_context_features:
            mapping = reduce(torch.stack(items), "n b m -> b m", "sum")
            mapping = self.to_mapping(mapping)

        return mapping

    def run(self, x, time, embedding, features):
        mapping = self.get_mapping(time, features)
        x = torch.cat([x.expand(-1, embedding.size(1), -1), embedding], axis=-1)
        mapping = mapping.unsqueeze(1).expand(-1, embedding.size(1), -1)

        for block in self.blocks:
            x = x + mapping
            x = block(x)

        x = x.mean(axis=1).unsqueeze(1)
        x = self.to_out(x)
        x = x.transpose(-1, -2)

        return x

    def forward(
        self,
        x: Tensor,
        time: Tensor,
        embedding_mask_proba: float = 0.0,
        embedding: Optional[Tensor] = None,
        features: Optional[Tensor] = None,
        embedding_scale: float = 1.0,
    ) -> Tensor:
        b, device = embedding.shape[0], embedding.device
        fixed_embedding = self.fixed_embedding(embedding)
        if embedding_mask_proba > 0.0:
            # Randomly mask embedding
            batch_mask = rand_bool(
                shape=(b, 1, 1), proba=embedding_mask_proba, device=device
            )
            embedding = torch.where(batch_mask, fixed_embedding, embedding)

        if embedding_scale != 1.0:
            # Compute both normal and fixed embedding outputs
            out = self.run(x, time, embedding=embedding, features=features)
            out_masked = self.run(x, time, embedding=fixed_embedding, features=features)
            # Scale conditional output using classifier-free guidance
            return out_masked + (out - out_masked) * embedding_scale
        else:
            return self.run(x, time, embedding=embedding, features=features)


"""
Attention Components
"""


class RelativePositionBias(nn.Module):
    def __init__(self, num_buckets: int, max_distance: int, num_heads: int):
        super().__init__()
        self.num_buckets = num_buckets
        self.max_distance = max_distance
        self.num_heads = num_heads
        self.relative_attention_bias = nn.Embedding(num_buckets, num_heads)

    @staticmethod
    def _relative_position_bucket(
        relative_position: Tensor, num_buckets: int, max_distance: int
    ):
        num_buckets //= 2
        ret = (relative_position >= 0).to(torch.long) * num_buckets
        n = torch.abs(relative_position)

        max_exact = num_buckets // 2
        is_small = n < max_exact

        val_if_large = (
            max_exact
            + (
                torch.log(n.float() / max_exact)
                / log(max_distance / max_exact)
                * (num_buckets - max_exact)
            ).long()
        )
        val_if_large = torch.min(
            val_if_large, torch.full_like(val_if_large, num_buckets - 1)
        )

        ret += torch.where(is_small, n, val_if_large)
        return ret

    def forward(self, num_queries: int, num_keys: int) -> Tensor:
        i, j, device = num_queries, num_keys, self.relative_attention_bias.weight.device
        q_pos = torch.arange(j - i, j, dtype=torch.long, device=device)
        k_pos = torch.arange(j, dtype=torch.long, device=device)
        rel_pos = rearrange(k_pos, "j -> 1 j") - rearrange(q_pos, "i -> i 1")

        relative_position_bucket = self._relative_position_bucket(
            rel_pos, num_buckets=self.num_buckets, max_distance=self.max_distance
        )

        bias = self.relative_attention_bias(relative_position_bucket)
        bias = rearrange(bias, "m n h -> 1 h m n")
        return bias

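`_relative_position_bucket` is T5-style bucketing: half the buckets encode the sign of the offset, small offsets below `max_exact` get their own exact buckets, and larger ones are binned logarithmically up to `max_distance`. A small probe of the static method (the bucket counts are illustrative, not settings taken from this repo):

```python
import torch

rel = torch.arange(-10, 11)  # relative offsets key_pos - query_pos
buckets = RelativePositionBias._relative_position_bucket(
    rel, num_buckets=32, max_distance=128
)
print(buckets)  # |offset| < 8 maps to exact buckets; a non-negative sign adds 32 // 2
```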

def FeedForward(features: int, multiplier: int) -> nn.Module:
    mid_features = features * multiplier
    return nn.Sequential(
        nn.Linear(in_features=features, out_features=mid_features),
        nn.GELU(),
        nn.Linear(in_features=mid_features, out_features=features),
    )


class AttentionBase(nn.Module):
    def __init__(
        self,
        features: int,
        *,
        head_features: int,
        num_heads: int,
        use_rel_pos: bool,
        out_features: Optional[int] = None,
        rel_pos_num_buckets: Optional[int] = None,
        rel_pos_max_distance: Optional[int] = None,
    ):
        super().__init__()
        self.scale = head_features ** -0.5
        self.num_heads = num_heads
        self.use_rel_pos = use_rel_pos
        mid_features = head_features * num_heads

        if use_rel_pos:
            assert exists(rel_pos_num_buckets) and exists(rel_pos_max_distance)
            self.rel_pos = RelativePositionBias(
                num_buckets=rel_pos_num_buckets,
                max_distance=rel_pos_max_distance,
                num_heads=num_heads,
            )
        if out_features is None:
            out_features = features

        self.to_out = nn.Linear(in_features=mid_features, out_features=out_features)

    def forward(self, q: Tensor, k: Tensor, v: Tensor) -> Tensor:
        # Split heads
        q, k, v = rearrange_many((q, k, v), "b n (h d) -> b h n d", h=self.num_heads)
        # Compute similarity matrix
        sim = einsum("... n d, ... m d -> ... n m", q, k)
        sim = (sim + self.rel_pos(*sim.shape[-2:])) if self.use_rel_pos else sim
        sim = sim * self.scale
        # Get attention matrix with softmax
        attn = sim.softmax(dim=-1)
        # Compute values
        out = einsum("... n m, ... m d -> ... n d", attn, v)
        out = rearrange(out, "b h n d -> b n (h d)")
        return self.to_out(out)


class Attention(nn.Module):
    def __init__(
        self,
        features: int,
        *,
        head_features: int,
        num_heads: int,
        out_features: Optional[int] = None,
        context_features: Optional[int] = None,
        use_rel_pos: bool,
        rel_pos_num_buckets: Optional[int] = None,
        rel_pos_max_distance: Optional[int] = None,
    ):
        super().__init__()
        self.context_features = context_features
        mid_features = head_features * num_heads
        context_features = default(context_features, features)

        self.norm = nn.LayerNorm(features)
        self.norm_context = nn.LayerNorm(context_features)
        self.to_q = nn.Linear(
            in_features=features, out_features=mid_features, bias=False
        )
        self.to_kv = nn.Linear(
            in_features=context_features, out_features=mid_features * 2, bias=False
        )

        self.attention = AttentionBase(
            features,
            out_features=out_features,
            num_heads=num_heads,
            head_features=head_features,
            use_rel_pos=use_rel_pos,
            rel_pos_num_buckets=rel_pos_num_buckets,
            rel_pos_max_distance=rel_pos_max_distance,
        )

    def forward(self, x: Tensor, *, context: Optional[Tensor] = None) -> Tensor:
        assert_message = "You must provide a context when using context_features"
        assert not self.context_features or exists(context), assert_message
        # Use context if provided
        context = default(context, x)
        # Normalize then compute q from input and k,v from context
        x, context = self.norm(x), self.norm_context(context)
        q, k, v = (self.to_q(x), *torch.chunk(self.to_kv(context), chunks=2, dim=-1))
        # Compute and return attention
        return self.attention(q, k, v)


"""
Transformer Blocks
"""


class TransformerBlock(nn.Module):
    def __init__(
        self,
        features: int,
        num_heads: int,
        head_features: int,
        multiplier: int,
        use_rel_pos: bool,
        rel_pos_num_buckets: Optional[int] = None,
        rel_pos_max_distance: Optional[int] = None,
        context_features: Optional[int] = None,
    ):
        super().__init__()

        self.use_cross_attention = exists(context_features) and context_features > 0

        self.attention = Attention(
            features=features,
            num_heads=num_heads,
            head_features=head_features,
            use_rel_pos=use_rel_pos,
            rel_pos_num_buckets=rel_pos_num_buckets,
            rel_pos_max_distance=rel_pos_max_distance,
        )

        if self.use_cross_attention:
            self.cross_attention = Attention(
                features=features,
                num_heads=num_heads,
                head_features=head_features,
                context_features=context_features,
                use_rel_pos=use_rel_pos,
                rel_pos_num_buckets=rel_pos_num_buckets,
                rel_pos_max_distance=rel_pos_max_distance,
            )

        self.feed_forward = FeedForward(features=features, multiplier=multiplier)

    def forward(self, x: Tensor, *, context: Optional[Tensor] = None) -> Tensor:
        x = self.attention(x) + x
        if self.use_cross_attention:
            x = self.cross_attention(x, context=context) + x
        x = self.feed_forward(x) + x
        return x


"""
Time Embeddings
"""


class SinusoidalEmbedding(nn.Module):
    def __init__(self, dim: int):
        super().__init__()
        self.dim = dim

    def forward(self, x: Tensor) -> Tensor:
        device, half_dim = x.device, self.dim // 2
        emb = torch.tensor(log(10000) / (half_dim - 1), device=device)
        emb = torch.exp(torch.arange(half_dim, device=device) * -emb)
        emb = rearrange(x, "i -> i 1") * rearrange(emb, "j -> 1 j")
        return torch.cat((emb.sin(), emb.cos()), dim=-1)


class LearnedPositionalEmbedding(nn.Module):
    """Used for continuous time"""

    def __init__(self, dim: int):
        super().__init__()
        assert (dim % 2) == 0
        half_dim = dim // 2
        self.weights = nn.Parameter(torch.randn(half_dim))

    def forward(self, x: Tensor) -> Tensor:
        x = rearrange(x, "b -> b 1")
        freqs = x * rearrange(self.weights, "d -> 1 d") * 2 * pi
        fouriered = torch.cat((freqs.sin(), freqs.cos()), dim=-1)
        fouriered = torch.cat((x, fouriered), dim=-1)
        return fouriered


def TimePositionalEmbedding(dim: int, out_features: int) -> nn.Module:
    return nn.Sequential(
        LearnedPositionalEmbedding(dim),
        nn.Linear(in_features=dim + 1, out_features=out_features),
    )


class FixedEmbedding(nn.Module):
    def __init__(self, max_length: int, features: int):
        super().__init__()
        self.max_length = max_length
        self.embedding = nn.Embedding(max_length, features)

    def forward(self, x: Tensor) -> Tensor:
        batch_size, length, device = *x.shape[0:2], x.device
        assert_message = "Input sequence length must be <= max_length"
        assert length <= self.max_length, assert_message
        position = torch.arange(length, device=device)
        fixed_embedding = self.embedding(position)
        fixed_embedding = repeat(fixed_embedding, "n d -> b n d", b=batch_size)
        return fixed_embedding
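The `dim + 1` input size of the Linear inside `TimePositionalEmbedding` comes from `LearnedPositionalEmbedding` concatenating the raw time value onto its `dim` random-Fourier features (sin and cos of `half_dim` learned frequencies each). A quick check (a sketch; the dimensions are arbitrary):

```python
import torch

emb = TimePositionalEmbedding(dim=16, out_features=32)
t = torch.rand(4)     # one continuous time value per batch element
print(emb(t).shape)   # torch.Size([4, 32]); internally (4, 17) -> Linear(17, 32)
```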
Modules/diffusion/sampler.py
ADDED
@@ -0,0 +1,691 @@
from math import atan, cos, pi, sin, sqrt
from typing import Any, Callable, List, Optional, Tuple, Type

import torch
import torch.nn as nn
import torch.nn.functional as F
from einops import rearrange, reduce
from torch import Tensor

from .utils import *

"""
Diffusion Training
"""

""" Distributions """


class Distribution:
    def __call__(self, num_samples: int, device: torch.device):
        raise NotImplementedError()


class LogNormalDistribution(Distribution):
    def __init__(self, mean: float, std: float):
        self.mean = mean
        self.std = std

    def __call__(
        self, num_samples: int, device: torch.device = torch.device("cpu")
    ) -> Tensor:
        normal = self.mean + self.std * torch.randn((num_samples,), device=device)
        return normal.exp()


class UniformDistribution(Distribution):
    def __call__(self, num_samples: int, device: torch.device = torch.device("cpu")):
        return torch.rand(num_samples, device=device)


class VKDistribution(Distribution):
    def __init__(
        self,
        min_value: float = 0.0,
        max_value: float = float("inf"),
        sigma_data: float = 1.0,
    ):
        self.min_value = min_value
        self.max_value = max_value
        self.sigma_data = sigma_data

    def __call__(
        self, num_samples: int, device: torch.device = torch.device("cpu")
    ) -> Tensor:
        sigma_data = self.sigma_data
        min_cdf = atan(self.min_value / sigma_data) * 2 / pi
        max_cdf = atan(self.max_value / sigma_data) * 2 / pi
        u = (max_cdf - min_cdf) * torch.randn((num_samples,), device=device) + min_cdf
        return torch.tan(u * pi / 2) * sigma_data


""" Diffusion Classes """


def pad_dims(x: Tensor, ndim: int) -> Tensor:
    # Pads additional ndims to the right of the tensor
    return x.view(*x.shape, *((1,) * ndim))


def clip(x: Tensor, dynamic_threshold: float = 0.0):
    if dynamic_threshold == 0.0:
        return x.clamp(-1.0, 1.0)
    else:
        # Dynamic thresholding
        # Find dynamic threshold quantile for each batch
        x_flat = rearrange(x, "b ... -> b (...)")
        scale = torch.quantile(x_flat.abs(), dynamic_threshold, dim=-1)
        # Clamp to a min of 1.0
        scale.clamp_(min=1.0)
        # Clamp all values and scale
        scale = pad_dims(scale, ndim=x.ndim - scale.ndim)
        x = x.clamp(-scale, scale) / scale
        return x


def to_batch(
    batch_size: int,
    device: torch.device,
    x: Optional[float] = None,
    xs: Optional[Tensor] = None,
) -> Tensor:
    assert exists(x) ^ exists(xs), "Either x or xs must be provided"
    # If x provided use the same for all batch items
    if exists(x):
        xs = torch.full(size=(batch_size,), fill_value=x).to(device)
    assert exists(xs)
    return xs


class Diffusion(nn.Module):

    alias: str = ""

    """Base diffusion class"""

    def denoise_fn(
        self,
        x_noisy: Tensor,
        sigmas: Optional[Tensor] = None,
        sigma: Optional[float] = None,
        **kwargs,
    ) -> Tensor:
        raise NotImplementedError("Diffusion class missing denoise_fn")

    def forward(self, x: Tensor, noise: Tensor = None, **kwargs) -> Tensor:
        raise NotImplementedError("Diffusion class missing forward function")


class VDiffusion(Diffusion):

    alias = "v"

    def __init__(self, net: nn.Module, *, sigma_distribution: Distribution):
        super().__init__()
        self.net = net
        self.sigma_distribution = sigma_distribution

    def get_alpha_beta(self, sigmas: Tensor) -> Tuple[Tensor, Tensor]:
        angle = sigmas * pi / 2
        alpha = torch.cos(angle)
        beta = torch.sin(angle)
        return alpha, beta

    def denoise_fn(
        self,
        x_noisy: Tensor,
        sigmas: Optional[Tensor] = None,
        sigma: Optional[float] = None,
        **kwargs,
    ) -> Tensor:
        batch_size, device = x_noisy.shape[0], x_noisy.device
        sigmas = to_batch(x=sigma, xs=sigmas, batch_size=batch_size, device=device)
        return self.net(x_noisy, sigmas, **kwargs)

    def forward(self, x: Tensor, noise: Tensor = None, **kwargs) -> Tensor:
        batch_size, device = x.shape[0], x.device

        # Sample amount of noise to add for each batch element
        sigmas = self.sigma_distribution(num_samples=batch_size, device=device)
        sigmas_padded = rearrange(sigmas, "b -> b 1 1")

        # Get noise
        noise = default(noise, lambda: torch.randn_like(x))

        # Combine input and noise weighted by half-circle
        alpha, beta = self.get_alpha_beta(sigmas_padded)
        x_noisy = x * alpha + noise * beta
        x_target = noise * alpha - x * beta

        # Denoise and return loss
        x_denoised = self.denoise_fn(x_noisy, sigmas, **kwargs)
        return F.mse_loss(x_denoised, x_target)


class KDiffusion(Diffusion):
    """Elucidated Diffusion (Karras et al. 2022): https://arxiv.org/abs/2206.00364"""

    alias = "k"

    def __init__(
        self,
        net: nn.Module,
        *,
        sigma_distribution: Distribution,
        sigma_data: float,  # data distribution standard deviation
        dynamic_threshold: float = 0.0,
    ):
        super().__init__()
        self.net = net
        self.sigma_data = sigma_data
        self.sigma_distribution = sigma_distribution
        self.dynamic_threshold = dynamic_threshold

    def get_scale_weights(self, sigmas: Tensor) -> Tuple[Tensor, ...]:
        sigma_data = self.sigma_data
        c_noise = torch.log(sigmas) * 0.25
        sigmas = rearrange(sigmas, "b -> b 1 1")
        c_skip = (sigma_data ** 2) / (sigmas ** 2 + sigma_data ** 2)
        c_out = sigmas * sigma_data * (sigma_data ** 2 + sigmas ** 2) ** -0.5
        c_in = (sigmas ** 2 + sigma_data ** 2) ** -0.5
        return c_skip, c_out, c_in, c_noise

    def denoise_fn(
        self,
        x_noisy: Tensor,
        sigmas: Optional[Tensor] = None,
        sigma: Optional[float] = None,
        **kwargs,
    ) -> Tensor:
        batch_size, device = x_noisy.shape[0], x_noisy.device
        sigmas = to_batch(x=sigma, xs=sigmas, batch_size=batch_size, device=device)

        # Predict network output and add skip connection
        c_skip, c_out, c_in, c_noise = self.get_scale_weights(sigmas)
        x_pred = self.net(c_in * x_noisy, c_noise, **kwargs)
        x_denoised = c_skip * x_noisy + c_out * x_pred

        return x_denoised

    def loss_weight(self, sigmas: Tensor) -> Tensor:
        # Computes weight depending on data distribution
        return (sigmas ** 2 + self.sigma_data ** 2) * (sigmas * self.sigma_data) ** -2

    def forward(self, x: Tensor, noise: Tensor = None, **kwargs) -> Tensor:
        batch_size, device = x.shape[0], x.device

        # Sample amount of noise to add for each batch element
        sigmas = self.sigma_distribution(num_samples=batch_size, device=device)
        sigmas_padded = rearrange(sigmas, "b -> b 1 1")

        # Add noise to input
        noise = default(noise, lambda: torch.randn_like(x))
        x_noisy = x + sigmas_padded * noise

        # Compute denoised values
        x_denoised = self.denoise_fn(x_noisy, sigmas=sigmas, **kwargs)

        # Compute weighted loss
        losses = F.mse_loss(x_denoised, x, reduction="none")
        losses = reduce(losses, "b ... -> b", "mean")
        losses = losses * self.loss_weight(sigmas)
        loss = losses.mean()
        return loss


class VKDiffusion(Diffusion):

    alias = "vk"

    def __init__(self, net: nn.Module, *, sigma_distribution: Distribution):
        super().__init__()
        self.net = net
        self.sigma_distribution = sigma_distribution

    def get_scale_weights(self, sigmas: Tensor) -> Tuple[Tensor, ...]:
        sigma_data = 1.0
        sigmas = rearrange(sigmas, "b -> b 1 1")
        c_skip = (sigma_data ** 2) / (sigmas ** 2 + sigma_data ** 2)
        c_out = -sigmas * sigma_data * (sigma_data ** 2 + sigmas ** 2) ** -0.5
        c_in = (sigmas ** 2 + sigma_data ** 2) ** -0.5
        return c_skip, c_out, c_in

    def sigma_to_t(self, sigmas: Tensor) -> Tensor:
        return sigmas.atan() / pi * 2

    def t_to_sigma(self, t: Tensor) -> Tensor:
        return (t * pi / 2).tan()

    def denoise_fn(
        self,
        x_noisy: Tensor,
        sigmas: Optional[Tensor] = None,
        sigma: Optional[float] = None,
        **kwargs,
    ) -> Tensor:
        batch_size, device = x_noisy.shape[0], x_noisy.device
        sigmas = to_batch(x=sigma, xs=sigmas, batch_size=batch_size, device=device)

        # Predict network output and add skip connection
        c_skip, c_out, c_in = self.get_scale_weights(sigmas)
        x_pred = self.net(c_in * x_noisy, self.sigma_to_t(sigmas), **kwargs)
        x_denoised = c_skip * x_noisy + c_out * x_pred
        return x_denoised

    def forward(self, x: Tensor, noise: Tensor = None, **kwargs) -> Tensor:
        batch_size, device = x.shape[0], x.device

        # Sample amount of noise to add for each batch element
        sigmas = self.sigma_distribution(num_samples=batch_size, device=device)
        sigmas_padded = rearrange(sigmas, "b -> b 1 1")

        # Add noise to input
        noise = default(noise, lambda: torch.randn_like(x))
        x_noisy = x + sigmas_padded * noise

        # Compute model output
        c_skip, c_out, c_in = self.get_scale_weights(sigmas)
        x_pred = self.net(c_in * x_noisy, self.sigma_to_t(sigmas), **kwargs)

        # Compute v-objective target
        v_target = (x - c_skip * x_noisy) / (c_out + 1e-7)

        # Compute loss
        loss = F.mse_loss(x_pred, v_target)
        return loss

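`VDiffusion` above trains on the v-objective: with `alpha = cos(sigma * pi / 2)` and `beta = sin(sigma * pi / 2)`, it corrupts the input as `x_noisy = x * alpha + noise * beta` and regresses `x_target = noise * alpha - x * beta`. Because `alpha**2 + beta**2 = 1`, the clean signal can be recovered as `x = x_noisy * alpha - v * beta`, which is the identity `VSampler` exploits later in this file. A numerical check of that identity:

```python
import torch
from math import pi

x, noise = torch.randn(8), torch.randn(8)
sigma = torch.rand(())                       # noise level in [0, 1)
alpha, beta = torch.cos(sigma * pi / 2), torch.sin(sigma * pi / 2)
x_noisy = x * alpha + noise * beta
v = noise * alpha - x * beta                 # the regression target above
assert torch.allclose(x_noisy * alpha - v * beta, x, atol=1e-5)
```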

"""
Diffusion Sampling
"""

""" Schedules """


class Schedule(nn.Module):
    """Interface used by different sampling schedules"""

    def forward(self, num_steps: int, device: torch.device) -> Tensor:
        raise NotImplementedError()


class LinearSchedule(Schedule):
    def forward(self, num_steps: int, device: Any) -> Tensor:
        sigmas = torch.linspace(1, 0, num_steps + 1)[:-1]
        return sigmas


class KarrasSchedule(Schedule):
    """https://arxiv.org/abs/2206.00364 equation 5"""

    def __init__(self, sigma_min: float, sigma_max: float, rho: float = 7.0):
        super().__init__()
        self.sigma_min = sigma_min
        self.sigma_max = sigma_max
        self.rho = rho

    def forward(self, num_steps: int, device: Any) -> Tensor:
        rho_inv = 1.0 / self.rho
        steps = torch.arange(num_steps, device=device, dtype=torch.float32)
        sigmas = (
            self.sigma_max ** rho_inv
            + (steps / (num_steps - 1))
            * (self.sigma_min ** rho_inv - self.sigma_max ** rho_inv)
        ) ** self.rho
        sigmas = F.pad(sigmas, pad=(0, 1), value=0.0)
        return sigmas

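`KarrasSchedule` interpolates between `sigma_max` and `sigma_min` in `sigma**(1/rho)` space (equation 5 of the Karras et al. paper) and pads a trailing zero so samplers can step all the way down to a clean signal. What it yields for a short run (the values below are the paper's common defaults, not settings taken from this repo):

```python
schedule = KarrasSchedule(sigma_min=0.002, sigma_max=80.0, rho=7.0)
sigmas = schedule(num_steps=5, device="cpu")
print(sigmas)  # 5 strictly decreasing sigmas from 80.0 toward 0.002, then 0.0
```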
| 340 |
+
""" Samplers """
|
| 341 |
+
|
| 342 |
+
|
| 343 |
+
class Sampler(nn.Module):
|
| 344 |
+
|
| 345 |
+
diffusion_types: List[Type[Diffusion]] = []
|
| 346 |
+
|
| 347 |
+
def forward(
|
| 348 |
+
self, noise: Tensor, fn: Callable, sigmas: Tensor, num_steps: int
|
| 349 |
+
) -> Tensor:
|
| 350 |
+
raise NotImplementedError()
|
| 351 |
+
|
| 352 |
+
def inpaint(
|
| 353 |
+
self,
|
| 354 |
+
source: Tensor,
|
| 355 |
+
mask: Tensor,
|
| 356 |
+
fn: Callable,
|
| 357 |
+
sigmas: Tensor,
|
| 358 |
+
num_steps: int,
|
| 359 |
+
num_resamples: int,
|
| 360 |
+
) -> Tensor:
|
| 361 |
+
raise NotImplementedError("Inpainting not available with current sampler")
|
| 362 |
+
|
| 363 |
+
|
| 364 |
+
class VSampler(Sampler):
|
| 365 |
+
|
| 366 |
+
diffusion_types = [VDiffusion]
|
| 367 |
+
|
| 368 |
+
def get_alpha_beta(self, sigma: float) -> Tuple[float, float]:
|
| 369 |
+
angle = sigma * pi / 2
|
| 370 |
+
alpha = cos(angle)
|
| 371 |
+
beta = sin(angle)
|
| 372 |
+
return alpha, beta
|
| 373 |
+
|
| 374 |
+
def forward(
|
| 375 |
+
self, noise: Tensor, fn: Callable, sigmas: Tensor, num_steps: int
|
| 376 |
+
) -> Tensor:
|
| 377 |
+
x = sigmas[0] * noise
|
| 378 |
+
alpha, beta = self.get_alpha_beta(sigmas[0].item())
|
| 379 |
+
|
| 380 |
+
for i in range(num_steps - 1):
|
| 381 |
+
is_last = i == num_steps - 1
|
| 382 |
+
|
| 383 |
+
x_denoised = fn(x, sigma=sigmas[i])
|
| 384 |
+
x_pred = x * alpha - x_denoised * beta
|
| 385 |
+
x_eps = x * beta + x_denoised * alpha
|
| 386 |
+
|
| 387 |
+
if not is_last:
|
| 388 |
+
alpha, beta = self.get_alpha_beta(sigmas[i + 1].item())
|
| 389 |
+
x = x_pred * alpha + x_eps * beta
|
| 390 |
+
|
| 391 |
+
return x_pred
|
| 392 |
+
|
| 393 |
+
|
| 394 |
+
class KarrasSampler(Sampler):
|
| 395 |
+
"""https://arxiv.org/abs/2206.00364 algorithm 1"""
|
| 396 |
+
|
| 397 |
+
diffusion_types = [KDiffusion, VKDiffusion]
|
| 398 |
+
|
| 399 |
+
def __init__(
|
| 400 |
+
self,
|
| 401 |
+
s_tmin: float = 0,
|
| 402 |
+
s_tmax: float = float("inf"),
|
| 403 |
+
s_churn: float = 0.0,
|
| 404 |
+
s_noise: float = 1.0,
|
| 405 |
+
):
|
| 406 |
+
super().__init__()
|
| 407 |
+
self.s_tmin = s_tmin
|
| 408 |
+
self.s_tmax = s_tmax
|
| 409 |
+
self.s_noise = s_noise
|
| 410 |
+
self.s_churn = s_churn
|
| 411 |
+
|
| 412 |
+
def step(
|
| 413 |
+
self, x: Tensor, fn: Callable, sigma: float, sigma_next: float, gamma: float
|
| 414 |
+
) -> Tensor:
|
| 415 |
+
"""Algorithm 2 (step)"""
|
| 416 |
+
# Select temporarily increased noise level
|
| 417 |
+
sigma_hat = sigma + gamma * sigma
|
| 418 |
+
# Add noise to move from sigma to sigma_hat
|
| 419 |
+
epsilon = self.s_noise * torch.randn_like(x)
|
| 420 |
+
x_hat = x + sqrt(sigma_hat ** 2 - sigma ** 2) * epsilon
|
| 421 |
+
# Evaluate ∂x/∂sigma at sigma_hat
|
| 422 |
+
d = (x_hat - fn(x_hat, sigma=sigma_hat)) / sigma_hat
|
| 423 |
+
# Take euler step from sigma_hat to sigma_next
|
| 424 |
+
x_next = x_hat + (sigma_next - sigma_hat) * d
|
        # Second order correction
        if sigma_next != 0:
            model_out_next = fn(x_next, sigma=sigma_next)
            d_prime = (x_next - model_out_next) / sigma_next
            x_next = x_hat + 0.5 * (sigma - sigma_hat) * (d + d_prime)
        return x_next

    def forward(
        self, noise: Tensor, fn: Callable, sigmas: Tensor, num_steps: int
    ) -> Tensor:
        x = sigmas[0] * noise
        # Compute gammas
        gammas = torch.where(
            (sigmas >= self.s_tmin) & (sigmas <= self.s_tmax),
            min(self.s_churn / num_steps, sqrt(2) - 1),
            0.0,
        )
        # Denoise to sample
        for i in range(num_steps - 1):
            x = self.step(
                x, fn=fn, sigma=sigmas[i], sigma_next=sigmas[i + 1], gamma=gammas[i]  # type: ignore # noqa
            )

        return x


class AEulerSampler(Sampler):

    diffusion_types = [KDiffusion, VKDiffusion]

    def get_sigmas(self, sigma: float, sigma_next: float) -> Tuple[float, float]:
        sigma_up = sqrt(sigma_next ** 2 * (sigma ** 2 - sigma_next ** 2) / sigma ** 2)
        sigma_down = sqrt(sigma_next ** 2 - sigma_up ** 2)
        return sigma_up, sigma_down

    def step(self, x: Tensor, fn: Callable, sigma: float, sigma_next: float) -> Tensor:
        # Sigma steps
        sigma_up, sigma_down = self.get_sigmas(sigma, sigma_next)
        # Derivative at sigma (∂x/∂sigma)
        d = (x - fn(x, sigma=sigma)) / sigma
        # Euler method
        x_next = x + d * (sigma_down - sigma)
        # Add randomness
        x_next = x_next + torch.randn_like(x) * sigma_up
        return x_next

    def forward(
        self, noise: Tensor, fn: Callable, sigmas: Tensor, num_steps: int
    ) -> Tensor:
        x = sigmas[0] * noise
        # Denoise to sample
        for i in range(num_steps - 1):
            x = self.step(x, fn=fn, sigma=sigmas[i], sigma_next=sigmas[i + 1])  # type: ignore # noqa
        return x


class ADPM2Sampler(Sampler):
    """https://www.desmos.com/calculator/jbxjlqd9mb"""

    diffusion_types = [KDiffusion, VKDiffusion]

    def __init__(self, rho: float = 1.0):
        super().__init__()
        self.rho = rho

    def get_sigmas(self, sigma: float, sigma_next: float) -> Tuple[float, float, float]:
        r = self.rho
        sigma_up = sqrt(sigma_next ** 2 * (sigma ** 2 - sigma_next ** 2) / sigma ** 2)
        sigma_down = sqrt(sigma_next ** 2 - sigma_up ** 2)
        sigma_mid = ((sigma ** (1 / r) + sigma_down ** (1 / r)) / 2) ** r
        return sigma_up, sigma_down, sigma_mid

    def step(self, x: Tensor, fn: Callable, sigma: float, sigma_next: float) -> Tensor:
        # Sigma steps
        sigma_up, sigma_down, sigma_mid = self.get_sigmas(sigma, sigma_next)
        # Derivative at sigma (∂x/∂sigma)
        d = (x - fn(x, sigma=sigma)) / sigma
        # Denoise to midpoint
        x_mid = x + d * (sigma_mid - sigma)
        # Derivative at sigma_mid (∂x_mid/∂sigma_mid)
        d_mid = (x_mid - fn(x_mid, sigma=sigma_mid)) / sigma_mid
        # Denoise to next
        x = x + d_mid * (sigma_down - sigma)
        # Add randomness
        x_next = x + torch.randn_like(x) * sigma_up
        return x_next

    def forward(
        self, noise: Tensor, fn: Callable, sigmas: Tensor, num_steps: int
    ) -> Tensor:
        x = sigmas[0] * noise
        # Denoise to sample
        for i in range(num_steps - 1):
            x = self.step(x, fn=fn, sigma=sigmas[i], sigma_next=sigmas[i + 1])  # type: ignore # noqa
        return x

    def inpaint(
        self,
        source: Tensor,
        mask: Tensor,
        fn: Callable,
        sigmas: Tensor,
        num_steps: int,
        num_resamples: int,
    ) -> Tensor:
        x = sigmas[0] * torch.randn_like(source)

        for i in range(num_steps - 1):
            # Noise source to current noise level
            source_noisy = source + sigmas[i] * torch.randn_like(source)
            for r in range(num_resamples):
                # Merge noisy source and current then denoise
                x = source_noisy * mask + x * ~mask
                x = self.step(x, fn=fn, sigma=sigmas[i], sigma_next=sigmas[i + 1])  # type: ignore # noqa
                # Renoise if not last resample step
                if r < num_resamples - 1:
                    sigma = sqrt(sigmas[i] ** 2 - sigmas[i + 1] ** 2)
                    x = x + sigma * torch.randn_like(x)

        return source * mask + x * ~mask

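Editor's note: in the ancestral samplers above, each transition from sigma to sigma_next is split into a deterministic step down to sigma_down plus injected noise of scale sigma_up, with sigma_down**2 + sigma_up**2 == sigma_next**2; the definitions also imply the closed form sigma_down = sigma_next**2 / sigma. A minimal numeric check (ours, illustrative only):

# Sketch (ours): properties of the ancestral sigma split used above.
from math import sqrt, isclose

sigma, sigma_next = 10.0, 4.0
sigma_up = sqrt(sigma_next ** 2 * (sigma ** 2 - sigma_next ** 2) / sigma ** 2)
sigma_down = sqrt(sigma_next ** 2 - sigma_up ** 2)
assert isclose(sigma_down, sigma_next ** 2 / sigma)               # 1.6
assert isclose(sigma_down ** 2 + sigma_up ** 2, sigma_next ** 2)  # variance preserved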
""" Main Classes """


class DiffusionSampler(nn.Module):
    def __init__(
        self,
        diffusion: Diffusion,
        *,
        sampler: Sampler,
        sigma_schedule: Schedule,
        num_steps: Optional[int] = None,
        clamp: bool = True,
    ):
        super().__init__()
        self.denoise_fn = diffusion.denoise_fn
        self.sampler = sampler
        self.sigma_schedule = sigma_schedule
        self.num_steps = num_steps
        self.clamp = clamp

        # Check sampler is compatible with diffusion type
        sampler_class = sampler.__class__.__name__
        diffusion_class = diffusion.__class__.__name__
        message = f"{sampler_class} incompatible with {diffusion_class}"
        assert diffusion.alias in [t.alias for t in sampler.diffusion_types], message

    def forward(
        self, noise: Tensor, num_steps: Optional[int] = None, **kwargs
    ) -> Tensor:
        device = noise.device
        num_steps = default(num_steps, self.num_steps)  # type: ignore
        assert exists(num_steps), "Parameter `num_steps` must be provided"
        # Compute sigmas using schedule
        sigmas = self.sigma_schedule(num_steps, device)
        # Append additional kwargs to denoise function (used e.g. for conditional unet)
        fn = lambda *a, **ka: self.denoise_fn(*a, **{**ka, **kwargs})  # noqa
        # Sample using sampler
        x = self.sampler(noise, fn=fn, sigmas=sigmas, num_steps=num_steps)
        x = x.clamp(-1.0, 1.0) if self.clamp else x
        return x


class DiffusionInpainter(nn.Module):
    def __init__(
        self,
        diffusion: Diffusion,
        *,
        num_steps: int,
        num_resamples: int,
        sampler: Sampler,
        sigma_schedule: Schedule,
    ):
        super().__init__()
        self.denoise_fn = diffusion.denoise_fn
        self.num_steps = num_steps
        self.num_resamples = num_resamples
        self.inpaint_fn = sampler.inpaint
        self.sigma_schedule = sigma_schedule

    @torch.no_grad()
    def forward(self, inpaint: Tensor, inpaint_mask: Tensor) -> Tensor:
        x = self.inpaint_fn(
            source=inpaint,
            mask=inpaint_mask,
            fn=self.denoise_fn,
            sigmas=self.sigma_schedule(self.num_steps, inpaint.device),
            num_steps=self.num_steps,
            num_resamples=self.num_resamples,
        )
        return x


def sequential_mask(like: Tensor, start: int) -> Tensor:
    length, device = like.shape[2], like.device
    mask = torch.ones_like(like, dtype=torch.bool)
    mask[:, :, start:] = torch.zeros((length - start,), device=device)
    return mask


class SpanBySpanComposer(nn.Module):
    def __init__(
        self,
        inpainter: DiffusionInpainter,
        *,
        num_spans: int,
    ):
        super().__init__()
        self.inpainter = inpainter
        self.num_spans = num_spans

    def forward(self, start: Tensor, keep_start: bool = False) -> Tensor:
        half_length = start.shape[2] // 2

        spans = list(start.chunk(chunks=2, dim=-1)) if keep_start else []
        # Inpaint second half from first half
        inpaint = torch.zeros_like(start)
        inpaint[:, :, :half_length] = start[:, :, half_length:]
        inpaint_mask = sequential_mask(like=start, start=half_length)

        for i in range(self.num_spans):
            # Inpaint second half
            span = self.inpainter(inpaint=inpaint, inpaint_mask=inpaint_mask)
            # Replace first half with generated second half
            second_half = span[:, :, half_length:]
            inpaint[:, :, :half_length] = second_half
            # Save generated span
            spans.append(second_half)

        return torch.cat(spans, dim=2)


class XDiffusion(nn.Module):
    def __init__(self, type: str, net: nn.Module, **kwargs):
        super().__init__()

        diffusion_classes = [VDiffusion, KDiffusion, VKDiffusion]
        aliases = [t.alias for t in diffusion_classes]  # type: ignore
        message = f"type='{type}' must be one of {*aliases,}"
        assert type in aliases, message
        self.net = net

        # Instantiate the diffusion wrapper whose alias matches `type`
        for diffusion_class in diffusion_classes:
            if diffusion_class.alias == type:  # type: ignore
                self.diffusion = diffusion_class(net=net, **kwargs)

    def forward(self, *args, **kwargs) -> Tensor:
        return self.diffusion(*args, **kwargs)

    def sample(
        self,
        noise: Tensor,
        num_steps: int,
        sigma_schedule: Schedule,
        sampler: Sampler,
        clamp: bool,
        **kwargs,
    ) -> Tensor:
        diffusion_sampler = DiffusionSampler(
            diffusion=self.diffusion,
            sampler=sampler,
            sigma_schedule=sigma_schedule,
            num_steps=num_steps,
            clamp=clamp,
        )
        return diffusion_sampler(noise, **kwargs)
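Editor's note: a minimal sketch (ours, not part of the repo) of driving ADPM2Sampler directly with a toy denoise function; a real setup would wrap a trained net in KDiffusion and go through DiffusionSampler, which adds the sigma schedule and the compatibility check.

# Sketch (ours): toy_denoise_fn and the linear sigma schedule are placeholders.
import torch

def toy_denoise_fn(x, sigma):
    return x * 0.5  # pretend denoiser: shrink toward zero

sampler = ADPM2Sampler(rho=1.0)
sigmas = torch.linspace(3.0, 0.0, steps=10)  # high -> low noise
noise = torch.randn(1, 1, 64)
sample = sampler(noise, fn=toy_denoise_fn, sigmas=sigmas, num_steps=10)
print(sample.shape)  # torch.Size([1, 1, 64])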
Modules/diffusion/utils.py
ADDED
@@ -0,0 +1,82 @@
from functools import reduce
from inspect import isfunction
from math import ceil, floor, log2, pi
from typing import Callable, Dict, List, Optional, Sequence, Tuple, TypeVar, Union

import torch
import torch.nn.functional as F
from einops import rearrange
from torch import Generator, Tensor
from typing_extensions import TypeGuard

T = TypeVar("T")


def exists(val: Optional[T]) -> TypeGuard[T]:
    return val is not None


def iff(condition: bool, value: T) -> Optional[T]:
    return value if condition else None


def is_sequence(obj: T) -> TypeGuard[Union[list, tuple]]:
    return isinstance(obj, (list, tuple))


def default(val: Optional[T], d: Union[Callable[..., T], T]) -> T:
    if exists(val):
        return val
    return d() if isfunction(d) else d


def to_list(val: Union[T, Sequence[T]]) -> List[T]:
    if isinstance(val, tuple):
        return list(val)
    if isinstance(val, list):
        return val
    return [val]  # type: ignore


def prod(vals: Sequence[int]) -> int:
    return reduce(lambda x, y: x * y, vals)


def closest_power_2(x: float) -> int:
    exponent = log2(x)
    distance_fn = lambda z: abs(x - 2 ** z)  # noqa
    exponent_closest = min((floor(exponent), ceil(exponent)), key=distance_fn)
    return 2 ** int(exponent_closest)


def rand_bool(shape, proba, device=None):
    if proba == 1:
        return torch.ones(shape, device=device, dtype=torch.bool)
    elif proba == 0:
        return torch.zeros(shape, device=device, dtype=torch.bool)
    else:
        return torch.bernoulli(torch.full(shape, proba, device=device)).to(torch.bool)

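Editor's note: a quick illustration (ours) of the two numeric helpers above:

# Sketch (ours): expected behaviour of closest_power_2 and rand_bool.
import torch

print(closest_power_2(45.0))   # 32, since |45 - 32| = 13 < |45 - 64| = 19
mask = rand_bool((2, 4), proba=0.5)
print(mask.dtype, mask.shape)  # torch.bool torch.Size([2, 4])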
"""
Kwargs Utils
"""


def group_dict_by_prefix(prefix: str, d: Dict) -> Tuple[Dict, Dict]:
    return_dicts: Tuple[Dict, Dict] = ({}, {})
    for key in d.keys():
        no_prefix = int(not key.startswith(prefix))
        return_dicts[no_prefix][key] = d[key]
    return return_dicts


def groupby(prefix: str, d: Dict, keep_prefix: bool = False) -> Tuple[Dict, Dict]:
    kwargs_with_prefix, kwargs = group_dict_by_prefix(prefix, d)
    if keep_prefix:
        return kwargs_with_prefix, kwargs
    kwargs_no_prefix = {k[len(prefix):]: v for k, v in kwargs_with_prefix.items()}
    return kwargs_no_prefix, kwargs


def prefix_dict(prefix: str, d: Dict) -> Dict:
    return {prefix + str(k): v for k, v in d.items()}
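Editor's note: these kwargs helpers let a composite module route prefixed keyword arguments to a sub-module; a small sketch (dict contents ours):

# Sketch (ours): split kwargs by prefix, as a wrapper module might do.
kwargs = {"diffusion_sigma_data": 0.1, "diffusion_dynamic_threshold": 0.0, "lr": 1e-4}
diffusion_kwargs, rest = groupby("diffusion_", kwargs)
print(diffusion_kwargs)  # {'sigma_data': 0.1, 'dynamic_threshold': 0.0}
print(rest)              # {'lr': 0.0001}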
Modules/discriminators.py
ADDED
@@ -0,0 +1,188 @@
import torch
import torch.nn.functional as F
import torch.nn as nn
from torch.nn import Conv1d, AvgPool1d, Conv2d
from torch.nn.utils import weight_norm, spectral_norm

from .utils import get_padding

LRELU_SLOPE = 0.1

def stft(x, fft_size, hop_size, win_length, window):
    """Perform STFT and convert to magnitude spectrogram.
    Args:
        x (Tensor): Input signal tensor (B, T).
        fft_size (int): FFT size.
        hop_size (int): Hop size.
        win_length (int): Window length.
        window (Tensor): Window tensor.
    Returns:
        Tensor: Magnitude spectrogram (B, #frames, fft_size // 2 + 1).
    """
    # With return_complex=True the result is a complex tensor, so the
    # magnitude comes straight from torch.abs.
    x_stft = torch.stft(x, fft_size, hop_size, win_length, window,
                        return_complex=True)

    return torch.abs(x_stft).transpose(2, 1)

class SpecDiscriminator(nn.Module):
    """docstring for Discriminator."""

    def __init__(self, fft_size=1024, shift_size=120, win_length=600, window="hann_window", use_spectral_norm=False):
        super(SpecDiscriminator, self).__init__()
        norm_f = spectral_norm if use_spectral_norm else weight_norm
        self.fft_size = fft_size
        self.shift_size = shift_size
        self.win_length = win_length
        self.window = getattr(torch, window)(win_length)
        self.discriminators = nn.ModuleList([
            norm_f(nn.Conv2d(1, 32, kernel_size=(3, 9), padding=(1, 4))),
            norm_f(nn.Conv2d(32, 32, kernel_size=(3, 9), stride=(1, 2), padding=(1, 4))),
            norm_f(nn.Conv2d(32, 32, kernel_size=(3, 9), stride=(1, 2), padding=(1, 4))),
            norm_f(nn.Conv2d(32, 32, kernel_size=(3, 9), stride=(1, 2), padding=(1, 4))),
            norm_f(nn.Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))),
        ])

        self.out = norm_f(nn.Conv2d(32, 1, 3, 1, 1))

    def forward(self, y):

        fmap = []
        y = y.squeeze(1)
        # Move the window onto the input's device so the module also runs on CPU.
        y = stft(y, self.fft_size, self.shift_size, self.win_length, self.window.to(y.device))
        y = y.unsqueeze(1)
        for i, d in enumerate(self.discriminators):
            y = d(y)
            y = F.leaky_relu(y, LRELU_SLOPE)
            fmap.append(y)

        y = self.out(y)
        fmap.append(y)

        return torch.flatten(y, 1, -1), fmap

class MultiResSpecDiscriminator(torch.nn.Module):

    def __init__(self,
                 fft_sizes=[1024, 2048, 512],
                 hop_sizes=[120, 240, 50],
                 win_lengths=[600, 1200, 240],
                 window="hann_window"):

        super(MultiResSpecDiscriminator, self).__init__()
        self.discriminators = nn.ModuleList([
            SpecDiscriminator(fft_sizes[0], hop_sizes[0], win_lengths[0], window),
            SpecDiscriminator(fft_sizes[1], hop_sizes[1], win_lengths[1], window),
            SpecDiscriminator(fft_sizes[2], hop_sizes[2], win_lengths[2], window)
        ])

    def forward(self, y, y_hat):
        y_d_rs = []
        y_d_gs = []
        fmap_rs = []
        fmap_gs = []
        for i, d in enumerate(self.discriminators):
            y_d_r, fmap_r = d(y)
            y_d_g, fmap_g = d(y_hat)
            y_d_rs.append(y_d_r)
            fmap_rs.append(fmap_r)
            y_d_gs.append(y_d_g)
            fmap_gs.append(fmap_g)

        return y_d_rs, y_d_gs, fmap_rs, fmap_gs

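Editor's note: a shape check (ours) for the stft helper feeding the spectral discriminators above:

# Sketch (ours): magnitude-spectrogram shape produced by stft().
import torch

wav = torch.randn(2, 16000)  # (B, T)
mag = stft(wav, fft_size=1024, hop_size=120, win_length=600,
           window=torch.hann_window(600))
print(mag.shape)  # torch.Size([2, 134, 513]) == (B, frames, fft_size // 2 + 1)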
class DiscriminatorP(torch.nn.Module):
    def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False):
        super(DiscriminatorP, self).__init__()
        self.period = period
        norm_f = spectral_norm if use_spectral_norm else weight_norm
        self.convs = nn.ModuleList([
            norm_f(Conv2d(1, 32, (kernel_size, 1), (stride, 1), padding=(get_padding(5, 1), 0))),
            norm_f(Conv2d(32, 128, (kernel_size, 1), (stride, 1), padding=(get_padding(5, 1), 0))),
            norm_f(Conv2d(128, 512, (kernel_size, 1), (stride, 1), padding=(get_padding(5, 1), 0))),
            norm_f(Conv2d(512, 1024, (kernel_size, 1), (stride, 1), padding=(get_padding(5, 1), 0))),
            norm_f(Conv2d(1024, 1024, (kernel_size, 1), 1, padding=(2, 0))),
        ])
        self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0)))

    def forward(self, x):
        fmap = []

        # 1d to 2d
        b, c, t = x.shape
        if t % self.period != 0:  # pad first
            n_pad = self.period - (t % self.period)
            x = F.pad(x, (0, n_pad), "reflect")
            t = t + n_pad
        x = x.view(b, c, t // self.period, self.period)

        for l in self.convs:
            x = l(x)
            x = F.leaky_relu(x, LRELU_SLOPE)
            fmap.append(x)
        x = self.conv_post(x)
        fmap.append(x)
        x = torch.flatten(x, 1, -1)

        return x, fmap


class MultiPeriodDiscriminator(torch.nn.Module):
    def __init__(self):
        super(MultiPeriodDiscriminator, self).__init__()
        self.discriminators = nn.ModuleList([
            DiscriminatorP(2),
            DiscriminatorP(3),
            DiscriminatorP(5),
            DiscriminatorP(7),
            DiscriminatorP(11),
        ])

    def forward(self, y, y_hat):
        y_d_rs = []
        y_d_gs = []
        fmap_rs = []
        fmap_gs = []
        for i, d in enumerate(self.discriminators):
            y_d_r, fmap_r = d(y)
            y_d_g, fmap_g = d(y_hat)
            y_d_rs.append(y_d_r)
            fmap_rs.append(fmap_r)
            y_d_gs.append(y_d_g)
            fmap_gs.append(fmap_g)

        return y_d_rs, y_d_gs, fmap_rs, fmap_gs

class WavLMDiscriminator(nn.Module):
    """docstring for Discriminator."""

    def __init__(self, slm_hidden=768,
                 slm_layers=13,
                 initial_channel=64,
                 use_spectral_norm=False):
        super(WavLMDiscriminator, self).__init__()
        norm_f = spectral_norm if use_spectral_norm else weight_norm
        self.pre = norm_f(Conv1d(slm_hidden * slm_layers, initial_channel, 1, 1, padding=0))

        self.convs = nn.ModuleList([
            norm_f(nn.Conv1d(initial_channel, initial_channel * 2, kernel_size=5, padding=2)),
            norm_f(nn.Conv1d(initial_channel * 2, initial_channel * 4, kernel_size=5, padding=2)),
            norm_f(nn.Conv1d(initial_channel * 4, initial_channel * 4, 5, 1, padding=2)),
        ])

        self.conv_post = norm_f(Conv1d(initial_channel * 4, 1, 3, 1, padding=1))

    def forward(self, x):
        x = self.pre(x)

        fmap = []
        for l in self.convs:
            x = l(x)
            x = F.leaky_relu(x, LRELU_SLOPE)
            fmap.append(x)
        x = self.conv_post(x)
        x = torch.flatten(x, 1, -1)

        return x
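Editor's note: both discriminator ensembles share the same forward contract, returning per-branch logits and feature maps for real and generated audio; a minimal sketch (shapes ours):

# Sketch (ours): per-period logits and feature maps from the MPD.
import torch

mpd = MultiPeriodDiscriminator()
real = torch.randn(1, 1, 8192)  # (B, 1, T) waveform
fake = torch.randn(1, 1, 8192)
y_d_rs, y_d_gs, fmap_rs, fmap_gs = mpd(real, fake)
print(len(y_d_rs))      # 5: one branch per period (2, 3, 5, 7, 11)
print(y_d_rs[0].shape)  # flattened logits of the period-2 branch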
Modules/hifigan.py
ADDED
@@ -0,0 +1,477 @@
import torch
import torch.nn.functional as F
import torch.nn as nn
from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d
from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm
from .utils import init_weights, get_padding

import math
import random
import numpy as np

LRELU_SLOPE = 0.1

class AdaIN1d(nn.Module):
    def __init__(self, style_dim, num_features):
        super().__init__()
        self.norm = nn.InstanceNorm1d(num_features, affine=False)
        self.fc = nn.Linear(style_dim, num_features*2)

    def forward(self, x, s):
        h = self.fc(s)
        h = h.view(h.size(0), h.size(1), 1)
        gamma, beta = torch.chunk(h, chunks=2, dim=1)
        return (1 + gamma) * self.norm(x) + beta

class AdaINResBlock1(torch.nn.Module):
    def __init__(self, channels, kernel_size=3, dilation=(1, 3, 5), style_dim=64):
        super(AdaINResBlock1, self).__init__()
        self.convs1 = nn.ModuleList([
            weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0],
                               padding=get_padding(kernel_size, dilation[0]))),
            weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1],
                               padding=get_padding(kernel_size, dilation[1]))),
            weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[2],
                               padding=get_padding(kernel_size, dilation[2])))
        ])
        self.convs1.apply(init_weights)

        self.convs2 = nn.ModuleList([
            weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1,
                               padding=get_padding(kernel_size, 1))),
            weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1,
                               padding=get_padding(kernel_size, 1))),
            weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1,
                               padding=get_padding(kernel_size, 1)))
        ])
        self.convs2.apply(init_weights)

        self.adain1 = nn.ModuleList([
            AdaIN1d(style_dim, channels),
            AdaIN1d(style_dim, channels),
            AdaIN1d(style_dim, channels),
        ])

        self.adain2 = nn.ModuleList([
            AdaIN1d(style_dim, channels),
            AdaIN1d(style_dim, channels),
            AdaIN1d(style_dim, channels),
        ])

        self.alpha1 = nn.ParameterList([nn.Parameter(torch.ones(1, channels, 1)) for i in range(len(self.convs1))])
        self.alpha2 = nn.ParameterList([nn.Parameter(torch.ones(1, channels, 1)) for i in range(len(self.convs2))])

    def forward(self, x, s):
        for c1, c2, n1, n2, a1, a2 in zip(self.convs1, self.convs2, self.adain1, self.adain2, self.alpha1, self.alpha2):
            xt = n1(x, s)
            xt = xt + (1 / a1) * (torch.sin(a1 * xt) ** 2)  # Snake1D
            xt = c1(xt)
            xt = n2(xt, s)
            xt = xt + (1 / a2) * (torch.sin(a2 * xt) ** 2)  # Snake1D
            xt = c2(xt)
            x = xt + x
        return x

    def remove_weight_norm(self):
        for l in self.convs1:
            remove_weight_norm(l)
        for l in self.convs2:
            remove_weight_norm(l)

class SineGen(torch.nn.Module):
    """ Definition of sine generator
    SineGen(samp_rate, harmonic_num = 0,
            sine_amp = 0.1, noise_std = 0.003,
            voiced_threshold = 0,
            flag_for_pulse=False)
    samp_rate: sampling rate in Hz
    harmonic_num: number of harmonic overtones (default 0)
    sine_amp: amplitude of sine waveform (default 0.1)
    noise_std: std of Gaussian noise (default 0.003)
    voiced_threshold: F0 threshold for U/V classification (default 0)
    flag_for_pulse: this SineGen is used inside PulseGen (default False)
    Note: when flag_for_pulse is True, the first time step of a voiced
        segment is always sin(np.pi) or cos(0)
    """

    def __init__(self, samp_rate, upsample_scale, harmonic_num=0,
                 sine_amp=0.1, noise_std=0.003,
                 voiced_threshold=0,
                 flag_for_pulse=False):
        super(SineGen, self).__init__()
        self.sine_amp = sine_amp
        self.noise_std = noise_std
        self.harmonic_num = harmonic_num
        self.dim = self.harmonic_num + 1
        self.sampling_rate = samp_rate
        self.voiced_threshold = voiced_threshold
        self.flag_for_pulse = flag_for_pulse
        self.upsample_scale = upsample_scale

    def _f02uv(self, f0):
        # generate uv signal
        uv = (f0 > self.voiced_threshold).type(torch.float32)
        return uv

    def _f02sine(self, f0_values):
        """ f0_values: (batchsize, length, dim)
            where dim indicates fundamental tone and overtones
        """
        # convert to F0 in rad. The integer part n can be ignored
        # because 2 * np.pi * n doesn't affect phase
        rad_values = (f0_values / self.sampling_rate) % 1

        # initial phase noise (no noise for fundamental component)
        rand_ini = torch.rand(f0_values.shape[0], f0_values.shape[2],
                              device=f0_values.device)
        rand_ini[:, 0] = 0
        rad_values[:, 0, :] = rad_values[:, 0, :] + rand_ini

        # instantaneous phase sine[t] = sin(2*pi \sum_i=1^{t} rad)
        if not self.flag_for_pulse:
            # downsample the per-sample phase increments to the frame rate,
            # integrate there, then upsample the phase back to the waveform
            # rate; this sidesteps cumsum overflow over very long sequences
            rad_values = torch.nn.functional.interpolate(rad_values.transpose(1, 2),
                                                         scale_factor=1/self.upsample_scale,
                                                         mode="linear").transpose(1, 2)

            phase = torch.cumsum(rad_values, dim=1) * 2 * np.pi
            phase = torch.nn.functional.interpolate(phase.transpose(1, 2) * self.upsample_scale,
                                                    scale_factor=self.upsample_scale, mode="linear").transpose(1, 2)
            sines = torch.sin(phase)

        else:
            # If necessary, make sure that the first time step of every
            # voiced segment is sin(pi) or cos(0)
            # This is used for pulse-train generation

            # identify the last time step in unvoiced segments
            uv = self._f02uv(f0_values)
            uv_1 = torch.roll(uv, shifts=-1, dims=1)
            uv_1[:, -1, :] = 1
            u_loc = (uv < 1) * (uv_1 > 0)

            # get the instantaneous phase
            tmp_cumsum = torch.cumsum(rad_values, dim=1)
            # different batches need to be processed differently
            for idx in range(f0_values.shape[0]):
                temp_sum = tmp_cumsum[idx, u_loc[idx, :, 0], :]
                temp_sum[1:, :] = temp_sum[1:, :] - temp_sum[0:-1, :]
                # stores the accumulation of i.phase within
                # each voiced segment
                tmp_cumsum[idx, :, :] = 0
                tmp_cumsum[idx, u_loc[idx, :, 0], :] = temp_sum

            # rad_values - tmp_cumsum: remove the accumulation of i.phase
            # within the previous voiced segment.
            i_phase = torch.cumsum(rad_values - tmp_cumsum, dim=1)

            # get the sines
            sines = torch.cos(i_phase * 2 * np.pi)
        return sines

    def forward(self, f0):
        """ sine_tensor, uv = forward(f0)
        input F0: tensor(batchsize=1, length, dim=1)
            f0 for unvoiced steps should be 0
        output sine_tensor: tensor(batchsize=1, length, dim)
        output uv: tensor(batchsize=1, length, 1)
        """
        # fundamental component and harmonics: F0 * (1, 2, ..., harmonic_num + 1)
        fn = torch.multiply(f0, torch.FloatTensor([[range(1, self.harmonic_num + 2)]]).to(f0.device))

        # generate sine waveforms
        sine_waves = self._f02sine(fn) * self.sine_amp

        # generate uv signal
        uv = self._f02uv(f0)

        # noise: for unvoiced should be similar to sine_amp
        #        std = self.sine_amp/3 -> max value ~ self.sine_amp
        #        for voiced regions is self.noise_std
        noise_amp = uv * self.noise_std + (1 - uv) * self.sine_amp / 3
        noise = noise_amp * torch.randn_like(sine_waves)

        # first: set the unvoiced part to 0 by uv
        # then: additive noise
        sine_waves = sine_waves * uv + noise
        return sine_waves, uv, noise


class SourceModuleHnNSF(torch.nn.Module):
    """ SourceModule for hn-nsf
    SourceModule(sampling_rate, harmonic_num=0, sine_amp=0.1,
                 add_noise_std=0.003, voiced_threshod=0)
    sampling_rate: sampling_rate in Hz
    harmonic_num: number of harmonics above F0 (default: 0)
    sine_amp: amplitude of sine source signal (default: 0.1)
    add_noise_std: std of additive Gaussian noise (default: 0.003)
        note that amplitude of noise in unvoiced is decided
        by sine_amp
    voiced_threshold: threshold to set U/V given F0 (default: 0)
    Sine_source, noise_source = SourceModuleHnNSF(F0_sampled)
    F0_sampled (batchsize, length, 1)
    Sine_source (batchsize, length, 1)
    noise_source (batchsize, length 1)
    uv (batchsize, length, 1)
    """

    def __init__(self, sampling_rate, upsample_scale, harmonic_num=0, sine_amp=0.1,
                 add_noise_std=0.003, voiced_threshod=0):
        super(SourceModuleHnNSF, self).__init__()

        self.sine_amp = sine_amp
        self.noise_std = add_noise_std

        # to produce sine waveforms
        self.l_sin_gen = SineGen(sampling_rate, upsample_scale, harmonic_num,
                                 sine_amp, add_noise_std, voiced_threshod)

        # to merge source harmonics into a single excitation
        self.l_linear = torch.nn.Linear(harmonic_num + 1, 1)
        self.l_tanh = torch.nn.Tanh()

    def forward(self, x):
        """
        Sine_source, noise_source = SourceModuleHnNSF(F0_sampled)
        F0_sampled (batchsize, length, 1)
        Sine_source (batchsize, length, 1)
        noise_source (batchsize, length 1)
        """
        # source for harmonic branch
        with torch.no_grad():
            sine_wavs, uv, _ = self.l_sin_gen(x)
        sine_merge = self.l_tanh(self.l_linear(sine_wavs))

        # source for noise branch, in the same shape as uv
        noise = torch.randn_like(uv) * self.sine_amp / 3
        return sine_merge, noise, uv

def padDiff(x):
    return F.pad(F.pad(x, (0,0,-1,1), 'constant', 0) - x, (0,0,0,-1), 'constant', 0)
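Editor's note: a shape sketch (ours) of the harmonic-plus-noise source; upsample_scale must equal the product of the generator's upsample rates so the F0 increments can be integrated at the frame rate and re-expanded to the waveform rate:

# Sketch (ours): F0 already expanded to the waveform rate (length 3000 = 10 * 300).
import torch

source = SourceModuleHnNSF(sampling_rate=24000, upsample_scale=300, harmonic_num=8)
f0 = torch.full((1, 3000, 1), 200.0)     # constant 200 Hz track
har, noise, uv = source(f0)
print(har.shape, noise.shape, uv.shape)  # each torch.Size([1, 3000, 1])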

class Generator(torch.nn.Module):
    def __init__(self, style_dim, resblock_kernel_sizes, upsample_rates, upsample_initial_channel, resblock_dilation_sizes, upsample_kernel_sizes):
        super(Generator, self).__init__()
        self.num_kernels = len(resblock_kernel_sizes)
        self.num_upsamples = len(upsample_rates)
        resblock = AdaINResBlock1

        self.m_source = SourceModuleHnNSF(
            sampling_rate=24000,
            upsample_scale=np.prod(upsample_rates),
            harmonic_num=8, voiced_threshod=10)

        self.f0_upsamp = torch.nn.Upsample(scale_factor=np.prod(upsample_rates))
        self.noise_convs = nn.ModuleList()
        self.ups = nn.ModuleList()
        self.noise_res = nn.ModuleList()

        for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)):
            c_cur = upsample_initial_channel // (2 ** (i + 1))

            self.ups.append(weight_norm(ConvTranspose1d(upsample_initial_channel//(2**i),
                                                        upsample_initial_channel//(2**(i+1)),
                                                        k, u, padding=(u//2 + u%2), output_padding=u%2)))

            if i + 1 < len(upsample_rates):
                stride_f0 = np.prod(upsample_rates[i + 1:])
                self.noise_convs.append(Conv1d(
                    1, c_cur, kernel_size=stride_f0 * 2, stride=stride_f0, padding=(stride_f0+1) // 2))
                self.noise_res.append(resblock(c_cur, 7, [1,3,5], style_dim))
            else:
                self.noise_convs.append(Conv1d(1, c_cur, kernel_size=1))
                self.noise_res.append(resblock(c_cur, 11, [1,3,5], style_dim))

        self.resblocks = nn.ModuleList()

        self.alphas = nn.ParameterList()
        self.alphas.append(nn.Parameter(torch.ones(1, upsample_initial_channel, 1)))

        for i in range(len(self.ups)):
            ch = upsample_initial_channel//(2**(i+1))
            self.alphas.append(nn.Parameter(torch.ones(1, ch, 1)))

            for j, (k, d) in enumerate(zip(resblock_kernel_sizes, resblock_dilation_sizes)):
                self.resblocks.append(resblock(ch, k, d, style_dim))

        self.conv_post = weight_norm(Conv1d(ch, 1, 7, 1, padding=3))
        self.ups.apply(init_weights)
        self.conv_post.apply(init_weights)

    def forward(self, x, s, f0):

        f0 = self.f0_upsamp(f0[:, None]).transpose(1, 2)  # bs,n,t

        har_source, noi_source, uv = self.m_source(f0)
        har_source = har_source.transpose(1, 2)

        for i in range(self.num_upsamples):
            x = x + (1 / self.alphas[i]) * (torch.sin(self.alphas[i] * x) ** 2)
            x_source = self.noise_convs[i](har_source)
            x_source = self.noise_res[i](x_source, s)

            x = self.ups[i](x)
            x = x + x_source

            xs = None
            for j in range(self.num_kernels):
                if xs is None:
                    xs = self.resblocks[i*self.num_kernels+j](x, s)
                else:
                    xs += self.resblocks[i*self.num_kernels+j](x, s)
            x = xs / self.num_kernels
        x = x + (1 / self.alphas[i+1]) * (torch.sin(self.alphas[i+1] * x) ** 2)
        x = self.conv_post(x)
        x = torch.tanh(x)

        return x

    def remove_weight_norm(self):
        print('Removing weight norm...')
        for l in self.ups:
            remove_weight_norm(l)
        for l in self.resblocks:
            l.remove_weight_norm()
        # This Generator has no conv_pre layer; only conv_post carries
        # weight norm at the top level.
        remove_weight_norm(self.conv_post)


class AdainResBlk1d(nn.Module):
    def __init__(self, dim_in, dim_out, style_dim=64, actv=nn.LeakyReLU(0.2),
                 upsample='none', dropout_p=0.0):
        super().__init__()
        self.actv = actv
        self.upsample_type = upsample
        self.upsample = UpSample1d(upsample)
        self.learned_sc = dim_in != dim_out
        self._build_weights(dim_in, dim_out, style_dim)
        self.dropout = nn.Dropout(dropout_p)

        if upsample == 'none':
            self.pool = nn.Identity()
        else:
            self.pool = weight_norm(nn.ConvTranspose1d(dim_in, dim_in, kernel_size=3, stride=2, groups=dim_in, padding=1, output_padding=1))

    def _build_weights(self, dim_in, dim_out, style_dim):
        self.conv1 = weight_norm(nn.Conv1d(dim_in, dim_out, 3, 1, 1))
        self.conv2 = weight_norm(nn.Conv1d(dim_out, dim_out, 3, 1, 1))
        self.norm1 = AdaIN1d(style_dim, dim_in)
        self.norm2 = AdaIN1d(style_dim, dim_out)
        if self.learned_sc:
            self.conv1x1 = weight_norm(nn.Conv1d(dim_in, dim_out, 1, 1, 0, bias=False))

    def _shortcut(self, x):
        x = self.upsample(x)
        if self.learned_sc:
            x = self.conv1x1(x)
        return x

    def _residual(self, x, s):
        x = self.norm1(x, s)
        x = self.actv(x)
        x = self.pool(x)
        x = self.conv1(self.dropout(x))
        x = self.norm2(x, s)
        x = self.actv(x)
        x = self.conv2(self.dropout(x))
        return x

    def forward(self, x, s):
        out = self._residual(x, s)
        out = (out + self._shortcut(x)) / math.sqrt(2)
        return out

class UpSample1d(nn.Module):
    def __init__(self, layer_type):
        super().__init__()
        self.layer_type = layer_type

    def forward(self, x):
        if self.layer_type == 'none':
            return x
        else:
            return F.interpolate(x, scale_factor=2, mode='nearest')

class Decoder(nn.Module):
    def __init__(self, dim_in=512, F0_channel=512, style_dim=64, dim_out=80,
                 resblock_kernel_sizes = [3,7,11],
                 upsample_rates = [10,5,3,2],
                 upsample_initial_channel=512,
                 resblock_dilation_sizes=[[1,3,5], [1,3,5], [1,3,5]],
                 upsample_kernel_sizes=[20,10,6,4]):
        super().__init__()

        self.decode = nn.ModuleList()

        self.encode = AdainResBlk1d(dim_in + 2, 1024, style_dim)

        self.decode.append(AdainResBlk1d(1024 + 2 + 64, 1024, style_dim))
        self.decode.append(AdainResBlk1d(1024 + 2 + 64, 1024, style_dim))
        self.decode.append(AdainResBlk1d(1024 + 2 + 64, 1024, style_dim))
        self.decode.append(AdainResBlk1d(1024 + 2 + 64, 512, style_dim, upsample=True))

        self.F0_conv = weight_norm(nn.Conv1d(1, 1, kernel_size=3, stride=2, groups=1, padding=1))

        self.N_conv = weight_norm(nn.Conv1d(1, 1, kernel_size=3, stride=2, groups=1, padding=1))

        self.asr_res = nn.Sequential(
            weight_norm(nn.Conv1d(512, 64, kernel_size=1)),
        )

        self.generator = Generator(style_dim, resblock_kernel_sizes, upsample_rates, upsample_initial_channel, resblock_dilation_sizes, upsample_kernel_sizes)

    def forward(self, asr, F0_curve, N, s):
        if self.training:
            # Randomly smooth the F0 and energy curves with a moving average
            # so the decoder does not overfit to exact prosody contours.
            downlist = [0, 3, 7]
            F0_down = downlist[random.randint(0, 2)]
            downlist = [0, 3, 7, 15]
            N_down = downlist[random.randint(0, 3)]
            if F0_down:
                F0_curve = nn.functional.conv1d(F0_curve.unsqueeze(1), torch.ones(1, 1, F0_down).to(F0_curve.device), padding=F0_down//2).squeeze(1) / F0_down
            if N_down:
                N = nn.functional.conv1d(N.unsqueeze(1), torch.ones(1, 1, N_down).to(N.device), padding=N_down//2).squeeze(1) / N_down

        F0 = self.F0_conv(F0_curve.unsqueeze(1))
        N = self.N_conv(N.unsqueeze(1))

        x = torch.cat([asr, F0, N], axis=1)
        x = self.encode(x, s)

        asr_res = self.asr_res(asr)

        res = True
        for block in self.decode:
            if res:
                x = torch.cat([x, asr_res, F0, N], axis=1)
            x = block(x, s)
            if block.upsample_type != "none":
                res = False

        x = self.generator(x, s, F0_curve)
        return x

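Editor's note: a shape walk-through (ours) of the decoder path with the defaults above; asr features carry T frames, the F0/energy curves arrive at 2*T and are halved by the stride-2 convs, and the generator upsamples by 10*5*3*2 = 300:

# Sketch (ours): all sizes illustrative; eval() skips the random prosody smoothing.
import torch

decoder = Decoder(dim_in=512, style_dim=64).eval()
T = 50
asr = torch.randn(1, 512, T)                  # aligned phoneme features
F0_curve = torch.randn(1, 2 * T).abs() * 200  # F0 in Hz
N = torch.randn(1, 2 * T)                     # energy curve
s = torch.randn(1, 64)                        # style vector
wav = decoder(asr, F0_curve, N, s)
print(wav.shape)  # torch.Size([1, 1, 30000]) == (1, 1, 2 * T * 300)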
Modules/istftnet.py
ADDED
|
@@ -0,0 +1,530 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import torch
|
| 2 |
+
import torch.nn.functional as F
|
| 3 |
+
import torch.nn as nn
|
| 4 |
+
from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d
|
| 5 |
+
from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm
|
| 6 |
+
from .utils import init_weights, get_padding
|
| 7 |
+
|
| 8 |
+
import math
|
| 9 |
+
import random
|
| 10 |
+
import numpy as np
|
| 11 |
+
from scipy.signal import get_window
|
| 12 |
+
|
| 13 |
+
LRELU_SLOPE = 0.1
|
| 14 |
+
|
| 15 |
+
class AdaIN1d(nn.Module):
|
| 16 |
+
def __init__(self, style_dim, num_features):
|
| 17 |
+
super().__init__()
|
| 18 |
+
self.norm = nn.InstanceNorm1d(num_features, affine=False)
|
| 19 |
+
self.fc = nn.Linear(style_dim, num_features*2)
|
| 20 |
+
|
| 21 |
+
def forward(self, x, s):
|
| 22 |
+
h = self.fc(s)
|
| 23 |
+
h = h.view(h.size(0), h.size(1), 1)
|
| 24 |
+
gamma, beta = torch.chunk(h, chunks=2, dim=1)
|
| 25 |
+
return (1 + gamma) * self.norm(x) + beta
|
| 26 |
+
|
| 27 |
+
class AdaINResBlock1(torch.nn.Module):
|
| 28 |
+
def __init__(self, channels, kernel_size=3, dilation=(1, 3, 5), style_dim=64):
|
| 29 |
+
super(AdaINResBlock1, self).__init__()
|
| 30 |
+
self.convs1 = nn.ModuleList([
|
| 31 |
+
weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0],
|
| 32 |
+
padding=get_padding(kernel_size, dilation[0]))),
|
| 33 |
+
weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1],
|
| 34 |
+
padding=get_padding(kernel_size, dilation[1]))),
|
| 35 |
+
weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[2],
|
| 36 |
+
padding=get_padding(kernel_size, dilation[2])))
|
| 37 |
+
])
|
| 38 |
+
self.convs1.apply(init_weights)
|
| 39 |
+
|
| 40 |
+
self.convs2 = nn.ModuleList([
|
| 41 |
+
weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1,
|
| 42 |
+
padding=get_padding(kernel_size, 1))),
|
| 43 |
+
weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1,
|
| 44 |
+
padding=get_padding(kernel_size, 1))),
|
| 45 |
+
weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1,
|
| 46 |
+
padding=get_padding(kernel_size, 1)))
|
| 47 |
+
])
|
| 48 |
+
self.convs2.apply(init_weights)
|
| 49 |
+
|
| 50 |
+
self.adain1 = nn.ModuleList([
|
| 51 |
+
AdaIN1d(style_dim, channels),
|
| 52 |
+
AdaIN1d(style_dim, channels),
|
| 53 |
+
AdaIN1d(style_dim, channels),
|
| 54 |
+
])
|
| 55 |
+
|
| 56 |
+
self.adain2 = nn.ModuleList([
|
| 57 |
+
AdaIN1d(style_dim, channels),
|
| 58 |
+
AdaIN1d(style_dim, channels),
|
| 59 |
+
AdaIN1d(style_dim, channels),
|
| 60 |
+
])
|
| 61 |
+
|
| 62 |
+
self.alpha1 = nn.ParameterList([nn.Parameter(torch.ones(1, channels, 1)) for i in range(len(self.convs1))])
|
| 63 |
+
self.alpha2 = nn.ParameterList([nn.Parameter(torch.ones(1, channels, 1)) for i in range(len(self.convs2))])
|
| 64 |
+
|
| 65 |
+
|
| 66 |
+
def forward(self, x, s):
|
| 67 |
+
for c1, c2, n1, n2, a1, a2 in zip(self.convs1, self.convs2, self.adain1, self.adain2, self.alpha1, self.alpha2):
|
| 68 |
+
xt = n1(x, s)
|
| 69 |
+
xt = xt + (1 / a1) * (torch.sin(a1 * xt) ** 2) # Snake1D
|
| 70 |
+
xt = c1(xt)
|
| 71 |
+
xt = n2(xt, s)
|
| 72 |
+
xt = xt + (1 / a2) * (torch.sin(a2 * xt) ** 2) # Snake1D
|
| 73 |
+
xt = c2(xt)
|
| 74 |
+
x = xt + x
|
| 75 |
+
return x
|
| 76 |
+
|
| 77 |
+
def remove_weight_norm(self):
|
| 78 |
+
for l in self.convs1:
|
| 79 |
+
remove_weight_norm(l)
|
| 80 |
+
for l in self.convs2:
|
| 81 |
+
remove_weight_norm(l)
|
| 82 |
+
|
| 83 |
+
class TorchSTFT(torch.nn.Module):
|
| 84 |
+
def __init__(self, filter_length=800, hop_length=200, win_length=800, window='hann'):
|
| 85 |
+
super().__init__()
|
| 86 |
+
self.filter_length = filter_length
|
| 87 |
+
self.hop_length = hop_length
|
| 88 |
+
self.win_length = win_length
|
| 89 |
+
self.window = torch.from_numpy(get_window(window, win_length, fftbins=True).astype(np.float32))
|
| 90 |
+
|
| 91 |
+
def transform(self, input_data):
|
| 92 |
+
forward_transform = torch.stft(
|
| 93 |
+
input_data,
|
| 94 |
+
self.filter_length, self.hop_length, self.win_length, window=self.window.to(input_data.device),
|
| 95 |
+
return_complex=True)
|
| 96 |
+
|
| 97 |
+
return torch.abs(forward_transform), torch.angle(forward_transform)
|
| 98 |
+
|
| 99 |
+
def inverse(self, magnitude, phase):
|
| 100 |
+
inverse_transform = torch.istft(
|
| 101 |
+
magnitude * torch.exp(phase * 1j),
|
| 102 |
+
self.filter_length, self.hop_length, self.win_length, window=self.window.to(magnitude.device))
|
| 103 |
+
|
| 104 |
+
return inverse_transform.unsqueeze(-2) # unsqueeze to stay consistent with conv_transpose1d implementation
|
| 105 |
+
|
| 106 |
+
def forward(self, input_data):
|
| 107 |
+
self.magnitude, self.phase = self.transform(input_data)
|
| 108 |
+
reconstruction = self.inverse(self.magnitude, self.phase)
|
| 109 |
+
return reconstruction
|
| 110 |
+
|
| 111 |
+
class SineGen(torch.nn.Module):
|
| 112 |
+
""" Definition of sine generator
|
| 113 |
+
SineGen(samp_rate, harmonic_num = 0,
|
| 114 |
+
sine_amp = 0.1, noise_std = 0.003,
|
| 115 |
+
voiced_threshold = 0,
|
| 116 |
+
flag_for_pulse=False)
|
| 117 |
+
samp_rate: sampling rate in Hz
|
| 118 |
+
harmonic_num: number of harmonic overtones (default 0)
|
| 119 |
+
sine_amp: amplitude of sine-wavefrom (default 0.1)
|
| 120 |
+
noise_std: std of Gaussian noise (default 0.003)
|
| 121 |
+
voiced_thoreshold: F0 threshold for U/V classification (default 0)
|
| 122 |
+
flag_for_pulse: this SinGen is used inside PulseGen (default False)
|
| 123 |
+
Note: when flag_for_pulse is True, the first time step of a voiced
|
| 124 |
+
segment is always sin(np.pi) or cos(0)
|
| 125 |
+
"""
|
| 126 |
+
|
| 127 |
+
def __init__(self, samp_rate, upsample_scale, harmonic_num=0,
|
| 128 |
+
sine_amp=0.1, noise_std=0.003,
|
| 129 |
+
voiced_threshold=0,
|
| 130 |
+
flag_for_pulse=False):
|
| 131 |
+
super(SineGen, self).__init__()
|
| 132 |
+
self.sine_amp = sine_amp
|
| 133 |
+
self.noise_std = noise_std
|
| 134 |
+
self.harmonic_num = harmonic_num
|
| 135 |
+
self.dim = self.harmonic_num + 1
|
| 136 |
+
self.sampling_rate = samp_rate
|
| 137 |
+
self.voiced_threshold = voiced_threshold
|
| 138 |
+
self.flag_for_pulse = flag_for_pulse
|
| 139 |
+
self.upsample_scale = upsample_scale
|
| 140 |
+
|
| 141 |
+
def _f02uv(self, f0):
|
| 142 |
+
# generate uv signal
|
| 143 |
+
uv = (f0 > self.voiced_threshold).type(torch.float32)
|
| 144 |
+
return uv
|
| 145 |
+
|
| 146 |
+
def _f02sine(self, f0_values):
|
| 147 |
+
""" f0_values: (batchsize, length, dim)
|
| 148 |
+
where dim indicates fundamental tone and overtones
|
| 149 |
+
"""
|
| 150 |
+
# convert to F0 in rad. The interger part n can be ignored
|
| 151 |
+
# because 2 * np.pi * n doesn't affect phase
|
| 152 |
+
rad_values = (f0_values / self.sampling_rate) % 1
|
| 153 |
+
|
| 154 |
+
# initial phase noise (no noise for fundamental component)
|
| 155 |
+
rand_ini = torch.rand(f0_values.shape[0], f0_values.shape[2], \
|
| 156 |
+
device=f0_values.device)
|
| 157 |
+
        rand_ini[:, 0] = 0
        rad_values[:, 0, :] = rad_values[:, 0, :] + rand_ini

        # instantaneous phase sine[t] = sin(2*pi \sum_i=1 ^{t} rad)
        if not self.flag_for_pulse:
            # # for normal case

            # # To prevent torch.cumsum numerical overflow,
            # # it is necessary to add -1 whenever \sum_k=1^n rad_value_k > 1.
            # # Buffer tmp_over_one_idx indicates the time step to add -1.
            # # This will not change F0 of sine because (x-1) * 2*pi = x * 2*pi
            # tmp_over_one = torch.cumsum(rad_values, 1) % 1
            # tmp_over_one_idx = (padDiff(tmp_over_one)) < 0
            # cumsum_shift = torch.zeros_like(rad_values)
            # cumsum_shift[:, 1:, :] = tmp_over_one_idx * -1.0

            # phase = torch.cumsum(rad_values, dim=1) * 2 * np.pi
            rad_values = torch.nn.functional.interpolate(rad_values.transpose(1, 2),
                                                         scale_factor=1/self.upsample_scale,
                                                         mode="linear").transpose(1, 2)

            # tmp_over_one = torch.cumsum(rad_values, 1) % 1
            # tmp_over_one_idx = (padDiff(tmp_over_one)) < 0
            # cumsum_shift = torch.zeros_like(rad_values)
            # cumsum_shift[:, 1:, :] = tmp_over_one_idx * -1.0

            phase = torch.cumsum(rad_values, dim=1) * 2 * np.pi
            phase = torch.nn.functional.interpolate(phase.transpose(1, 2) * self.upsample_scale,
                                                    scale_factor=self.upsample_scale, mode="linear").transpose(1, 2)
            sines = torch.sin(phase)

        else:
            # If necessary, make sure that the first time step of every
            # voiced segment is sin(pi) or cos(0).
            # This is used for pulse-train generation.

            # identify the last time step in unvoiced segments
            uv = self._f02uv(f0_values)
            uv_1 = torch.roll(uv, shifts=-1, dims=1)
            uv_1[:, -1, :] = 1
            u_loc = (uv < 1) * (uv_1 > 0)

            # get the instantaneous phase
            tmp_cumsum = torch.cumsum(rad_values, dim=1)
            # different batches need to be processed differently
            for idx in range(f0_values.shape[0]):
                temp_sum = tmp_cumsum[idx, u_loc[idx, :, 0], :]
                temp_sum[1:, :] = temp_sum[1:, :] - temp_sum[0:-1, :]
                # stores the accumulation of i.phase within
                # each voiced segment
                tmp_cumsum[idx, :, :] = 0
                tmp_cumsum[idx, u_loc[idx, :, 0], :] = temp_sum

            # rad_values - tmp_cumsum: remove the accumulation of i.phase
            # within the previous voiced segment.
            i_phase = torch.cumsum(rad_values - tmp_cumsum, dim=1)

            # get the sines
            sines = torch.cos(i_phase * 2 * np.pi)
        return sines

    def forward(self, f0):
        """ sine_tensor, uv = forward(f0)
        input F0: tensor(batchsize=1, length, dim=1)
                  f0 for unvoiced steps should be 0
        output sine_tensor: tensor(batchsize=1, length, dim)
        output uv: tensor(batchsize=1, length, 1)
        """
        f0_buf = torch.zeros(f0.shape[0], f0.shape[1], self.dim,
                             device=f0.device)
        # fundamental component
        fn = torch.multiply(f0, torch.FloatTensor([[range(1, self.harmonic_num + 2)]]).to(f0.device))

        # generate sine waveforms
        sine_waves = self._f02sine(fn) * self.sine_amp

        # generate uv signal
        # uv = torch.ones(f0.shape)
        # uv = uv * (f0 > self.voiced_threshold)
        uv = self._f02uv(f0)

        # noise: for unvoiced regions the std should be similar to sine_amp
        #        (std = self.sine_amp/3 -> max value ~ self.sine_amp);
        #        for voiced regions it is self.noise_std
        noise_amp = uv * self.noise_std + (1 - uv) * self.sine_amp / 3
        noise = noise_amp * torch.randn_like(sine_waves)

        # first: set the unvoiced part to 0 by uv
        # then: add the noise
        sine_waves = sine_waves * uv + noise
        return sine_waves, uv, noise


class SourceModuleHnNSF(torch.nn.Module):
    """ SourceModule for hn-nsf
    SourceModule(sampling_rate, harmonic_num=0, sine_amp=0.1,
                 add_noise_std=0.003, voiced_threshod=0)
    sampling_rate: sampling rate in Hz
    harmonic_num: number of harmonics above F0 (default: 0)
    sine_amp: amplitude of the sine source signal (default: 0.1)
    add_noise_std: std of additive Gaussian noise (default: 0.003)
        note that the amplitude of noise in unvoiced regions is decided
        by sine_amp
    voiced_threshold: threshold to set U/V given F0 (default: 0)
    Sine_source, noise_source = SourceModuleHnNSF(F0_sampled)
    F0_sampled (batchsize, length, 1)
    Sine_source (batchsize, length, 1)
    noise_source (batchsize, length, 1)
    uv (batchsize, length, 1)
    """

    def __init__(self, sampling_rate, upsample_scale, harmonic_num=0, sine_amp=0.1,
                 add_noise_std=0.003, voiced_threshod=0):
        super(SourceModuleHnNSF, self).__init__()

        self.sine_amp = sine_amp
        self.noise_std = add_noise_std

        # to produce sine waveforms
        self.l_sin_gen = SineGen(sampling_rate, upsample_scale, harmonic_num,
                                 sine_amp, add_noise_std, voiced_threshod)

        # to merge source harmonics into a single excitation
        self.l_linear = torch.nn.Linear(harmonic_num + 1, 1)
        self.l_tanh = torch.nn.Tanh()

    def forward(self, x):
        """
        Sine_source, noise_source = SourceModuleHnNSF(F0_sampled)
        F0_sampled (batchsize, length, 1)
        Sine_source (batchsize, length, 1)
        noise_source (batchsize, length, 1)
        """
        # source for the harmonic branch
        with torch.no_grad():
            sine_wavs, uv, _ = self.l_sin_gen(x)
        sine_merge = self.l_tanh(self.l_linear(sine_wavs))

        # source for the noise branch, in the same shape as uv
        noise = torch.randn_like(uv) * self.sine_amp / 3
        return sine_merge, noise, uv


def padDiff(x):
    return F.pad(F.pad(x, (0, 0, -1, 1), 'constant', 0) - x, (0, 0, 0, -1), 'constant', 0)
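A quick shape check for the harmonic-plus-noise source above (a sketch with assumed values: the 220 Hz contour and `upsample_scale=300` are illustrative, the latter matching `np.prod(upsample_rates) * gen_istft_hop_size` in the `Generator` below; `SineGen`'s constructor is defined earlier in this file):
```python
import torch

source = SourceModuleHnNSF(sampling_rate=24000, upsample_scale=300,
                           harmonic_num=8, voiced_threshod=10)
f0 = torch.full((1, 60000, 1), 220.0)   # sample-rate F0 contour, all voiced
sine_merge, noise, uv = source(f0)
# sine_merge: (1, 60000, 1) merged harmonic excitation (tanh of 9 harmonics)
# noise:      (1, 60000, 1) Gaussian noise for the noise branch
# uv:         (1, 60000, 1) voiced/unvoiced flags (all 1.0 here, since 220 > 10)
```
The input length must be divisible by `upsample_scale`, since `_f02sine` downsamples the phase increments by that factor before the cumulative sum and upsamples afterwards.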
class Generator(torch.nn.Module):
    def __init__(self, style_dim, resblock_kernel_sizes, upsample_rates, upsample_initial_channel,
                 resblock_dilation_sizes, upsample_kernel_sizes, gen_istft_n_fft, gen_istft_hop_size):
        super(Generator, self).__init__()

        self.num_kernels = len(resblock_kernel_sizes)
        self.num_upsamples = len(upsample_rates)
        resblock = AdaINResBlock1

        self.m_source = SourceModuleHnNSF(
            sampling_rate=24000,
            upsample_scale=np.prod(upsample_rates) * gen_istft_hop_size,
            harmonic_num=8, voiced_threshod=10)
        self.f0_upsamp = torch.nn.Upsample(scale_factor=np.prod(upsample_rates) * gen_istft_hop_size)
        self.noise_convs = nn.ModuleList()
        self.noise_res = nn.ModuleList()

        self.ups = nn.ModuleList()
        for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)):
            self.ups.append(weight_norm(
                ConvTranspose1d(upsample_initial_channel//(2**i), upsample_initial_channel//(2**(i+1)),
                                k, u, padding=(k-u)//2)))

        self.resblocks = nn.ModuleList()
        for i in range(len(self.ups)):
            ch = upsample_initial_channel//(2**(i+1))
            for j, (k, d) in enumerate(zip(resblock_kernel_sizes, resblock_dilation_sizes)):
                self.resblocks.append(resblock(ch, k, d, style_dim))

            c_cur = upsample_initial_channel // (2 ** (i + 1))

            if i + 1 < len(upsample_rates):
                stride_f0 = np.prod(upsample_rates[i + 1:])
                self.noise_convs.append(Conv1d(
                    gen_istft_n_fft + 2, c_cur, kernel_size=stride_f0 * 2,
                    stride=stride_f0, padding=(stride_f0+1) // 2))
                self.noise_res.append(resblock(c_cur, 7, [1, 3, 5], style_dim))
            else:
                self.noise_convs.append(Conv1d(gen_istft_n_fft + 2, c_cur, kernel_size=1))
                self.noise_res.append(resblock(c_cur, 11, [1, 3, 5], style_dim))

        self.post_n_fft = gen_istft_n_fft
        self.conv_post = weight_norm(Conv1d(ch, self.post_n_fft + 2, 7, 1, padding=3))
        self.ups.apply(init_weights)
        self.conv_post.apply(init_weights)
        self.reflection_pad = torch.nn.ReflectionPad1d((1, 0))
        self.stft = TorchSTFT(filter_length=gen_istft_n_fft, hop_length=gen_istft_hop_size,
                              win_length=gen_istft_n_fft)

    def forward(self, x, s, f0):
        with torch.no_grad():
            f0 = self.f0_upsamp(f0[:, None]).transpose(1, 2)  # bs, n, t

            har_source, noi_source, uv = self.m_source(f0)
            har_source = har_source.transpose(1, 2).squeeze(1)
            har_spec, har_phase = self.stft.transform(har_source)
            har = torch.cat([har_spec, har_phase], dim=1)

        for i in range(self.num_upsamples):
            x = F.leaky_relu(x, LRELU_SLOPE)
            x_source = self.noise_convs[i](har)
            x_source = self.noise_res[i](x_source, s)

            x = self.ups[i](x)
            if i == self.num_upsamples - 1:
                x = self.reflection_pad(x)

            x = x + x_source
            xs = None
            for j in range(self.num_kernels):
                if xs is None:
                    xs = self.resblocks[i*self.num_kernels+j](x, s)
                else:
                    xs += self.resblocks[i*self.num_kernels+j](x, s)
            x = xs / self.num_kernels
        x = F.leaky_relu(x)
        x = self.conv_post(x)
        spec = torch.exp(x[:, :self.post_n_fft // 2 + 1, :])
        phase = torch.sin(x[:, self.post_n_fft // 2 + 1:, :])
        return self.stft.inverse(spec, phase)

    def fw_phase(self, x, s):
        for i in range(self.num_upsamples):
            x = F.leaky_relu(x, LRELU_SLOPE)
            x = self.ups[i](x)
            xs = None
            for j in range(self.num_kernels):
                if xs is None:
                    xs = self.resblocks[i*self.num_kernels+j](x, s)
                else:
                    xs += self.resblocks[i*self.num_kernels+j](x, s)
            x = xs / self.num_kernels
        x = F.leaky_relu(x)
        x = self.reflection_pad(x)
        x = self.conv_post(x)
        spec = torch.exp(x[:, :self.post_n_fft // 2 + 1, :])
        phase = torch.sin(x[:, self.post_n_fft // 2 + 1:, :])
        return spec, phase

    def remove_weight_norm(self):
        print('Removing weight norm...')
        for l in self.ups:
            remove_weight_norm(l)
        for l in self.resblocks:
            l.remove_weight_norm()
        # this generator has no conv_pre, so only conv_post is handled here
        remove_weight_norm(self.conv_post)


class AdainResBlk1d(nn.Module):
    def __init__(self, dim_in, dim_out, style_dim=64, actv=nn.LeakyReLU(0.2),
                 upsample='none', dropout_p=0.0):
        super().__init__()
        self.actv = actv
        self.upsample_type = upsample
        self.upsample = UpSample1d(upsample)
        self.learned_sc = dim_in != dim_out
        self._build_weights(dim_in, dim_out, style_dim)
        self.dropout = nn.Dropout(dropout_p)

        if upsample == 'none':
            self.pool = nn.Identity()
        else:
            self.pool = weight_norm(nn.ConvTranspose1d(dim_in, dim_in, kernel_size=3, stride=2,
                                                       groups=dim_in, padding=1, output_padding=1))

    def _build_weights(self, dim_in, dim_out, style_dim):
        self.conv1 = weight_norm(nn.Conv1d(dim_in, dim_out, 3, 1, 1))
        self.conv2 = weight_norm(nn.Conv1d(dim_out, dim_out, 3, 1, 1))
        self.norm1 = AdaIN1d(style_dim, dim_in)
        self.norm2 = AdaIN1d(style_dim, dim_out)
        if self.learned_sc:
            self.conv1x1 = weight_norm(nn.Conv1d(dim_in, dim_out, 1, 1, 0, bias=False))

    def _shortcut(self, x):
        x = self.upsample(x)
        if self.learned_sc:
            x = self.conv1x1(x)
        return x

    def _residual(self, x, s):
        x = self.norm1(x, s)
        x = self.actv(x)
        x = self.pool(x)
        x = self.conv1(self.dropout(x))
        x = self.norm2(x, s)
        x = self.actv(x)
        x = self.conv2(self.dropout(x))
        return x

    def forward(self, x, s):
        out = self._residual(x, s)
        out = (out + self._shortcut(x)) / math.sqrt(2)
        return out


class UpSample1d(nn.Module):
    def __init__(self, layer_type):
        super().__init__()
        self.layer_type = layer_type

    def forward(self, x):
        if self.layer_type == 'none':
            return x
        else:
            return F.interpolate(x, scale_factor=2, mode='nearest')


class Decoder(nn.Module):
    def __init__(self, dim_in=512, F0_channel=512, style_dim=64, dim_out=80,
                 resblock_kernel_sizes=[3, 7, 11],
                 upsample_rates=[10, 6],
                 upsample_initial_channel=512,
                 resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5], [1, 3, 5]],
                 upsample_kernel_sizes=[20, 12],
                 gen_istft_n_fft=20, gen_istft_hop_size=5):
        super().__init__()

        self.decode = nn.ModuleList()

        self.encode = AdainResBlk1d(dim_in + 2, 1024, style_dim)

        self.decode.append(AdainResBlk1d(1024 + 2 + 64, 1024, style_dim))
        self.decode.append(AdainResBlk1d(1024 + 2 + 64, 1024, style_dim))
        self.decode.append(AdainResBlk1d(1024 + 2 + 64, 1024, style_dim))
        self.decode.append(AdainResBlk1d(1024 + 2 + 64, 512, style_dim, upsample=True))

        self.F0_conv = weight_norm(nn.Conv1d(1, 1, kernel_size=3, stride=2, groups=1, padding=1))

        self.N_conv = weight_norm(nn.Conv1d(1, 1, kernel_size=3, stride=2, groups=1, padding=1))

        self.asr_res = nn.Sequential(
            weight_norm(nn.Conv1d(512, 64, kernel_size=1)),
        )

        self.generator = Generator(style_dim, resblock_kernel_sizes, upsample_rates,
                                   upsample_initial_channel, resblock_dilation_sizes,
                                   upsample_kernel_sizes, gen_istft_n_fft, gen_istft_hop_size)

    def forward(self, asr, F0_curve, N, s):
        if self.training:
            # randomly smooth the F0 and energy curves during training (augmentation)
            downlist = [0, 3, 7]
            F0_down = downlist[random.randint(0, 2)]
            downlist = [0, 3, 7, 15]
            N_down = downlist[random.randint(0, 3)]
            if F0_down:
                F0_curve = nn.functional.conv1d(F0_curve.unsqueeze(1), torch.ones(1, 1, F0_down).to('cuda'),
                                                padding=F0_down//2).squeeze(1) / F0_down
            if N_down:
                N = nn.functional.conv1d(N.unsqueeze(1), torch.ones(1, 1, N_down).to('cuda'),
                                         padding=N_down//2).squeeze(1) / N_down

        F0 = self.F0_conv(F0_curve.unsqueeze(1))
        N = self.N_conv(N.unsqueeze(1))

        x = torch.cat([asr, F0, N], axis=1)
        x = self.encode(x, s)

        asr_res = self.asr_res(asr)

        res = True
        for block in self.decode:
            if res:
                x = torch.cat([x, asr_res, F0, N], axis=1)
            x = block(x, s)
            if block.upsample_type != "none":
                res = False

        x = self.generator(x, s, F0_curve)
        return x
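For orientation, a minimal end-to-end sketch of the `Decoder` above (illustrative shapes only; `style_dim=128` matches the repo's configs, `AdaIN1d` and `TorchSTFT` are defined earlier in this file, and the module must be in eval mode so the CUDA-only training-time smoothing branch is skipped):
```python
import torch

decoder = Decoder(dim_in=512, style_dim=128).eval()
asr = torch.randn(1, 512, 100)   # aligned phoneme features, 100 frames
F0  = torch.randn(1, 200).abs()  # F0 curve at 2x the frame rate (halved by F0_conv)
N   = torch.randn(1, 200)        # energy curve, same rate as F0
s   = torch.randn(1, 128)        # acoustic style vector

with torch.no_grad():
    wav = decoder(asr, F0, N, s)  # waveform synthesized through the iSTFT head
```
Note that the final layer predicts `gen_istft_n_fft + 2` channels, split into a log-magnitude half (`torch.exp`) and a phase half (`torch.sin`) that `TorchSTFT.inverse` turns back into audio.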
Modules/slmadv.py
ADDED
@@ -0,0 +1,195 @@
import torch
import numpy as np
import torch.nn.functional as F

class SLMAdversarialLoss(torch.nn.Module):

    def __init__(self, model, wl, sampler, min_len, max_len, batch_percentage=0.5, skip_update=10, sig=1.5):
        super(SLMAdversarialLoss, self).__init__()
        self.model = model
        self.wl = wl
        self.sampler = sampler

        self.min_len = min_len
        self.max_len = max_len
        self.batch_percentage = batch_percentage

        self.sig = sig
        self.skip_update = skip_update

    def forward(self, iters, y_rec_gt, y_rec_gt_pred, waves, mel_input_length, ref_text, ref_lengths, use_ind, s_trg, ref_s=None):
        text_mask = length_to_mask(ref_lengths).to(ref_text.device)
        bert_dur = self.model.bert(ref_text, attention_mask=(~text_mask).int())
        d_en = self.model.bert_encoder(bert_dur).transpose(-1, -2)

        if use_ind and np.random.rand() < 0.5:
            s_preds = s_trg
        else:
            num_steps = np.random.randint(3, 5)
            if ref_s is not None:
                s_preds = self.sampler(noise=torch.randn_like(s_trg).unsqueeze(1).to(ref_text.device),
                                       embedding=bert_dur,
                                       embedding_scale=1,
                                       features=ref_s,  # reference from the same speaker as the embedding
                                       embedding_mask_proba=0.1,
                                       num_steps=num_steps).squeeze(1)
            else:
                s_preds = self.sampler(noise=torch.randn_like(s_trg).unsqueeze(1).to(ref_text.device),
                                       embedding=bert_dur,
                                       embedding_scale=1,
                                       embedding_mask_proba=0.1,
                                       num_steps=num_steps).squeeze(1)

        s_dur = s_preds[:, 128:]
        s = s_preds[:, :128]

        d, _ = self.model.predictor(d_en, s_dur,
                                    ref_lengths,
                                    torch.randn(ref_lengths.shape[0], ref_lengths.max(), 2).to(ref_text.device),
                                    text_mask)

        bib = 0

        output_lengths = []
        attn_preds = []

        # differentiable duration modeling
        for _s2s_pred, _text_length in zip(d, ref_lengths):

            _s2s_pred_org = _s2s_pred[:_text_length, :]

            _s2s_pred = torch.sigmoid(_s2s_pred_org)
            _dur_pred = _s2s_pred.sum(axis=-1)

            l = int(torch.round(_s2s_pred.sum()).item())
            t = torch.arange(0, l).expand(l)

            t = torch.arange(0, l).unsqueeze(0).expand((len(_s2s_pred), l)).to(ref_text.device)
            loc = torch.cumsum(_dur_pred, dim=0) - _dur_pred / 2

            h = torch.exp(-0.5 * torch.square(t - (l - loc.unsqueeze(-1))) / (self.sig)**2)

            out = torch.nn.functional.conv1d(_s2s_pred_org.unsqueeze(0),
                                             h.unsqueeze(1),
                                             padding=h.shape[-1] - 1, groups=int(_text_length))[..., :l]
            attn_preds.append(F.softmax(out.squeeze(), dim=0))

            output_lengths.append(l)

        max_len = max(output_lengths)

        with torch.no_grad():
            t_en = self.model.text_encoder(ref_text, ref_lengths, text_mask)

        s2s_attn = torch.zeros(len(ref_lengths), int(ref_lengths.max()), max_len).to(ref_text.device)
        for bib in range(len(output_lengths)):
            s2s_attn[bib, :ref_lengths[bib], :output_lengths[bib]] = attn_preds[bib]

        asr_pred = t_en @ s2s_attn

        _, p_pred = self.model.predictor(d_en, s_dur,
                                         ref_lengths,
                                         s2s_attn,
                                         text_mask)

        mel_len = max(int(min(output_lengths) / 2 - 1), self.min_len // 2)
        mel_len = min(mel_len, self.max_len // 2)

        # get clips

        en = []
        p_en = []
        sp = []

        F0_fakes = []
        N_fakes = []

        wav = []

        for bib in range(len(output_lengths)):
            mel_length_pred = output_lengths[bib]
            mel_length_gt = int(mel_input_length[bib].item() / 2)
            if mel_length_gt <= mel_len or mel_length_pred <= mel_len:
                continue

            sp.append(s_preds[bib])

            random_start = np.random.randint(0, mel_length_pred - mel_len)
            en.append(asr_pred[bib, :, random_start:random_start+mel_len])
            p_en.append(p_pred[bib, :, random_start:random_start+mel_len])

            # get ground-truth clips
            random_start = np.random.randint(0, mel_length_gt - mel_len)
            y = waves[bib][(random_start * 2) * 300:((random_start+mel_len) * 2) * 300]
            wav.append(torch.from_numpy(y).to(ref_text.device))

            if len(wav) >= self.batch_percentage * len(waves):  # prevent OOM due to longer lengths
                break

        if len(sp) <= 1:
            return None

        sp = torch.stack(sp)
        wav = torch.stack(wav).float()
        en = torch.stack(en)
        p_en = torch.stack(p_en)

        F0_fake, N_fake = self.model.predictor.F0Ntrain(p_en, sp[:, 128:])
        y_pred = self.model.decoder(en, F0_fake, N_fake, sp[:, :128])

        # discriminator loss
        if (iters + 1) % self.skip_update == 0:
            if np.random.randint(0, 2) == 0:
                wav = y_rec_gt_pred
                use_rec = True
            else:
                use_rec = False

            crop_size = min(wav.size(-1), y_pred.size(-1))
            if use_rec:  # use reconstructed (shorter lengths), do length-invariant regularization
                if wav.size(-1) > y_pred.size(-1):
                    real_GP = wav[:, :, :crop_size]
                    out_crop = self.wl.discriminator_forward(real_GP.detach().squeeze())
                    out_org = self.wl.discriminator_forward(wav.detach().squeeze())
                    loss_reg = F.l1_loss(out_crop, out_org[..., :out_crop.size(-1)])

                    if np.random.randint(0, 2) == 0:
                        d_loss = self.wl.discriminator(real_GP.detach().squeeze(), y_pred.detach().squeeze()).mean()
                    else:
                        d_loss = self.wl.discriminator(wav.detach().squeeze(), y_pred.detach().squeeze()).mean()
                else:
                    real_GP = y_pred[:, :, :crop_size]
                    out_crop = self.wl.discriminator_forward(real_GP.detach().squeeze())
                    out_org = self.wl.discriminator_forward(y_pred.detach().squeeze())
                    loss_reg = F.l1_loss(out_crop, out_org[..., :out_crop.size(-1)])

                    if np.random.randint(0, 2) == 0:
                        d_loss = self.wl.discriminator(wav.detach().squeeze(), real_GP.detach().squeeze()).mean()
                    else:
                        d_loss = self.wl.discriminator(wav.detach().squeeze(), y_pred.detach().squeeze()).mean()

                # regularization (ignore length variation)
                d_loss += loss_reg

                out_gt = self.wl.discriminator_forward(y_rec_gt.detach().squeeze())
                out_rec = self.wl.discriminator_forward(y_rec_gt_pred.detach().squeeze())

                # regularization (ignore reconstruction artifacts)
                d_loss += F.l1_loss(out_gt, out_rec)

            else:
                d_loss = self.wl.discriminator(wav.detach().squeeze(), y_pred.detach().squeeze()).mean()
        else:
            d_loss = 0

        # generator loss
        gen_loss = self.wl.generator(y_pred.squeeze())

        gen_loss = gen_loss.mean()

        return d_loss, gen_loss, y_pred.detach().cpu().numpy()

def length_to_mask(lengths):
    mask = torch.arange(lengths.max()).unsqueeze(0).expand(lengths.shape[0], -1).type_as(lengths)
    mask = torch.gt(mask+1, lengths.unsqueeze(1))
    return mask
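A tiny check of the `length_to_mask` helper at the bottom of this file (padding positions come out `True`; values here are illustrative):
```python
import torch

lengths = torch.tensor([3, 5])
print(length_to_mask(lengths))
# tensor([[False, False, False,  True,  True],
#         [False, False, False, False, False]])
```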
Modules/utils.py
ADDED
@@ -0,0 +1,14 @@
from torch.nn.utils import weight_norm  # needed by apply_weight_norm below


def init_weights(m, mean=0.0, std=0.01):
    classname = m.__class__.__name__
    if classname.find("Conv") != -1:
        m.weight.data.normal_(mean, std)


def apply_weight_norm(m):
    classname = m.__class__.__name__
    if classname.find("Conv") != -1:
        weight_norm(m)


def get_padding(kernel_size, dilation=1):
    return int((kernel_size*dilation - dilation)/2)
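A quick check of the padding arithmetic in `get_padding` (it returns "same"-length padding for stride-1 convolutions with odd kernels):
```python
assert get_padding(7) == 3              # (7*1 - 1) / 2
assert get_padding(3, dilation=2) == 2  # (3*2 - 2) / 2
```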
README.md
ADDED
@@ -0,0 +1,125 @@
# StyleTTS 2: Towards Human-Level Text-to-Speech through Style Diffusion and Adversarial Training with Large Speech Language Models

### Yinghao Aaron Li, Cong Han, Vinay S. Raghavan, Gavin Mischler, Nima Mesgarani

> In this paper, we present StyleTTS 2, a text-to-speech (TTS) model that leverages style diffusion and adversarial training with large speech language models (SLMs) to achieve human-level TTS synthesis. StyleTTS 2 differs from its predecessor by modeling styles as a latent random variable through diffusion models to generate the most suitable style for the text without requiring reference speech, achieving efficient latent diffusion while benefiting from the diverse speech synthesis offered by diffusion models. Furthermore, we employ large pre-trained SLMs, such as WavLM, as discriminators with our novel differentiable duration modeling for end-to-end training, resulting in improved speech naturalness. StyleTTS 2 surpasses human recordings on the single-speaker LJSpeech dataset and matches it on the multispeaker VCTK dataset as judged by native English speakers. Moreover, when trained on the LibriTTS dataset, our model outperforms previous publicly available models for zero-shot speaker adaptation. This work achieves the first human-level TTS synthesis on both single and multispeaker datasets, showcasing the potential of style diffusion and adversarial training with large SLMs.

Paper: [https://arxiv.org/abs/2306.07691](https://arxiv.org/abs/2306.07691)

Audio samples: [https://styletts2.github.io/](https://styletts2.github.io/)

Online demo: [Hugging Face](https://huggingface.co/spaces/styletts2/styletts2) (thanks to [@fakerybakery](https://github.com/fakerybakery) for the wonderful online demo)

[Open In Colab](https://colab.research.google.com/github/yl4579/StyleTTS2/blob/main/) [Join Discord](https://discord.gg/ha8sxdG2K4)

## TODO
- [x] Training and inference demo code for single-speaker models (LJSpeech)
- [x] Test training code for multi-speaker models (VCTK and LibriTTS)
- [x] Finish demo code for multispeaker model and upload pre-trained models
- [x] Add a finetuning script for new speakers with base pre-trained multispeaker models
- [ ] Fix DDP (accelerator) for `train_second.py` **(I have tried everything I could to fix this but had no success, so if you are willing to help, please see [#7](https://github.com/yl4579/StyleTTS2/issues/7))**

## Pre-requisites
1. Python >= 3.7
2. Clone this repository:
```bash
git clone https://github.com/yl4579/StyleTTS2.git
cd StyleTTS2
```
3. Install python requirements:
```bash
pip install -r requirements.txt
```
On Windows, additionally run:
```bash
pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cu118 -U
```
Also install phonemizer and espeak if you want to run the demo:
```bash
pip install phonemizer
sudo apt-get install espeak-ng
```
4. Download and extract the [LJSpeech dataset](https://keithito.com/LJ-Speech-Dataset/), unzip it to the data folder, and upsample the data to 24 kHz. The text aligner and pitch extractor are pre-trained on 24 kHz data, but you can easily change the preprocessing and re-train them using your own preprocessing.
For LibriTTS, you will need to combine train-clean-360 with train-clean-100 and rename the folder train-clean-460 (see [val_list_libritts.txt](https://github.com/yl4579/StyleTTS/blob/main/Data/val_list_libritts.txt) as an example).

## Training
First stage training:
```bash
accelerate launch train_first.py --config_path ./Configs/config.yml
```
Second stage training **(the DDP version is not working, so the current version uses DP; again, see [#7](https://github.com/yl4579/StyleTTS2/issues/7) if you want to help)**:
```bash
python train_second.py --config_path ./Configs/config.yml
```
You can run both consecutively, and it will train both the first and second stages. The model will be saved in the formats "epoch_1st_%05d.pth" and "epoch_2nd_%05d.pth". Checkpoints and TensorBoard logs will be saved at `log_dir`.

The data list format needs to be `filename.wav|transcription|speaker`; see [val_list.txt](https://github.com/yl4579/StyleTTS2/blob/main/Data/val_list.txt) as an example. The speaker labels are needed for multi-speaker models because we need to sample reference audio for style diffusion model training.
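For illustration, two hypothetical entries in that format (filenames, transcriptions, and speaker IDs here are made up):
```
LJ001-0001.wav|Printing, in the only sense with which we are at present concerned.|0
LJ002-0005.wav|The second chapter begins here.|0
```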
### Important Configurations
|
| 59 |
+
In [config.yml](https://github.com/yl4579/StyleTTS2/blob/main/Configs/config.yml), there are a few important configurations to take care of:
|
| 60 |
+
- `OOD_data`: The path for out-of-distribution texts for SLM adversarial training. The format should be `text|anything`.
|
| 61 |
+
- `min_length`: Minimum length of OOD texts for training. This is to make sure the synthesized speech has a minimum length.
|
| 62 |
+
- `max_len`: Maximum length of audio for training. The unit is frame. Since the default hop size is 300, one frame is approximately `300 / 24000` (0.0125) second. Lowering this if you encounter the out-of-memory issue.
|
| 63 |
+
- `multispeaker`: Set to true if you want to train a multispeaker model. This is needed because the architecture of the denoiser is different for single and multispeaker models.
|
| 64 |
+
- `batch_percentage`: This is to make sure during SLM adversarial training there are no out-of-memory (OOM) issues. If you encounter OOM problem, please set a lower number for this.
|
| 65 |
+
|
| 66 |
+
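A quick sketch of the frame arithmetic behind `max_len` (plain Python; `max_len = 400` is a hypothetical value, while the 24 kHz sampling rate and hop size 300 come from this repo's configs):
```python
sr = 24000      # sampling rate in Hz
hop = 300       # hop size in samples
max_len = 400   # hypothetical value for illustration

frame_sec = hop / sr            # 0.0125 s per frame
clip_sec = max_len * frame_sec  # 400 frames -> 5.0 s of audio per training clip
print(frame_sec, clip_sec)
```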
### Pre-trained modules
In the [Utils](https://github.com/yl4579/StyleTTS2/tree/main/Utils) folder, there are three pre-trained models:
- **[ASR](https://github.com/yl4579/StyleTTS2/tree/main/Utils/ASR) folder**: It contains the pre-trained text aligner, which was pre-trained on English (LibriTTS), Japanese (JVS), and Chinese (AiShell) corpora. It works well for most other languages without fine-tuning, but you can always train your own text aligner with the code here: [yl4579/AuxiliaryASR](https://github.com/yl4579/AuxiliaryASR).
- **[JDC](https://github.com/yl4579/StyleTTS2/tree/main/Utils/JDC) folder**: It contains the pre-trained pitch extractor, which was pre-trained on the English (LibriTTS) corpus only. However, it works well for other languages too because F0 is independent of language. If you want to train on a singing corpus, it is recommended to train a new pitch extractor with the code here: [yl4579/PitchExtractor](https://github.com/yl4579/PitchExtractor).
- **[PLBERT](https://github.com/yl4579/StyleTTS2/tree/main/Utils/PLBERT) folder**: It contains the pre-trained [PL-BERT](https://arxiv.org/abs/2301.08810) model, which was pre-trained on the English (Wikipedia) corpus only. It probably does not work very well on other languages, so you will need to train a different PL-BERT for other languages using the repo here: [yl4579/PL-BERT](https://github.com/yl4579/PL-BERT). You can also use the [multilingual PL-BERT](https://huggingface.co/papercup-ai/multilingual-pl-bert), which supports 14 languages.

### Common Issues
- **Loss becomes NaN**: In the first stage, make sure you do not use mixed precision, as it can cause the loss to become NaN for some datasets when the batch size is not set properly (it needs to be more than 16 to work well). For the second stage, also experiment with different batch sizes; higher batch sizes are more likely to cause NaN loss values. We recommend a batch size of 16. You can refer to issues [#10](https://github.com/yl4579/StyleTTS2/issues/10) and [#11](https://github.com/yl4579/StyleTTS2/issues/11) for more details.
- **Out of memory**: Use a lower `batch_size` or `max_len`. You may refer to issue [#10](https://github.com/yl4579/StyleTTS2/issues/10) for more information.
- **Non-English dataset**: You can train on any language you want, but you will need a pre-trained PL-BERT model for that language. We have a pre-trained [multilingual PL-BERT](https://huggingface.co/papercup-ai/multilingual-pl-bert) that supports 14 languages. You may refer to [yl4579/StyleTTS#10](https://github.com/yl4579/StyleTTS/issues/10) and [#70](https://github.com/yl4579/StyleTTS2/issues/70) for some examples of training on Chinese datasets.
## Finetuning
The script is modified from `train_second.py`, which uses DP, as DDP does not work for `train_second.py`. Please see the bold section above if you are willing to help with this problem.
```bash
python train_finetune.py --config_path ./Configs/config_ft.yml
```
Please make sure you have the LibriTTS checkpoint downloaded and unzipped in the repository folder. The default configuration `config_ft.yml` finetunes on LJSpeech with 1 hour of speech data (around 1k samples) for 50 epochs. This took about 4 hours to finish on four NVIDIA A100 GPUs. The quality is slightly worse (similar to NaturalSpeech on LJSpeech) than the LJSpeech model trained from scratch with 24 hours of speech data, which took around 2.5 days to finish on four A100s. The samples can be found at [#65 (comment)](https://github.com/yl4579/StyleTTS2/discussions/65#discussioncomment-7668393).

If you are using a **single GPU** (because the script doesn't work with DDP) and want to speed up training and save VRAM, you can run (thanks to [@korakoe](https://github.com/korakoe) for making the script at [#100](https://github.com/yl4579/StyleTTS2/pull/100)):
```bash
accelerate launch --mixed_precision=fp16 --num_processes=1 train_finetune_accelerate.py --config_path ./Configs/config_ft.yml
```
[Open In Colab](https://colab.research.google.com/github/yl4579/StyleTTS2/blob/main/Colab/StyleTTS2_Finetune_Demo.ipynb)

### Common Issues
[@Kreevoz](https://github.com/Kreevoz) has made detailed notes on common issues in finetuning, with suggestions for maximizing audio quality: [#81](https://github.com/yl4579/StyleTTS2/discussions/81). Some of these also apply to training from scratch. [@IIEleven11](https://github.com/IIEleven11) has also made a guideline for fine-tuning: [#128](https://github.com/yl4579/StyleTTS2/discussions/128).

- **Out of memory after `joint_epoch`**: This is likely because your GPU RAM is not big enough for the SLM adversarial training run. You may skip it, but the quality could be worse. Setting `joint_epoch` to a number larger than `epochs` skips the SLM adversarial training.

## Inference
Please refer to [Inference_LJSpeech.ipynb](https://github.com/yl4579/StyleTTS2/blob/main/Demo/Inference_LJSpeech.ipynb) (single-speaker) and [Inference_LibriTTS.ipynb](https://github.com/yl4579/StyleTTS2/blob/main/Demo/Inference_LibriTTS.ipynb) (multi-speaker) for details. For LibriTTS, you will also need to download [reference_audio.zip](https://huggingface.co/yl4579/StyleTTS2-LibriTTS/resolve/main/reference_audio.zip) and unzip it under the `demo` folder before running the demo.

- The pretrained StyleTTS 2 model on the LJSpeech corpus (24 kHz) can be downloaded at [https://huggingface.co/yl4579/StyleTTS2-LJSpeech/tree/main](https://huggingface.co/yl4579/StyleTTS2-LJSpeech/tree/main).

[Open In Colab](https://colab.research.google.com/github/yl4579/StyleTTS2/blob/main/Colab/StyleTTS2_Demo_LJSpeech.ipynb)

- The pretrained StyleTTS 2 model on LibriTTS can be downloaded at [https://huggingface.co/yl4579/StyleTTS2-LibriTTS/tree/main](https://huggingface.co/yl4579/StyleTTS2-LibriTTS/tree/main).

[Open In Colab](https://colab.research.google.com/github/yl4579/StyleTTS2/blob/main/Colab/StyleTTS2_Demo_LibriTTS.ipynb)


You can import StyleTTS 2 and run it in your own code. However, the inference depends on a GPL-licensed package, so it is not included directly in this repository. A [GPL-licensed fork](https://github.com/NeuralVox/StyleTTS2) has an importable script, as well as an experimental streaming API, etc. A [fully MIT-licensed package](https://pypi.org/project/styletts2/) that uses gruut (albeit with lower quality due to the mismatch between phonemizer and gruut) is also available.

***Before using these pre-trained models, you agree to inform the listeners that the speech samples are synthesized by the pre-trained models, unless you have permission to use the voice you synthesize. That is, you agree to only use voices whose speakers grant permission to have their voice cloned, either directly or by license, before making synthesized voices public, or you have to publicly announce that these voices are synthesized if you do not have permission to use them.***

### Common Issues
- **High-pitched background noise**: This is caused by numerical float differences on older GPUs. For more details, please refer to issue [#13](https://github.com/yl4579/StyleTTS2/issues/13). Basically, you will need to use a more modern GPU or do inference on the CPU.
- **Pre-trained model license**: You only need to abide by the above rules if you use **the pre-trained models** and the voices are **NOT** in the training set, i.e., your reference speakers are not from any open-access dataset. For more details on the rules for using the pre-trained models, please see [#37](https://github.com/yl4579/StyleTTS2/issues/37).

## References
- [archinetai/audio-diffusion-pytorch](https://github.com/archinetai/audio-diffusion-pytorch)
- [jik876/hifi-gan](https://github.com/jik876/hifi-gan)
- [rishikksh20/iSTFTNet-pytorch](https://github.com/rishikksh20/iSTFTNet-pytorch)
- [nii-yamagishilab/project-NN-Pytorch-scripts/project/01-nsf](https://github.com/nii-yamagishilab/project-NN-Pytorch-scripts/tree/master/project/01-nsf)

## License

Code: MIT License

Pre-Trained Models: Before using these pre-trained models, you agree to inform the listeners that the speech samples are synthesized by the pre-trained models, unless you have permission to use the voice you synthesize. That is, you agree to only use voices whose speakers grant permission to have their voice cloned, either directly or by license, before making synthesized voices public, or you have to publicly announce that these voices are synthesized if you do not have permission to use them.
Utils/ASR/__init__.py
ADDED
@@ -0,0 +1 @@
Utils/ASR/config.yml
ADDED
@@ -0,0 +1,29 @@
log_dir: "logs/20201006"
save_freq: 5
device: "cuda"
epochs: 180
batch_size: 64
pretrained_model: ""
train_data: "ASRDataset/train_list.txt"
val_data: "ASRDataset/val_list.txt"

dataset_params:
  data_augmentation: false

preprocess_parasm:
  sr: 24000
  spect_params:
    n_fft: 2048
    win_length: 1200
    hop_length: 300
  mel_params:
    n_mels: 80

model_params:
  input_dim: 80
  hidden_dim: 256
  n_token: 191
  token_embedding_dim: 512

optimizer_params:
  lr: 0.0005
Utils/ASR/layers.py
ADDED
@@ -0,0 +1,354 @@
import math
import torch
from torch import nn
from typing import Optional, Any
from torch import Tensor
import torch.nn.functional as F
import torchaudio
import torchaudio.functional as audio_F

import random
random.seed(0)


def _get_activation_fn(activ):
    if activ == 'relu':
        return nn.ReLU()
    elif activ == 'lrelu':
        return nn.LeakyReLU(0.2)
    elif activ == 'swish':
        return lambda x: x*torch.sigmoid(x)
    else:
        raise RuntimeError('Unexpected activ type %s, expected [relu, lrelu, swish]' % activ)

class LinearNorm(torch.nn.Module):
    def __init__(self, in_dim, out_dim, bias=True, w_init_gain='linear'):
        super(LinearNorm, self).__init__()
        self.linear_layer = torch.nn.Linear(in_dim, out_dim, bias=bias)

        torch.nn.init.xavier_uniform_(
            self.linear_layer.weight,
            gain=torch.nn.init.calculate_gain(w_init_gain))

    def forward(self, x):
        return self.linear_layer(x)


class ConvNorm(torch.nn.Module):
    def __init__(self, in_channels, out_channels, kernel_size=1, stride=1,
                 padding=None, dilation=1, bias=True, w_init_gain='linear', param=None):
        super(ConvNorm, self).__init__()
        if padding is None:
            assert(kernel_size % 2 == 1)
            padding = int(dilation * (kernel_size - 1) / 2)

        self.conv = torch.nn.Conv1d(in_channels, out_channels,
                                    kernel_size=kernel_size, stride=stride,
                                    padding=padding, dilation=dilation,
                                    bias=bias)

        torch.nn.init.xavier_uniform_(
            self.conv.weight, gain=torch.nn.init.calculate_gain(w_init_gain, param=param))

    def forward(self, signal):
        conv_signal = self.conv(signal)
        return conv_signal

class CausualConv(nn.Module):
    def __init__(self, in_channels, out_channels, kernel_size=1, stride=1, padding=1, dilation=1, bias=True, w_init_gain='linear', param=None):
        super(CausualConv, self).__init__()
        if padding is None:
            assert(kernel_size % 2 == 1)
            self.padding = int(dilation * (kernel_size - 1) / 2) * 2  # stored on self; forward() trims this many samples
        else:
            self.padding = padding * 2
        self.conv = nn.Conv1d(in_channels, out_channels,
                              kernel_size=kernel_size, stride=stride,
                              padding=self.padding,
                              dilation=dilation,
                              bias=bias)

        torch.nn.init.xavier_uniform_(
            self.conv.weight, gain=torch.nn.init.calculate_gain(w_init_gain, param=param))

    def forward(self, x):
        x = self.conv(x)
        x = x[:, :, :-self.padding]
        return x

class CausualBlock(nn.Module):
    def __init__(self, hidden_dim, n_conv=3, dropout_p=0.2, activ='lrelu'):
        super(CausualBlock, self).__init__()
        self.blocks = nn.ModuleList([
            self._get_conv(hidden_dim, dilation=3**i, activ=activ, dropout_p=dropout_p)
            for i in range(n_conv)])

    def forward(self, x):
        for block in self.blocks:
            res = x
            x = block(x)
            x += res
        return x

    def _get_conv(self, hidden_dim, dilation, activ='lrelu', dropout_p=0.2):
        layers = [
            CausualConv(hidden_dim, hidden_dim, kernel_size=3, padding=dilation, dilation=dilation),
            _get_activation_fn(activ),
            nn.BatchNorm1d(hidden_dim),
            nn.Dropout(p=dropout_p),
            CausualConv(hidden_dim, hidden_dim, kernel_size=3, padding=1, dilation=1),
            _get_activation_fn(activ),
            nn.Dropout(p=dropout_p)
        ]
        return nn.Sequential(*layers)

class ConvBlock(nn.Module):
    def __init__(self, hidden_dim, n_conv=3, dropout_p=0.2, activ='relu'):
        super().__init__()
        self._n_groups = 8
        self.blocks = nn.ModuleList([
            self._get_conv(hidden_dim, dilation=3**i, activ=activ, dropout_p=dropout_p)
            for i in range(n_conv)])

    def forward(self, x):
        for block in self.blocks:
            res = x
            x = block(x)
            x += res
        return x

    def _get_conv(self, hidden_dim, dilation, activ='relu', dropout_p=0.2):
        layers = [
            ConvNorm(hidden_dim, hidden_dim, kernel_size=3, padding=dilation, dilation=dilation),
            _get_activation_fn(activ),
            nn.GroupNorm(num_groups=self._n_groups, num_channels=hidden_dim),
            nn.Dropout(p=dropout_p),
            ConvNorm(hidden_dim, hidden_dim, kernel_size=3, padding=1, dilation=1),
            _get_activation_fn(activ),
            nn.Dropout(p=dropout_p)
        ]
        return nn.Sequential(*layers)

class LocationLayer(nn.Module):
    def __init__(self, attention_n_filters, attention_kernel_size,
                 attention_dim):
        super(LocationLayer, self).__init__()
        padding = int((attention_kernel_size - 1) / 2)
        self.location_conv = ConvNorm(2, attention_n_filters,
                                      kernel_size=attention_kernel_size,
                                      padding=padding, bias=False, stride=1,
                                      dilation=1)
        self.location_dense = LinearNorm(attention_n_filters, attention_dim,
                                         bias=False, w_init_gain='tanh')

    def forward(self, attention_weights_cat):
        processed_attention = self.location_conv(attention_weights_cat)
        processed_attention = processed_attention.transpose(1, 2)
        processed_attention = self.location_dense(processed_attention)
        return processed_attention


class Attention(nn.Module):
    def __init__(self, attention_rnn_dim, embedding_dim, attention_dim,
                 attention_location_n_filters, attention_location_kernel_size):
        super(Attention, self).__init__()
        self.query_layer = LinearNorm(attention_rnn_dim, attention_dim,
                                      bias=False, w_init_gain='tanh')
        self.memory_layer = LinearNorm(embedding_dim, attention_dim, bias=False,
                                       w_init_gain='tanh')
        self.v = LinearNorm(attention_dim, 1, bias=False)
        self.location_layer = LocationLayer(attention_location_n_filters,
                                            attention_location_kernel_size,
                                            attention_dim)
        self.score_mask_value = -float("inf")

    def get_alignment_energies(self, query, processed_memory,
                               attention_weights_cat):
        """
        PARAMS
        ------
        query: decoder output (batch, n_mel_channels * n_frames_per_step)
        processed_memory: processed encoder outputs (B, T_in, attention_dim)
        attention_weights_cat: cumulative and prev. att weights (B, 2, max_time)
        RETURNS
        -------
        alignment (batch, max_time)
        """

        processed_query = self.query_layer(query.unsqueeze(1))
        processed_attention_weights = self.location_layer(attention_weights_cat)
        energies = self.v(torch.tanh(
            processed_query + processed_attention_weights + processed_memory))

        energies = energies.squeeze(-1)
        return energies

    def forward(self, attention_hidden_state, memory, processed_memory,
                attention_weights_cat, mask):
        """
        PARAMS
        ------
        attention_hidden_state: attention rnn last output
        memory: encoder outputs
        processed_memory: processed encoder outputs
        attention_weights_cat: previous and cumulative attention weights
        mask: binary mask for padded data
        """
        alignment = self.get_alignment_energies(
            attention_hidden_state, processed_memory, attention_weights_cat)

        if mask is not None:
            alignment.data.masked_fill_(mask, self.score_mask_value)

        attention_weights = F.softmax(alignment, dim=1)
        attention_context = torch.bmm(attention_weights.unsqueeze(1), memory)
        attention_context = attention_context.squeeze(1)

        return attention_context, attention_weights


class ForwardAttentionV2(nn.Module):
    def __init__(self, attention_rnn_dim, embedding_dim, attention_dim,
                 attention_location_n_filters, attention_location_kernel_size):
        super(ForwardAttentionV2, self).__init__()
        self.query_layer = LinearNorm(attention_rnn_dim, attention_dim,
                                      bias=False, w_init_gain='tanh')
        self.memory_layer = LinearNorm(embedding_dim, attention_dim, bias=False,
                                       w_init_gain='tanh')
        self.v = LinearNorm(attention_dim, 1, bias=False)
        self.location_layer = LocationLayer(attention_location_n_filters,
                                            attention_location_kernel_size,
                                            attention_dim)
        self.score_mask_value = -float(1e20)

    def get_alignment_energies(self, query, processed_memory,
                               attention_weights_cat):
        """
        PARAMS
        ------
        query: decoder output (batch, n_mel_channels * n_frames_per_step)
        processed_memory: processed encoder outputs (B, T_in, attention_dim)
        attention_weights_cat: prev. and cumulative att weights (B, 2, max_time)
        RETURNS
        -------
        alignment (batch, max_time)
        """

        processed_query = self.query_layer(query.unsqueeze(1))
        processed_attention_weights = self.location_layer(attention_weights_cat)
        energies = self.v(torch.tanh(
            processed_query + processed_attention_weights + processed_memory))

        energies = energies.squeeze(-1)
        return energies

    def forward(self, attention_hidden_state, memory, processed_memory,
                attention_weights_cat, mask, log_alpha):
        """
        PARAMS
        ------
        attention_hidden_state: attention rnn last output
        memory: encoder outputs
        processed_memory: processed encoder outputs
        attention_weights_cat: previous and cumulative attention weights
        mask: binary mask for padded data
        """
        log_energy = self.get_alignment_energies(
            attention_hidden_state, processed_memory, attention_weights_cat)

        #log_energy =

        if mask is not None:
            log_energy.data.masked_fill_(mask, self.score_mask_value)

        #attention_weights = F.softmax(alignment, dim=1)

        #content_score = log_energy.unsqueeze(1) #[B, MAX_TIME] -> [B, 1, MAX_TIME]
        #log_alpha = log_alpha.unsqueeze(2) #[B, MAX_TIME] -> [B, MAX_TIME, 1]

        #log_total_score = log_alpha + content_score

        #previous_attention_weights = attention_weights_cat[:,0,:]

        log_alpha_shift_padded = []
        max_time = log_energy.size(1)
        for sft in range(2):
            shifted = log_alpha[:, :max_time-sft]
            shift_padded = F.pad(shifted, (sft, 0), 'constant', self.score_mask_value)
            log_alpha_shift_padded.append(shift_padded.unsqueeze(2))

        biased = torch.logsumexp(torch.cat(log_alpha_shift_padded, 2), 2)

        log_alpha_new = biased + log_energy

        attention_weights = F.softmax(log_alpha_new, dim=1)

        attention_context = torch.bmm(attention_weights.unsqueeze(1), memory)
        attention_context = attention_context.squeeze(1)

        return attention_context, attention_weights, log_alpha_new


class PhaseShuffle2d(nn.Module):
    def __init__(self, n=2):
        super(PhaseShuffle2d, self).__init__()
        self.n = n
        self.random = random.Random(1)

    def forward(self, x, move=None):
        # x.size = (B, C, M, L)
        if move is None:
            move = self.random.randint(-self.n, self.n)

        if move == 0:
            return x
        else:
            left = x[:, :, :, :move]
            right = x[:, :, :, move:]
            shuffled = torch.cat([right, left], dim=3)
            return shuffled

class PhaseShuffle1d(nn.Module):
    def __init__(self, n=2):
        super(PhaseShuffle1d, self).__init__()
        self.n = n
        self.random = random.Random(1)

    def forward(self, x, move=None):
        # x.size = (B, C, L)
        if move is None:
            move = self.random.randint(-self.n, self.n)

        if move == 0:
            return x
        else:
            left = x[:, :, :move]
            right = x[:, :, move:]
            shuffled = torch.cat([right, left], dim=2)

            return shuffled

class MFCC(nn.Module):
    def __init__(self, n_mfcc=40, n_mels=80):
        super(MFCC, self).__init__()
        self.n_mfcc = n_mfcc
        self.n_mels = n_mels
        self.norm = 'ortho'
        dct_mat = audio_F.create_dct(self.n_mfcc, self.n_mels, self.norm)
        self.register_buffer('dct_mat', dct_mat)

    def forward(self, mel_specgram):
        if len(mel_specgram.shape) == 2:
            mel_specgram = mel_specgram.unsqueeze(0)
            unsqueezed = True
        else:
            unsqueezed = False
        # (channel, n_mels, time).transpose(...) dot (n_mels, n_mfcc)
        # -> (channel, time, n_mfcc).transpose(...)
        mfcc = torch.matmul(mel_specgram.transpose(1, 2), self.dct_mat).transpose(1, 2)

        # unpack batch
        if unsqueezed:
            mfcc = mfcc.squeeze(0)
        return mfcc
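A small usage sketch for the `MFCC` module above (illustrative shapes; it applies an orthonormal DCT along the mel axis, so the time dimension is preserved):
```python
import torch

mfcc = MFCC(n_mfcc=40, n_mels=80)
mel = torch.randn(80, 120)   # (n_mels, time) mel spectrogram
feats = mfcc(mel)            # -> (40, 120) cepstral features
```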
Utils/ASR/models.py
ADDED
@@ -0,0 +1,186 @@
import math
import torch
from torch import nn
from torch.nn import TransformerEncoder
import torch.nn.functional as F
from .layers import MFCC, Attention, LinearNorm, ConvNorm, ConvBlock

class ASRCNN(nn.Module):
    def __init__(self,
                 input_dim=80,
                 hidden_dim=256,
                 n_token=35,
                 n_layers=6,
                 token_embedding_dim=256,
                 ):
        super().__init__()
        self.n_token = n_token
        self.n_down = 1
        self.to_mfcc = MFCC()
        self.init_cnn = ConvNorm(input_dim//2, hidden_dim, kernel_size=7, padding=3, stride=2)
        self.cnns = nn.Sequential(
            *[nn.Sequential(
                ConvBlock(hidden_dim),
                nn.GroupNorm(num_groups=1, num_channels=hidden_dim)
            ) for n in range(n_layers)])
        self.projection = ConvNorm(hidden_dim, hidden_dim // 2)
        self.ctc_linear = nn.Sequential(
            LinearNorm(hidden_dim//2, hidden_dim),
            nn.ReLU(),
            LinearNorm(hidden_dim, n_token))
        self.asr_s2s = ASRS2S(
            embedding_dim=token_embedding_dim,
            hidden_dim=hidden_dim//2,
            n_token=n_token)

    def forward(self, x, src_key_padding_mask=None, text_input=None):
        x = self.to_mfcc(x)
        x = self.init_cnn(x)
        x = self.cnns(x)
        x = self.projection(x)
        x = x.transpose(1, 2)
        ctc_logit = self.ctc_linear(x)
        if text_input is not None:
            _, s2s_logit, s2s_attn = self.asr_s2s(x, src_key_padding_mask, text_input)
            return ctc_logit, s2s_logit, s2s_attn
        else:
            return ctc_logit

    def get_feature(self, x):
        x = self.to_mfcc(x.squeeze(1))
        x = self.init_cnn(x)
        x = self.cnns(x)
        x = self.projection(x)
        return x

    def length_to_mask(self, lengths):
        mask = torch.arange(lengths.max()).unsqueeze(0).expand(lengths.shape[0], -1).type_as(lengths)
        mask = torch.gt(mask+1, lengths.unsqueeze(1)).to(lengths.device)
        return mask

    def get_future_mask(self, out_length, unmask_future_steps=0):
        """
        Args:
            out_length (int): returned mask shape is (out_length, out_length).
            unmask_future_steps (int): unmasking future step size.
        Return:
            mask (torch.BoolTensor): mask future timesteps mask[i, j] = True if i > j + unmask_future_steps else False
        """
        index_tensor = torch.arange(out_length).unsqueeze(0).expand(out_length, -1)
        mask = torch.gt(index_tensor, index_tensor.T + unmask_future_steps)
        return mask

class ASRS2S(nn.Module):
    def __init__(self,
                 embedding_dim=256,
                 hidden_dim=512,
                 n_location_filters=32,
                 location_kernel_size=63,
                 n_token=40):
        super(ASRS2S, self).__init__()
        self.embedding = nn.Embedding(n_token, embedding_dim)
        val_range = math.sqrt(6 / hidden_dim)
        self.embedding.weight.data.uniform_(-val_range, val_range)

        self.decoder_rnn_dim = hidden_dim
        self.project_to_n_symbols = nn.Linear(self.decoder_rnn_dim, n_token)
        self.attention_layer = Attention(
            self.decoder_rnn_dim,
            hidden_dim,
            hidden_dim,
            n_location_filters,
            location_kernel_size
        )
        self.decoder_rnn = nn.LSTMCell(self.decoder_rnn_dim + embedding_dim, self.decoder_rnn_dim)
        self.project_to_hidden = nn.Sequential(
            LinearNorm(self.decoder_rnn_dim * 2, hidden_dim),
            nn.Tanh())
        self.sos = 1
        self.eos = 2

    def initialize_decoder_states(self, memory, mask):
        """
        memory.shape = (B, L, H) = (Batchsize, Maxtimestep, Hiddendim)
        """
        B, L, H = memory.shape
        self.decoder_hidden = torch.zeros((B, self.decoder_rnn_dim)).type_as(memory)
        self.decoder_cell = torch.zeros((B, self.decoder_rnn_dim)).type_as(memory)
        self.attention_weights = torch.zeros((B, L)).type_as(memory)
        self.attention_weights_cum = torch.zeros((B, L)).type_as(memory)
        self.attention_context = torch.zeros((B, H)).type_as(memory)
        self.memory = memory
        self.processed_memory = self.attention_layer.memory_layer(memory)
        self.mask = mask
        self.unk_index = 3
        self.random_mask = 0.1

    def forward(self, memory, memory_mask, text_input):
        """
        memory.shape = (B, L, H) = (Batchsize, Maxtimestep, Hiddendim)
        memory_mask.shape = (B, L, )
        text_input.shape = (B, T)
        """
        self.initialize_decoder_states(memory, memory_mask)
        # text random mask
        random_mask = (torch.rand(text_input.shape) < self.random_mask).to(text_input.device)
        _text_input = text_input.clone()
        _text_input.masked_fill_(random_mask, self.unk_index)
        decoder_inputs = self.embedding(_text_input).transpose(0, 1) # -> [T, B, channel]
        start_embedding = self.embedding(
            torch.LongTensor([self.sos]*decoder_inputs.size(1)).to(decoder_inputs.device))
        decoder_inputs = torch.cat((start_embedding.unsqueeze(0), decoder_inputs), dim=0)

        hidden_outputs, logit_outputs, alignments = [], [], []
        while len(hidden_outputs) < decoder_inputs.size(0):
            decoder_input = decoder_inputs[len(hidden_outputs)]
            hidden, logit, attention_weights = self.decode(decoder_input)
            hidden_outputs += [hidden]
            logit_outputs += [logit]
            alignments += [attention_weights]

        hidden_outputs, logit_outputs, alignments = \
            self.parse_decoder_outputs(
                hidden_outputs, logit_outputs, alignments)

        return hidden_outputs, logit_outputs, alignments

    def decode(self, decoder_input):
        cell_input = torch.cat((decoder_input, self.attention_context), -1)
        self.decoder_hidden, self.decoder_cell = self.decoder_rnn(
            cell_input,
            (self.decoder_hidden, self.decoder_cell))

        attention_weights_cat = torch.cat(
            (self.attention_weights.unsqueeze(1),
             self.attention_weights_cum.unsqueeze(1)), dim=1)

        self.attention_context, self.attention_weights = self.attention_layer(
            self.decoder_hidden,
            self.memory,
            self.processed_memory,
            attention_weights_cat,
            self.mask)

        self.attention_weights_cum += self.attention_weights

        hidden_and_context = torch.cat((self.decoder_hidden, self.attention_context), -1)
        hidden = self.project_to_hidden(hidden_and_context)

        # dropout to increase generalization
        logit = self.project_to_n_symbols(F.dropout(hidden, 0.5, self.training))

        return hidden, logit, self.attention_weights

    def parse_decoder_outputs(self, hidden, logit, alignments):
        # -> [B, T_out + 1, max_time]
        alignments = torch.stack(alignments).transpose(0, 1)
        # [T_out + 1, B, n_symbols] -> [B, T_out + 1, n_symbols]
        logit = torch.stack(logit).transpose(0, 1).contiguous()
        hidden = torch.stack(hidden).transpose(0, 1).contiguous()

        return hidden, logit, alignments
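A hedged sketch of driving the ASRCNN aligner above, first CTC-only, then with teacher-forced text; shapes are illustrative assumptions and n_token=178 / token_embedding_dim=512 mirror the repo's ASR config:

import torch
from Utils.ASR.models import ASRCNN

aligner = ASRCNN(input_dim=80, hidden_dim=256, n_token=178, token_embedding_dim=512)
mels = torch.randn(2, 80, 200)              # (batch, n_mels, frames)
ctc_logit = aligner(mels)                   # CTC head only: (batch, frames // 2, n_token)
texts = torch.randint(0, 178, (2, 50))      # (batch, text_len) phoneme token ids
ctc_logit, s2s_logit, s2s_attn = aligner(mels, src_key_padding_mask=None, text_input=texts)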
Utils/JDC/__init__.py
ADDED
@@ -0,0 +1 @@
Utils/JDC/bst.t7
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:54dc94364b97e18ac1dfa6287714ed121248cfaac4cfd39d061c6e0a089ef169
size 21029926
Utils/JDC/model.py
ADDED
@@ -0,0 +1,190 @@
"""
Implementation of model from:
Kum et al. - "Joint Detection and Classification of Singing Voice Melody Using
Convolutional Recurrent Neural Networks" (2019)
Link: https://www.semanticscholar.org/paper/Joint-Detection-and-Classification-of-Singing-Voice-Kum-Nam/60a2ad4c7db43bace75805054603747fcd062c0d
"""
import torch
from torch import nn

class JDCNet(nn.Module):
    """
    Joint Detection and Classification Network model for singing voice melody.
    """
    def __init__(self, num_class=722, seq_len=31, leaky_relu_slope=0.01):
        super().__init__()
        self.num_class = num_class

        # input = (b, 1, 31, 513), b = batch size
        self.conv_block = nn.Sequential(
            nn.Conv2d(in_channels=1, out_channels=64, kernel_size=3, padding=1, bias=False),  # out: (b, 64, 31, 513)
            nn.BatchNorm2d(num_features=64),
            nn.LeakyReLU(leaky_relu_slope, inplace=True),
            nn.Conv2d(64, 64, 3, padding=1, bias=False),  # (b, 64, 31, 513)
        )

        # res blocks
        self.res_block1 = ResBlock(in_channels=64, out_channels=128)  # (b, 128, 31, 128)
        self.res_block2 = ResBlock(in_channels=128, out_channels=192)  # (b, 192, 31, 32)
        self.res_block3 = ResBlock(in_channels=192, out_channels=256)  # (b, 256, 31, 8)

        # pool block
        self.pool_block = nn.Sequential(
            nn.BatchNorm2d(num_features=256),
            nn.LeakyReLU(leaky_relu_slope, inplace=True),
            nn.MaxPool2d(kernel_size=(1, 4)),  # (b, 256, 31, 2)
            nn.Dropout(p=0.2),
        )

        # maxpool layers (for auxiliary network inputs)
        # in = (b, 128, 31, 513) from conv_block, out = (b, 128, 31, 2)
        self.maxpool1 = nn.MaxPool2d(kernel_size=(1, 40))
        # in = (b, 128, 31, 128) from res_block1, out = (b, 128, 31, 2)
        self.maxpool2 = nn.MaxPool2d(kernel_size=(1, 20))
        # in = (b, 128, 31, 32) from res_block2, out = (b, 128, 31, 2)
        self.maxpool3 = nn.MaxPool2d(kernel_size=(1, 10))

        # in = (b, 640, 31, 2), out = (b, 256, 31, 2)
        self.detector_conv = nn.Sequential(
            nn.Conv2d(640, 256, 1, bias=False),
            nn.BatchNorm2d(256),
            nn.LeakyReLU(leaky_relu_slope, inplace=True),
            nn.Dropout(p=0.2),
        )

        # input: (b, 31, 512) - resized from (b, 256, 31, 2)
        self.bilstm_classifier = nn.LSTM(
            input_size=512, hidden_size=256,
            batch_first=True, bidirectional=True)  # (b, 31, 512)

        # input: (b, 31, 512) - resized from (b, 256, 31, 2)
        self.bilstm_detector = nn.LSTM(
            input_size=512, hidden_size=256,
            batch_first=True, bidirectional=True)  # (b, 31, 512)

        # input: (b * 31, 512)
        self.classifier = nn.Linear(in_features=512, out_features=self.num_class)  # (b * 31, num_class)

        # input: (b * 31, 512)
        self.detector = nn.Linear(in_features=512, out_features=2)  # (b * 31, 2) - binary classifier

        # initialize weights
        self.apply(self.init_weights)

    def get_feature_GAN(self, x):
        seq_len = x.shape[-2]
        x = x.float().transpose(-1, -2)

        convblock_out = self.conv_block(x)

        resblock1_out = self.res_block1(convblock_out)
        resblock2_out = self.res_block2(resblock1_out)
        resblock3_out = self.res_block3(resblock2_out)
        poolblock_out = self.pool_block[0](resblock3_out)
        poolblock_out = self.pool_block[1](poolblock_out)

        return poolblock_out.transpose(-1, -2)

    def get_feature(self, x):
        seq_len = x.shape[-2]
        x = x.float().transpose(-1, -2)

        convblock_out = self.conv_block(x)

        resblock1_out = self.res_block1(convblock_out)
        resblock2_out = self.res_block2(resblock1_out)
        resblock3_out = self.res_block3(resblock2_out)
        poolblock_out = self.pool_block[0](resblock3_out)
        poolblock_out = self.pool_block[1](poolblock_out)

        return self.pool_block[2](poolblock_out)

    def forward(self, x):
        """
        Returns:
            classification_prediction, detection_prediction
            sizes: (b, 31, 722), (b, 31, 2)
        """
        ###############################
        # forward pass for classifier #
        ###############################
        seq_len = x.shape[-1]
        x = x.float().transpose(-1, -2)

        convblock_out = self.conv_block(x)

        resblock1_out = self.res_block1(convblock_out)
        resblock2_out = self.res_block2(resblock1_out)
        resblock3_out = self.res_block3(resblock2_out)

        poolblock_out = self.pool_block[0](resblock3_out)
        poolblock_out = self.pool_block[1](poolblock_out)
        GAN_feature = poolblock_out.transpose(-1, -2)
        poolblock_out = self.pool_block[2](poolblock_out)

        # (b, 256, 31, 2) => (b, 31, 256, 2) => (b, 31, 512)
        classifier_out = poolblock_out.permute(0, 2, 1, 3).contiguous().view((-1, seq_len, 512))
        classifier_out, _ = self.bilstm_classifier(classifier_out)  # ignore the hidden states

        classifier_out = classifier_out.contiguous().view((-1, 512))  # (b * 31, 512)
        classifier_out = self.classifier(classifier_out)
        classifier_out = classifier_out.view((-1, seq_len, self.num_class))  # (b, 31, num_class)

        # sizes: (b, 31, 722), (b, 31, 2)
        # classifier output consists of predicted pitch classes per frame
        # detector output consists of: (isvoice, notvoice) estimates per frame
        return torch.abs(classifier_out.squeeze()), GAN_feature, poolblock_out

    @staticmethod
    def init_weights(m):
        if isinstance(m, nn.Linear):
            nn.init.kaiming_uniform_(m.weight)
            if m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.Conv2d):
            nn.init.xavier_normal_(m.weight)
        elif isinstance(m, nn.LSTM) or isinstance(m, nn.LSTMCell):
            for p in m.parameters():
                if p.data is None:
                    continue

                if len(p.shape) >= 2:
                    nn.init.orthogonal_(p.data)
                else:
                    nn.init.normal_(p.data)


class ResBlock(nn.Module):
    def __init__(self, in_channels: int, out_channels: int, leaky_relu_slope=0.01):
        super().__init__()
        self.downsample = in_channels != out_channels

        # BN / LReLU / MaxPool layer before the conv layer - see Figure 1b in the paper
        self.pre_conv = nn.Sequential(
            nn.BatchNorm2d(num_features=in_channels),
            nn.LeakyReLU(leaky_relu_slope, inplace=True),
            nn.MaxPool2d(kernel_size=(1, 2)),  # apply downsampling on the y axis only
        )

        # conv layers
        self.conv = nn.Sequential(
            nn.Conv2d(in_channels=in_channels, out_channels=out_channels,
                      kernel_size=3, padding=1, bias=False),
            nn.BatchNorm2d(out_channels),
            nn.LeakyReLU(leaky_relu_slope, inplace=True),
            nn.Conv2d(out_channels, out_channels, 3, padding=1, bias=False),
        )

        # 1 x 1 convolution layer to match the feature dimensions
        self.conv1by1 = None
        if self.downsample:
            self.conv1by1 = nn.Conv2d(in_channels, out_channels, 1, bias=False)

    def forward(self, x):
        x = self.pre_conv(x)
        if self.downsample:
            x = self.conv(x) + self.conv1by1(x)
        else:
            x = self.conv(x) + x
        return x
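An illustrative sketch of running the JDCNet pitch extractor above on an 80-bin mel; the (batch, 1, n_mels, frames) layout follows from the transpose in forward(), and num_class=1 is an assumption mirroring how the model is typically wrapped as an F0 estimator, not a value fixed by this file:

import torch
from Utils.JDC.model import JDCNet

f0_model = JDCNet(num_class=1)
mel = torch.randn(2, 80, 192)                          # (batch, n_mels, frames)
f0, gan_feature, pooled = f0_model(mel.unsqueeze(1))   # f0: (2, 192) per-frame estimate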
Utils/PLBERT/config.yml
ADDED
@@ -0,0 +1,30 @@
log_dir: "Checkpoint"
mixed_precision: "fp16"
data_folder: "wikipedia_20220301.en.processed"
batch_size: 192
save_interval: 5000
log_interval: 10
num_process: 1 # number of GPUs
num_steps: 1000000

dataset_params:
    tokenizer: "transfo-xl-wt103"
    token_separator: " " # token used for phoneme separator (space)
    token_mask: "M" # token used for phoneme mask (M)
    word_separator: 3039 # token used for word separator (<formula>)
    token_maps: "token_maps.pkl" # token map path

    max_mel_length: 512 # max phoneme length

    word_mask_prob: 0.15 # probability to mask the entire word
    phoneme_mask_prob: 0.1 # probability to mask each phoneme
    replace_prob: 0.2 # probability to replace phonemes

model_params:
    vocab_size: 191
    hidden_size: 768
    num_attention_heads: 12
    intermediate_size: 2048
    max_position_embeddings: 512
    num_hidden_layers: 12
    dropout: 0.1
Utils/PLBERT/step_1000000.t7
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:d1403dfd45018dba5b84b25836d51f426f3fd36b1e4501520bb2291227037f83
size 25192693
Utils/PLBERT/step_1000000.t7.backup
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:0714ff85804db43e06b3b0ac5749bf90cf206257c6c5916e8a98c5933b4c21e0
size 25185187
Utils/PLBERT/util.py
ADDED
@@ -0,0 +1,42 @@
import os
import yaml
import torch
from transformers import AlbertConfig, AlbertModel

class CustomAlbert(AlbertModel):
    def forward(self, *args, **kwargs):
        # Call the original forward method
        outputs = super().forward(*args, **kwargs)

        # Only return the last_hidden_state
        return outputs.last_hidden_state


def load_plbert(log_dir):
    config_path = os.path.join(log_dir, "config.yml")
    plbert_config = yaml.safe_load(open(config_path))

    albert_base_configuration = AlbertConfig(**plbert_config['model_params'])
    bert = CustomAlbert(albert_base_configuration)

    files = os.listdir(log_dir)
    ckpts = []
    for f in os.listdir(log_dir):
        if f.startswith("step_"): ckpts.append(f)

    iters = [int(f.split('_')[-1].split('.')[0]) for f in ckpts if os.path.isfile(os.path.join(log_dir, f))]
    iters = sorted(iters)[-1]

    checkpoint = torch.load(log_dir + "/step_" + str(iters) + ".t7", map_location='cpu')
    state_dict = checkpoint['net']
    from collections import OrderedDict
    new_state_dict = OrderedDict()
    for k, v in state_dict.items():
        name = k[7:]  # remove `module.`
        if name.startswith('encoder.'):
            name = name[8:]  # remove `encoder.`
        new_state_dict[name] = v
    del new_state_dict["embeddings.position_ids"]
    bert.load_state_dict(new_state_dict, strict=False)

    return bert
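A usage sketch for the loader above; the path is an assumption matching where the checkpoint and config are shipped in this upload:

from Utils.PLBERT.util import load_plbert

plbert = load_plbert("Utils/PLBERT")   # picks the highest step_*.t7 (here step_1000000.t7)
hidden = plbert.config.hidden_size     # 768 per config.yml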
Utils/__init__.py
ADDED
@@ -0,0 +1 @@
Utils_original/ASR/__init__.py
ADDED
@@ -0,0 +1 @@
Utils_original/ASR/config.yml
ADDED
@@ -0,0 +1,29 @@
log_dir: "logs/20201006"
save_freq: 5
device: "cuda"
epochs: 180
batch_size: 64
pretrained_model: ""
train_data: "ASRDataset/train_list.txt"
val_data: "ASRDataset/val_list.txt"

dataset_params:
    data_augmentation: false

preprocess_parasm:
    sr: 24000
    spect_params:
        n_fft: 2048
        win_length: 1200
        hop_length: 300
    mel_params:
        n_mels: 80

model_params:
    input_dim: 80
    hidden_dim: 256
    n_token: 178
    token_embedding_dim: 512

optimizer_params:
    lr: 0.0005
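The ASR training script is presumably expected to read this file like the other YAML configs here; a sketch with yaml.safe_load, mirroring Utils/PLBERT/util.py rather than a confirmed entry point:

import yaml

asr_config = yaml.safe_load(open("Utils_original/ASR/config.yml"))
model_params = asr_config['model_params']   # input_dim=80, hidden_dim=256, n_token=178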
Utils_original/ASR/layers.py
ADDED
@@ -0,0 +1,354 @@
(same contents as Utils/ASR/layers.py)
Utils_original/ASR/models.py
ADDED
@@ -0,0 +1,186 @@
(same contents as Utils/ASR/models.py)
Utils_original/JDC/__init__.py
ADDED
@@ -0,0 +1 @@
Utils_original/JDC/bst.t7
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:54dc94364b97e18ac1dfa6287714ed121248cfaac4cfd39d061c6e0a089ef169
size 21029926
Utils_original/JDC/model.py
ADDED
|
@@ -0,0 +1,190 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Implementation of model from:
|
| 3 |
+
Kum et al. - "Joint Detection and Classification of Singing Voice Melody Using
|
| 4 |
+
Convolutional Recurrent Neural Networks" (2019)
|
| 5 |
+
Link: https://www.semanticscholar.org/paper/Joint-Detection-and-Classification-of-Singing-Voice-Kum-Nam/60a2ad4c7db43bace75805054603747fcd062c0d
|
| 6 |
+
"""
|
| 7 |
+
import torch
|
| 8 |
+
from torch import nn
|
| 9 |
+
|
| 10 |
+
class JDCNet(nn.Module):
|
| 11 |
+
"""
|
| 12 |
+
Joint Detection and Classification Network model for singing voice melody.
|
| 13 |
+
"""
|
| 14 |
+
def __init__(self, num_class=722, seq_len=31, leaky_relu_slope=0.01):
|
| 15 |
+
super().__init__()
|
| 16 |
+
self.num_class = num_class
|
| 17 |
+
|
| 18 |
+
# input = (b, 1, 31, 513), b = batch size
|
| 19 |
+
self.conv_block = nn.Sequential(
|
| 20 |
+
nn.Conv2d(in_channels=1, out_channels=64, kernel_size=3, padding=1, bias=False), # out: (b, 64, 31, 513)
|
| 21 |
+
nn.BatchNorm2d(num_features=64),
|
| 22 |
+
nn.LeakyReLU(leaky_relu_slope, inplace=True),
|
| 23 |
+
nn.Conv2d(64, 64, 3, padding=1, bias=False), # (b, 64, 31, 513)
|
| 24 |
+
)
|
| 25 |
+
|
| 26 |
+
# res blocks
|
| 27 |
+
self.res_block1 = ResBlock(in_channels=64, out_channels=128) # (b, 128, 31, 128)
|
| 28 |
+
self.res_block2 = ResBlock(in_channels=128, out_channels=192) # (b, 192, 31, 32)
|
| 29 |
+
self.res_block3 = ResBlock(in_channels=192, out_channels=256) # (b, 256, 31, 8)
|
| 30 |
+
|
| 31 |
+
# pool block
|
| 32 |
+
self.pool_block = nn.Sequential(
|
| 33 |
+
nn.BatchNorm2d(num_features=256),
|
| 34 |
+
nn.LeakyReLU(leaky_relu_slope, inplace=True),
|
| 35 |
+
nn.MaxPool2d(kernel_size=(1, 4)), # (b, 256, 31, 2)
|
| 36 |
+
nn.Dropout(p=0.2),
|
| 37 |
+
)
|
| 38 |
+
|
| 39 |
+
# maxpool layers (for auxiliary network inputs)
|
| 40 |
+
# in = (b, 128, 31, 513) from conv_block, out = (b, 128, 31, 2)
|
| 41 |
+
self.maxpool1 = nn.MaxPool2d(kernel_size=(1, 40))
|
| 42 |
+
# in = (b, 128, 31, 128) from res_block1, out = (b, 128, 31, 2)
|
| 43 |
+
self.maxpool2 = nn.MaxPool2d(kernel_size=(1, 20))
|
| 44 |
+
# in = (b, 128, 31, 32) from res_block2, out = (b, 128, 31, 2)
|
| 45 |
+
self.maxpool3 = nn.MaxPool2d(kernel_size=(1, 10))
|
| 46 |
+
|
| 47 |
+
# in = (b, 640, 31, 2), out = (b, 256, 31, 2)
|
| 48 |
+
self.detector_conv = nn.Sequential(
|
| 49 |
+
nn.Conv2d(640, 256, 1, bias=False),
|
| 50 |
+
nn.BatchNorm2d(256),
|
| 51 |
+
nn.LeakyReLU(leaky_relu_slope, inplace=True),
|
| 52 |
+
nn.Dropout(p=0.2),
|
| 53 |
+
)
|
| 54 |
+
|
| 55 |
+
# input: (b, 31, 512) - resized from (b, 256, 31, 2)
|
| 56 |
+
self.bilstm_classifier = nn.LSTM(
|
| 57 |
+
input_size=512, hidden_size=256,
|
| 58 |
+
batch_first=True, bidirectional=True) # (b, 31, 512)
|
| 59 |
+
|
| 60 |
+
# input: (b, 31, 512) - resized from (b, 256, 31, 2)
|
| 61 |
+
self.bilstm_detector = nn.LSTM(
|
| 62 |
+
input_size=512, hidden_size=256,
|
| 63 |
+
batch_first=True, bidirectional=True) # (b, 31, 512)
|
| 64 |
+
|
| 65 |
+
# input: (b * 31, 512)
|
| 66 |
+
self.classifier = nn.Linear(in_features=512, out_features=self.num_class) # (b * 31, num_class)
|
| 67 |
+
|
| 68 |
+
# input: (b * 31, 512)
|
| 69 |
+
self.detector = nn.Linear(in_features=512, out_features=2) # (b * 31, 2) - binary classifier
|
| 70 |
+
|
| 71 |
+
# initialize weights
|
| 72 |
+
self.apply(self.init_weights)
|
| 73 |
+
|
| 74 |
+
def get_feature_GAN(self, x):
|
| 75 |
+
seq_len = x.shape[-2]
|
| 76 |
+
x = x.float().transpose(-1, -2)
|
| 77 |
+
|
| 78 |
+
convblock_out = self.conv_block(x)
|
| 79 |
+
|
| 80 |
+
resblock1_out = self.res_block1(convblock_out)
|
| 81 |
+
resblock2_out = self.res_block2(resblock1_out)
|
| 82 |
+
resblock3_out = self.res_block3(resblock2_out)
|
| 83 |
+
poolblock_out = self.pool_block[0](resblock3_out)
|
| 84 |
+
poolblock_out = self.pool_block[1](poolblock_out)
|
| 85 |
+
|
| 86 |
+
return poolblock_out.transpose(-1, -2)
|
| 87 |
+
|
| 88 |
+
def get_feature(self, x):
|
| 89 |
+
seq_len = x.shape[-2]
|
| 90 |
+
x = x.float().transpose(-1, -2)
|
| 91 |
+
|
| 92 |
+
convblock_out = self.conv_block(x)
|
| 93 |
+
|
| 94 |
+
resblock1_out = self.res_block1(convblock_out)
|
| 95 |
+
resblock2_out = self.res_block2(resblock1_out)
|
| 96 |
+
resblock3_out = self.res_block3(resblock2_out)
|
| 97 |
+
poolblock_out = self.pool_block[0](resblock3_out)
|
| 98 |
+
poolblock_out = self.pool_block[1](poolblock_out)
|
| 99 |
+
|
| 100 |
+
return self.pool_block[2](poolblock_out)
|
| 101 |
+
|
| 102 |
+
def forward(self, x):
|
| 103 |
+
"""
|
| 104 |
+
Returns:
|
| 105 |
+
classification_prediction, detection_prediction
|
| 106 |
+
sizes: (b, 31, 722), (b, 31, 2)
|
| 107 |
+
"""
|
+        ###############################
+        # forward pass for classifier #
+        ###############################
+        seq_len = x.shape[-1]
+        x = x.float().transpose(-1, -2)
+
+        convblock_out = self.conv_block(x)
+
+        resblock1_out = self.res_block1(convblock_out)
+        resblock2_out = self.res_block2(resblock1_out)
+        resblock3_out = self.res_block3(resblock2_out)
+
+
+        poolblock_out = self.pool_block[0](resblock3_out)
+        poolblock_out = self.pool_block[1](poolblock_out)
+        GAN_feature = poolblock_out.transpose(-1, -2)
+        poolblock_out = self.pool_block[2](poolblock_out)
+
+        # (b, 256, 31, 2) => (b, 31, 256, 2) => (b, 31, 512)
+        classifier_out = poolblock_out.permute(0, 2, 1, 3).contiguous().view((-1, seq_len, 512))
+        classifier_out, _ = self.bilstm_classifier(classifier_out)  # ignore the hidden states
+
+        classifier_out = classifier_out.contiguous().view((-1, 512))  # (b * 31, 512)
+        classifier_out = self.classifier(classifier_out)
+        classifier_out = classifier_out.view((-1, seq_len, self.num_class))  # (b, 31, num_class)
+
+        # pitch prediction: absolute value of the classifier output per frame;
+        # GAN_feature: intermediate pool-block features (same as get_feature_GAN);
+        # poolblock_out: pool-block output fed to the classifier branch
+        return torch.abs(classifier_out.squeeze()), GAN_feature, poolblock_out
+
+    @staticmethod
+    def init_weights(m):
+        if isinstance(m, nn.Linear):
+            nn.init.kaiming_uniform_(m.weight)
+            if m.bias is not None:
+                nn.init.constant_(m.bias, 0)
+        elif isinstance(m, nn.Conv2d):
+            nn.init.xavier_normal_(m.weight)
+        elif isinstance(m, nn.LSTM) or isinstance(m, nn.LSTMCell):
+            for p in m.parameters():
+                if p.data is None:
+                    continue
+
+                if len(p.shape) >= 2:
+                    nn.init.orthogonal_(p.data)
+                else:
+                    nn.init.normal_(p.data)
+
+
+class ResBlock(nn.Module):
+    def __init__(self, in_channels: int, out_channels: int, leaky_relu_slope=0.01):
+        super().__init__()
+        self.downsample = in_channels != out_channels
+
+        # BN / LReLU / MaxPool layer before the conv layer - see Figure 1b in the paper
+        self.pre_conv = nn.Sequential(
+            nn.BatchNorm2d(num_features=in_channels),
+            nn.LeakyReLU(leaky_relu_slope, inplace=True),
+            nn.MaxPool2d(kernel_size=(1, 2)),  # apply downsampling on the y axis only
+        )
+
+        # conv layers
+        self.conv = nn.Sequential(
+            nn.Conv2d(in_channels=in_channels, out_channels=out_channels,
+                      kernel_size=3, padding=1, bias=False),
+            nn.BatchNorm2d(out_channels),
+            nn.LeakyReLU(leaky_relu_slope, inplace=True),
+            nn.Conv2d(out_channels, out_channels, 3, padding=1, bias=False),
+        )
+
+        # 1 x 1 convolution layer to match the feature dimensions
+        self.conv1by1 = None
+        if self.downsample:
+            self.conv1by1 = nn.Conv2d(in_channels, out_channels, 1, bias=False)
+
+    def forward(self, x):
+        x = self.pre_conv(x)
+        if self.downsample:
+            x = self.conv(x) + self.conv1by1(x)
+        else:
+            x = self.conv(x) + x
+        return x
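A quick smoke test for the JDCNet pitch model above. This is a sketch, not part of the upload: the constructor arguments JDCNet(num_class=1, seq_len=192) and the input layout are assumptions based on how StyleTTS2 typically calls the pitch extractor (a mel spectrogram with a singleton channel axis); forward() transposes the last two axes internally.

    import torch

    model = JDCNet(num_class=1, seq_len=192)  # assumed constructor arguments
    model.eval()

    mel = torch.randn(4, 80, 192)  # (batch, n_mels, frames)
    with torch.no_grad():
        # add the channel axis: (b, 1, n_mels, frames)
        f0, gan_feature, features = model(mel.unsqueeze(1))

    print(f0.shape)  # torch.Size([4, 192]) - one |F0|-style value per frame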
Utils_original/PLBERT/config.yml
ADDED
@@ -0,0 +1,30 @@
+log_dir: "Checkpoint"
+mixed_precision: "fp16"
+data_folder: "wikipedia_20220301.en.processed"
+batch_size: 192
+save_interval: 5000
+log_interval: 10
+num_process: 1 # number of GPUs
+num_steps: 1000000
+
+dataset_params:
+    tokenizer: "transfo-xl-wt103"
+    token_separator: " " # token used for phoneme separator (space)
+    token_mask: "M" # token used for phoneme mask (M)
+    word_separator: 3039 # token used for word separator (<formula>)
+    token_maps: "token_maps.pkl" # token map path
+
+    max_mel_length: 512 # max phoneme length
+
+    word_mask_prob: 0.15 # probability to mask the entire word
+    phoneme_mask_prob: 0.1 # probability to mask each phoneme
+    replace_prob: 0.2 # probability to replace phonemes
+
+model_params:
+    vocab_size: 178
+    hidden_size: 768
+    num_attention_heads: 12
+    intermediate_size: 2048
+    max_position_embeddings: 512
+    num_hidden_layers: 12
+    dropout: 0.1
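The three probabilities under dataset_params drive PL-BERT's masked phoneme prediction. As an illustration only (this helper is hypothetical, not part of the repo, and the real PL-BERT pipeline may combine the probabilities differently), per-word masking could look like:

    import random

    def mask_word(phonemes, vocab, word_mask_prob=0.15,
                  phoneme_mask_prob=0.1, replace_prob=0.2, token_mask="M"):
        # With word_mask_prob, mask the whole word at once ...
        if random.random() < word_mask_prob:
            return [token_mask] * len(phonemes)
        # ... otherwise mask or replace individual phonemes.
        out = []
        for p in phonemes:
            r = random.random()
            if r < phoneme_mask_prob:
                out.append(token_mask)
            elif r < phoneme_mask_prob + replace_prob:
                out.append(random.choice(vocab))
            else:
                out.append(p)
        return out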
Utils_original/PLBERT/step_1000000.t7
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0714ff85804db43e06b3b0ac5749bf90cf206257c6c5916e8a98c5933b4c21e0
+size 25185187
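step_1000000.t7 is checked in as a Git LFS pointer: the three lines above record only the blob's sha256 and byte size, and `git lfs pull` downloads the ~25 MB checkpoint itself. A small sanity check (hypothetical helper, not in the repo) can confirm the checkout is complete:

    import hashlib
    from pathlib import Path

    def verify_lfs_file(path, expected_oid, expected_size):
        data = Path(path).read_bytes()
        if data.startswith(b"version https://git-lfs.github.com/spec/v1"):
            raise RuntimeError("still an LFS pointer - run `git lfs pull` first")
        return (hashlib.sha256(data).hexdigest() == expected_oid
                and len(data) == expected_size)

    # oid and size copied from the pointer above
    assert verify_lfs_file("Utils_original/PLBERT/step_1000000.t7",
                           "0714ff85804db43e06b3b0ac5749bf90cf206257c6c5916e8a98c5933b4c21e0",
                           25185187)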
Utils_original/PLBERT/util.py
ADDED
@@ -0,0 +1,42 @@
+import os
+import yaml
+import torch
+from transformers import AlbertConfig, AlbertModel
+
+class CustomAlbert(AlbertModel):
+    def forward(self, *args, **kwargs):
+        # Call the original forward method
+        outputs = super().forward(*args, **kwargs)
+
+        # Only return the last_hidden_state
+        return outputs.last_hidden_state
+
+
+def load_plbert(log_dir):
+    config_path = os.path.join(log_dir, "config.yml")
+    plbert_config = yaml.safe_load(open(config_path))
+
+    albert_base_configuration = AlbertConfig(**plbert_config['model_params'])
+    bert = CustomAlbert(albert_base_configuration)
+
+    files = os.listdir(log_dir)
+    ckpts = []
+    for f in files:
+        if f.startswith("step_"): ckpts.append(f)
+
+    iters = [int(f.split('_')[-1].split('.')[0]) for f in ckpts if os.path.isfile(os.path.join(log_dir, f))]
+    iters = sorted(iters)[-1]
+
+    checkpoint = torch.load(log_dir + "/step_" + str(iters) + ".t7", map_location='cpu')
+    state_dict = checkpoint['net']
+    from collections import OrderedDict
+    new_state_dict = OrderedDict()
+    for k, v in state_dict.items():
+        name = k[7:]  # remove `module.`
+        if name.startswith('encoder.'):
+            name = name[8:]  # remove `encoder.`
+        new_state_dict[name] = v
+    del new_state_dict["embeddings.position_ids"]
+    bert.load_state_dict(new_state_dict, strict=False)
+
+    return bert
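Typical usage of load_plbert, sketched under the assumption that the target directory holds config.yml plus a step_*.t7 checkpoint (as Utils_original/PLBERT does); the token ids below are random placeholders:

    import torch

    plbert = load_plbert("Utils_original/PLBERT")
    plbert.eval()

    # PL-BERT consumes phoneme token ids; vocab_size is 178 per config.yml
    tokens = torch.randint(0, 178, (1, 64))
    with torch.no_grad():
        hidden = plbert(tokens, attention_mask=torch.ones_like(tokens))

    print(hidden.shape)  # torch.Size([1, 64, 768]) - CustomAlbert returns only last_hidden_state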
Utils_original/__init__.py
ADDED
@@ -0,0 +1 @@
+
audio_clone/sangnq_clone.mp3
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:abfd825da72f2679d98a70288fdce777e9b8a2f9e0a1ae94ac80c4aa525e9fbf
+size 1756557