Slightly updated Vbach
Browse files

- MVSepLess_Epsilon_Colab.ipynb +12 -5
- mvsepless/__init__.py +30 -12
- mvsepless/model_manager.py +92 -2
- mvsepless/vbach_infer.py +377 -46
- mvsepless/vbach_lib/algorithm/__init__.py +0 -0
- mvsepless/vbach_lib/algorithm/attentions.py +243 -0
- mvsepless/vbach_lib/algorithm/commons.py +138 -0
- mvsepless/vbach_lib/algorithm/discriminators.py +262 -0
- mvsepless/vbach_lib/algorithm/encoders.py +209 -0
- mvsepless/vbach_lib/algorithm/generators/__init__.py +0 -0
- mvsepless/vbach_lib/algorithm/generators/hifigan.py +228 -0
- mvsepless/vbach_lib/algorithm/generators/hifigan_mrf.py +374 -0
- mvsepless/vbach_lib/algorithm/generators/hifigan_nsf.py +235 -0
- mvsepless/vbach_lib/algorithm/generators/refinegan.py +451 -0
- mvsepless/vbach_lib/algorithm/modules.py +117 -0
- mvsepless/vbach_lib/algorithm/normalization.py +26 -0
- mvsepless/vbach_lib/algorithm/residuals.py +261 -0
- mvsepless/vbach_lib/algorithm/synthesizers.py +243 -0
MVSepLess_Epsilon_Colab.ipynb
CHANGED

@@ -75,6 +75,7 @@
 "faiss-cpu==1.11\n",
 "local-attention==1.11.1\n",
 "tenacity==9.1.2\n",
+"pyworld\n",
 "gdown\n",
 "\"\"\"\n",
 "with open(\"requirements.txt\", \"w\", encoding=\"utf-8\") as f:\n",
@@ -146,7 +147,7 @@
 "\n",
 "input_url = \"\" # @param {\"type\":\"string\",\"placeholder\":\"Ссылка на аудио/видео\"}\n",
 "output_dir = \"/content/downloaded\" # @param {\"type\":\"string\",\"placeholder\":\"Директория для сохранения скачанного аудио\"}\n",
-"cookies_path = \"\" # @param {\"type\":\"string\",\"placeholder\":\"Путь к cookies (д
+"cookies_path = \"\" # @param {\"type\":\"string\",\"placeholder\":\"Путь к cookies (дял успешного скачивания с ютуба)\"}\n",
 "downloaded_file = dw_yt_dlp(url=input_url, output_dir=output_dir, cookie=cookies_path)\n"
 ],
 "metadata": {
@@ -472,9 +473,15 @@
 "#@markdown * Имя модели:\n",
 "voicemodel_name = \"\" # @param {\"type\":\"string\",\"placeholder\":\"Имя модели\"}\n",
 "# @markdown ---\n",
+"# @markdown ### Hubert\n",
+"# @markdown * Стэк\n",
+"stack = \"fairseq\" # @param [\"fairseq\",\"transformers\"]\n",
+"# @markdown * Имя модели для fairseq\n",
+"fairseq_embedder = \"hubert_base\" # @param [\"hubert_base\",\"contentvec_base\",\"korean_hubert_base\",\"chinese_hubert_base\",\"portuguese_hubert_base\",\"japanese_hubert_base\"]\n",
+"# @markdown * Имя модели для transformers\n",
+"transformers_embedder = \"contentvec\" # @param [\"contentvec\",\"spin\",\"spin-v2\",\"chinese-hubert-base\",\"japanese-hubert-base\",\"korean-hubert-base\"]\n",
+"# @markdown ---\n",
 "# @markdown ### Настройки преобразования\n",
-"# @markdown * Альтернативный пайплайн\n",
-"alt_pipeline = False # @param {type:\"boolean\"}\n",
 "# @markdown * Влияние индекса\n",
 "index_rate = 0 # @param {\"type\":\"slider\",\"min\":0,\"max\":1,\"step\":0.01}\n",
 "# @markdown * Стерео режим\n",
@@ -520,7 +527,6 @@
 " f\"--index_rate {index_rate}\",\n",
 " f\"--output_name \\\"{output_name}\\\"\",\n",
 " \"--format_name\",\n",
-" \"--alt_pipeline\" if alt_pipeline == True else \"\",\n",
 " f\"--stereo_mode {stereo_mode}\",\n",
 " f\"--method_pitch {method_pitch}\",\n",
 " f\"--pitch {pitch}\",\n",
@@ -529,7 +535,8 @@
 " f\"--rms {rms}\",\n",
 " f\"--protect {protect}\",\n",
 " f\"--f0_min {f0_min}\",\n",
-" f\"--f0_max {f0_max}\"\n",
+" f\"--f0_max {f0_max}\",\n",
+" f\"--embedder_name {fairseq_embedder}\" if stack == \"fairseq\" else f\"--embedder_name {transformers_embedder} --use_transformers\"\n",
 "]\n",
 "\n",
 "!{\" \".join(cmd)}"
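For reference, the embedder flag appended at the end of `cmd` resolves like this. A standalone sketch of that expression; the variable values here are illustrative, not the notebook defaults:

```python
# Mirrors the embedder flag selection from the notebook cell above.
stack = "transformers"
fairseq_embedder = "hubert_base"
transformers_embedder = "contentvec"

embedder_flag = (
    f"--embedder_name {fairseq_embedder}"
    if stack == "fairseq"
    else f"--embedder_name {transformers_embedder} --use_transformers"
)
print(embedder_flag)  # --embedder_name contentvec --use_transformers
```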
mvsepless/__init__.py
CHANGED

@@ -1202,14 +1202,17 @@ class Inverter_UI(MVSEPLESS):
         return None

 class Vbach(MVSEPLESS):
-
-
-
-
-
-
-
-
+    def __init__(self):
+        self.pitch_methods = ("rmvpe+", "fcpe", "mangio-crepe", "mangio-crepe-tiny", "harvest", "pm")
+        self.hop_length_values = (8, 512)
+        self.index_rates_values = (0, 1)
+        self.filter_radius_values = (0, 7)
+        self.protect_values = (0, 0.5)
+        self.rms_values = (0, 1)
+        self.f0_min_values = (50, 3000)
+        self.f0_max_values = (300, 6000)
+        self.fairseq_embedders = list(self.vbach_model_manager.huberts_fairseq_dict.keys())
+        self.transformers_embedders = list(self.vbach_model_manager.huberts_transformers_dict.keys())

     def UI(self):
         with gr.Tab("Инференс"):
@@ -1251,7 +1254,7 @@ class Vbach(MVSEPLESS):
                 outputs=[hop_length]
             )
             def show_mangio_crepe_hop_length(pitch_method):
-                return gr.update(visible=True if pitch_method in ["mangio-crepe"] else False)
+                return gr.update(visible=True if pitch_method in ["mangio-crepe", "mangio-crepe-tiny"] else False)
             stereo_mode = gr.Radio(
                 choices=["mono", "left/right", "sim/dif"],
                 label="Стерео режим",
@@ -1260,6 +1263,19 @@
                 interactive=True
             )
             alt_pl = gr.Checkbox(label="Альтернативный пайплайн", info="Аудио нарезается на фиксированные чанки с перекрытием, что исключает любые щелчки на выходе (исключение - если есть щелчки в самой модели из-за грязного датасета)\nРазмер чанка вычисляется на основе 40% свободной видеопамяти", value=False, interactive=True)
+
+            with gr.Group():
+                embedder_name = gr.Radio(label="Модель Hubert", choices=self.fairseq_embedders, value=self.fairseq_embedders[0])
+                transformers_mode = gr.Checkbox(label="Использовать стек Transformers", value=False, interactive=True)
+                @transformers_mode.change(
+                    inputs=[transformers_mode], outputs=[embedder_name]
+                )
+                def change_embedders(tr_m):
+                    if tr_m:
+                        return gr.update(value=self.transformers_embedders[0], choices=self.transformers_embedders)
+                    else:
+                        return gr.update(choices=self.fairseq_embedders, value=self.fairseq_embedders[0])
+
             with gr.Accordion(label="Дополнительные настройки",open=False):
                 with gr.Group():
                     with gr.Row():
@@ -1297,10 +1313,12 @@ class Vbach(MVSEPLESS):
                     format_output_name_check,
                     output_format,
                     stereo_mode,
-                    alt_pl
+                    alt_pl,
+                    embedder_name,
+                    transformers_mode
                 ], outputs=[converted_state, status]
             )
-            def vbach_convert_batch(ifl, mn, pm, p, hl, ir, fr, rms, pr, f0min, f0max, on, fn, of, sm, alt_pipeline):
+            def vbach_convert_batch(ifl, mn, pm, p, hl, ir, fr, rms, pr, f0min, f0max, on, fn, of, sm, alt_pipeline, em_n, tr_m):
                 output_converted_files = []
                 progress = gr.Progress()
                 if ifl:
@@ -1308,7 +1326,7 @@ class Vbach(MVSEPLESS):
                     try:
                         print(f"Файл {i} из {len(ifl)}: {file}")
                         progress(progress=(i / len(ifl)), desc=f"Файл {i} из {len(ifl)}")
-                        out_conv = vbach_inference(input_file=file, model_name=mn, output_dir=tempfile.mkdtemp(), output_name=on, format_name=True if len(ifl) > 1 else fn, output_format=of, pitch=p, method_pitch=pm, output_bitrate=320, add_params={ "index_rate": ir,"filter_radius": fr,"protect": pr,"rms": rms,"mangio_crepe_hop_length": hl,"f0_min": f0min,"f0_max": f0max,"stereo_mode": sm }, pipeline_mode="alt" if alt_pipeline == True else "orig")
+                        out_conv = vbach_inference(input_file=file, model_name=mn, output_dir=tempfile.mkdtemp(), output_name=on, format_name=True if len(ifl) > 1 else fn, output_format=of, pitch=p, method_pitch=pm, output_bitrate=320, add_params={ "index_rate": ir,"filter_radius": fr,"protect": pr,"rms": rms,"mangio_crepe_hop_length": hl,"f0_min": f0min,"f0_max": f0max,"stereo_mode": sm }, pipeline_mode="alt" if alt_pipeline == True else "orig", embedder_name=em_n, stack="transformers" if tr_m == True else "fairseq")
                        output_converted_files.append(out_conv)
                    except Exception as e:
                        print(e)
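The embedder picker above relies on Gradio's event-listener decorator to swap a Radio's choice list in place. A minimal self-contained sketch of the same pattern, assuming Gradio 4.x and shortened choice lists:

```python
# Standalone sketch of the choice-swapping pattern used in Vbach.UI above.
import gradio as gr

fairseq_embedders = ["hubert_base", "contentvec_base"]
transformers_embedders = ["contentvec", "spin"]

with gr.Blocks() as demo:
    embedder_name = gr.Radio(label="Модель Hubert", choices=fairseq_embedders, value=fairseq_embedders[0])
    transformers_mode = gr.Checkbox(label="Использовать стек Transformers", value=False)

    @transformers_mode.change(inputs=[transformers_mode], outputs=[embedder_name])
    def change_embedders(tr_m):
        # Swap the Radio's choices (and reset its value) when the stack toggles
        if tr_m:
            return gr.update(value=transformers_embedders[0], choices=transformers_embedders)
        return gr.update(choices=fairseq_embedders, value=fairseq_embedders[0])
```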
mvsepless/model_manager.py
CHANGED

@@ -254,15 +254,105 @@ class VbachModelManager:
     def __init__(self):
         self.rmvpe_path = os.path.join(script_dir, "predictors", "rmvpe.pt")
         self.fcpe_path = os.path.join(script_dir, "predictors", "fcpe.pt")
-        self.
-        self.
+        self.custom_fairseq_huberts_dir = os.path.join(script_dir, "custom_fairseq_embedders")
+        self.custom_transformers_huberts_dir = os.path.join(script_dir, "custom_transformers_embedders")
+        self.huberts_fairseq_dict = {
+            "hubert_base": {
+                "url": "https://huggingface.co/Politrees/RVC_resources/resolve/main/embedders/hubert_base.pt",
+                "local_path": os.path.join(self.custom_fairseq_huberts_dir, "hubert_base.pt")
+            },
+            "contentvec_base": {
+                "url": "https://huggingface.co/Politrees/RVC_resources/resolve/main/embedders/contentvec_base.pt",
+                "local_path": os.path.join(self.custom_fairseq_huberts_dir, "contentvec_base.pt")
+            },
+            "korean_hubert_base": {
+                "url": "https://huggingface.co/Politrees/RVC_resources/resolve/main/embedders/korean_hubert_base.pt",
+                "local_path": os.path.join(self.custom_fairseq_huberts_dir, "korean_hubert_base.pt")
+            },
+            "chinese_hubert_base": {
+                "url": "https://huggingface.co/Politrees/RVC_resources/resolve/main/embedders/chinese_hubert_base.pt",
+                "local_path": os.path.join(self.custom_fairseq_huberts_dir, "chinese_hubert_base.pt")
+            },
+            "portuguese_hubert_base": {
+                "url": "https://huggingface.co/Politrees/RVC_resources/resolve/main/embedders/portuguese_hubert_base.pt",
+                "local_path": os.path.join(self.custom_fairseq_huberts_dir, "portuguese_hubert_base.pt")
+            },
+            "japanese_hubert_base": {
+                "url": "https://huggingface.co/Politrees/RVC_resources/resolve/main/embedders/japanese_hubert_base.pt",
+                "local_path": os.path.join(self.custom_fairseq_huberts_dir, "japanese_hubert_base.pt")
+            }
+        }
+        self.huberts_transformers_dict = {
+            "contentvec": {
+                "base_dir": os.path.join(self.custom_transformers_huberts_dir, "contentvec"),
+                "url_bin": "https://huggingface.co/IAHispano/Applio/resolve/main/Resources/embedders/contentvec/pytorch_model.bin",
+                "url_json": "https://huggingface.co/IAHispano/Applio/resolve/main/Resources/embedders/contentvec/config.json",
+                "local_bin": os.path.join(self.custom_transformers_huberts_dir, "contentvec", "pytorch_model.bin"),
+                "local_json": os.path.join(self.custom_transformers_huberts_dir, "contentvec", "config.json")
+            },
+            "spin": {
+                "base_dir": os.path.join(self.custom_transformers_huberts_dir, "spin"),
+                "url_bin": "https://huggingface.co/IAHispano/Applio/resolve/main/Resources/embedders/spin/pytorch_model.bin",
+                "url_json": "https://huggingface.co/IAHispano/Applio/resolve/main/Resources/embedders/spin/config.json",
+                "local_bin": os.path.join(self.custom_transformers_huberts_dir, "spin", "pytorch_model.bin"),
+                "local_json": os.path.join(self.custom_transformers_huberts_dir, "spin", "config.json")
+            },
+            "spin-v2": {
+                "base_dir": os.path.join(self.custom_transformers_huberts_dir, "spinv2"),
+                "url_bin": "https://huggingface.co/IAHispano/Applio/resolve/main/Resources/embedders/spin-v2/pytorch_model.bin",
+                "url_json": "https://huggingface.co/IAHispano/Applio/resolve/main/Resources/embedders/spin-v2/config.json",
+                "local_bin": os.path.join(self.custom_transformers_huberts_dir, "spinv2", "pytorch_model.bin"),
+                "local_json": os.path.join(self.custom_transformers_huberts_dir, "spinv2", "config.json")
+            },
+            "chinese-hubert-base": {
+                "base_dir": os.path.join(self.custom_transformers_huberts_dir, "chinese_hubert_base"),
+                "url_bin": "https://huggingface.co/IAHispano/Applio/resolve/main/Resources/embedders/chinese_hubert_base/pytorch_model.bin",
+                "url_json": "https://huggingface.co/IAHispano/Applio/resolve/main/Resources/embedders/chinese_hubert_base/config.json",
+                "local_bin": os.path.join(self.custom_transformers_huberts_dir, "chinese_hubert_base", "pytorch_model.bin"),
+                "local_json": os.path.join(self.custom_transformers_huberts_dir, "chinese_hubert_base", "config.json")
+            },
+            "japanese-hubert-base": {
+                "base_dir": os.path.join(self.custom_transformers_huberts_dir, "japanese_hubert_base"),
+                "url_bin": "https://huggingface.co/IAHispano/Applio/resolve/main/Resources/embedders/japanese_hubert_base/pytorch_model.bin",
+                "url_json": "https://huggingface.co/IAHispano/Applio/resolve/main/Resources/embedders/japanese_hubert_base/config.json",
+                "local_bin": os.path.join(self.custom_transformers_huberts_dir, "japanese_hubert_base", "pytorch_model.bin"),
+                "local_json": os.path.join(self.custom_transformers_huberts_dir, "japanese_hubert_base", "config.json")
+            },
+            "korean-hubert-base": {
+                "base_dir": os.path.join(self.custom_transformers_huberts_dir, "korean_hubert_base"),
+                "url_bin": "https://huggingface.co/IAHispano/Applio/resolve/main/Resources/embedders/korean_hubert_base/pytorch_model.bin",
+                "url_json": "https://huggingface.co/IAHispano/Applio/resolve/main/Resources/embedders/korean_hubert_base/config.json",
+                "local_bin": os.path.join(self.custom_transformers_huberts_dir, "korean_hubert_base", "pytorch_model.bin"),
+                "local_json": os.path.join(self.custom_transformers_huberts_dir, "korean_hubert_base", "config.json")
+            }
+        }
+        self.requirements = [["https://huggingface.co/Politrees/RVC_resources/resolve/main/predictors/rmvpe.pt", self.rmvpe_path], ["https://huggingface.co/Politrees/RVC_resources/resolve/main/predictors/fcpe.pt", self.fcpe_path]]
         self.voicemodels_dir = os.path.join(script_dir, "vbach_models_cache")
         os.makedirs(self.voicemodels_dir, exist_ok=True)
         self.voicemodels_info = os.path.join(self.voicemodels_dir, "vbach_models.json")
         self.voicemodels: Dict[str, Dict[str, str]] = {}
         self.download_requirements()
+        self.check_hubert("hubert_base")
         self.check_and_load()
         pass
+
+    def check_hubert(self, embedder_name):
+        if embedder_name in self.huberts_fairseq_dict:
+            if not os.path.exists(self.huberts_fairseq_dict[embedder_name]["local_path"]):
+                dw_file(self.huberts_fairseq_dict[embedder_name]["url"], self.huberts_fairseq_dict[embedder_name]["local_path"])
+            return self.huberts_fairseq_dict[embedder_name]["local_path"]
+        else:
+            return None
+
+    def check_hubert_transformers(self, embedder_name):
+        if embedder_name in self.huberts_transformers_dict:
+            os.makedirs(self.huberts_transformers_dict[embedder_name]["base_dir"], exist_ok=True)
+            if not os.path.exists(self.huberts_transformers_dict[embedder_name]["local_bin"]) and not os.path.exists(self.huberts_transformers_dict[embedder_name]["local_json"]):
+                dw_file(self.huberts_transformers_dict[embedder_name]["url_bin"], self.huberts_transformers_dict[embedder_name]["local_bin"])
+                dw_file(self.huberts_transformers_dict[embedder_name]["url_json"], self.huberts_transformers_dict[embedder_name]["local_json"])
+            return self.huberts_transformers_dict[embedder_name]["base_dir"]
+        else:
+            return None

     def write_voicemodels_info(self):
         with open(self.voicemodels_info, "w") as f:
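Both new checkers implement the same download-on-demand contract: return a usable local path (fairseq) or directory (transformers) for a known embedder name, fetching the files first if the cache is cold, and return None for an unknown name. A usage sketch, assuming a constructed `VbachModelManager` bound to `model_manager` as elsewhere in the repo:

```python
# Download-on-demand contract of check_hubert / check_hubert_transformers.
path = model_manager.check_hubert("hubert_base")  # downloads hubert_base.pt on first call
if path is None:
    # Unknown embedder name: callers (see vbach_infer.py) raise in this case
    raise ValueError("unknown fairseq embedder")

base_dir = model_manager.check_hubert_transformers("contentvec")
# base_dir holds pytorch_model.bin + config.json, ready for from_pretrained(); None if unknown
```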
mvsepless/vbach_infer.py
CHANGED

@@ -1,6 +1,7 @@
 import os
 import gc
 import torch
+from torch import nn
 import torch.nn.functional as F
 import torchcrepe
 import faiss
@@ -9,6 +10,11 @@ import math
 import numpy as np
 from scipy import signal
 import argparse
+from functools import lru_cache
+import pyworld
+import parselmouth
+from transformers import HubertModel
+from typing import Tuple, Any, Dict
 script_dir = os.path.dirname(os.path.abspath(__file__))

 FILTER_ORDER = 5
@@ -44,6 +50,26 @@ namer = Namer()
 RMVPE_DIR = model_manager.rmvpe_path
 FCPE_DIR = model_manager.fcpe_path

+input_audio_path2wav = {}
+
+class HubertModelWithFinalProj(HubertModel):
+    def __init__(self, config):
+        super().__init__(config)
+        self.final_proj = nn.Linear(config.hidden_size, config.classifier_proj_size)
+
+@lru_cache
+def get_harvest_f0(input_audio_path, fs, f0max, f0min, frame_period):
+    audio = input_audio_path2wav[input_audio_path]
+    f0, t = pyworld.harvest(
+        audio,
+        fs=fs,
+        f0_ceil=f0max,
+        f0_floor=f0min,
+        frame_period=frame_period,
+    )
+    f0 = pyworld.stonemask(audio, f0, t, fs)
+    return f0
+
 def remove_center(input_array, samplerate, rdf=0.99999, window_size=2048, overlap=2, window_type="blackman"):

     left = input_array[0]
@@ -137,7 +163,7 @@ class AudioProcessor:

 # Класс для преобразования голоса
 class VC:
-    def __init__(self, tgt_sr, config):
+    def __init__(self, tgt_sr, config, stack="fairseq"):
         """
         Инициализация параметров для преобразования голоса.
         """
@@ -156,10 +182,11 @@
         self.t_max = self.sample_rate * self.x_max
         self.time_step = self.window / self.sample_rate * 1000
         self.device = config.device
+        self.vc = self._vc_transformers if stack == "transformers" else self._vc

-    def
+    def get_f0_mangio_crepe(self, x, f0_min, f0_max, p_len, hop_length, model="full"):
         """
-        Получает F0 с использованием модели crepe.
+        Получает F0 с использованием модели mangio-crepe.
         """
         x = x.astype(np.float32)
         x /= np.quantile(np.abs(x), 0.999)
@@ -219,12 +246,36 @@
         """
         Получает F0 с использованием выбранного метода.
         """
-        global
+        global input_audio_path2wav
+        time_step = self.window / self.sample_rate * 1000
         f0_mel_min = 1127 * np.log(1 + f0_min / 700)
         f0_mel_max = 1127 * np.log(1 + f0_max / 700)

-        if f0_method
-            f0 = self.
+        if f0_method in ["mangio-crepe", "mangio-crepe-tiny"]:
+            f0 = self.get_f0_mangio_crepe(x, f0_min, f0_max, p_len, int(hop_length), "tiny" if f0_method == "mangio-crepe-tiny" else "full")
+
+        elif f0_method == "harvest":
+            input_audio_path2wav = {}
+            input_audio_path2wav[inputaudio_path] = x.astype(np.double)
+            f0 = get_harvest_f0(inputaudio_path, self.sample_rate, f0_max, f0_min, 10)
+            if filter_radius > 2:
+                f0 = signal.medfilt(f0, 3)
+        elif f0_method == "pm":
+            f0 = (
+                parselmouth.Sound(x, self.sample_rate)
+                .to_pitch_ac(
+                    time_step=time_step / 1000,
+                    voicing_threshold=0.6,
+                    pitch_floor=f0_min,
+                    pitch_ceiling=f0_max,
+                )
+                .selected_array["frequency"]
+            )
+            pad_size = (p_len - len(f0) + 1) // 2
+            if pad_size > 0 or p_len - len(f0) - pad_size > 0:
+                f0 = np.pad(
+                    f0, [[pad_size, p_len - len(f0) - pad_size]], mode="constant"
+                )

         elif f0_method == "rmvpe+":
             params = {
@@ -274,7 +325,7 @@
         f0_coarse = np.rint(f0_mel).astype(int)
         return f0_coarse, f0bak

-    def
+    def _vc(
         self,
         model,
         net_g,
@@ -375,6 +426,91 @@

         return audio1

+    def _vc_transformers(
+        self,
+        model,
+        net_g,
+        sid,
+        audio0,
+        pitch,
+        pitchf,
+        index,
+        big_npy,
+        index_rate,
+        version,
+        protect,
+    ):
+        """
+        Performs voice conversion on a given audio segment.
+
+        Args:
+            model: The feature extractor model.
+            net_g: The generative model for synthesizing speech.
+            sid: Speaker ID for the target voice.
+            audio0: The input audio segment.
+            pitch: Quantized F0 contour for pitch guidance.
+            pitchf: Original F0 contour for pitch guidance.
+            index: FAISS index for speaker embedding retrieval.
+            big_npy: Speaker embeddings stored in a NumPy array.
+            index_rate: Blending rate for speaker embedding retrieval.
+            version: Model version (Keep to support old models).
+            protect: Protection level for preserving the original pitch.
+        """
+        with torch.no_grad():
+            pitch_guidance = pitch != None and pitchf != None
+            # prepare source audio
+            feats = torch.from_numpy(audio0).float()
+            feats = feats.mean(-1) if feats.dim() == 2 else feats
+            assert feats.dim() == 1, feats.dim()
+            feats = feats.view(1, -1).to(self.device)
+            # extract features
+            feats = model(feats)["last_hidden_state"]
+            feats = (
+                model.final_proj(feats[0]).unsqueeze(0) if version == "v1" else feats
+            )
+            # make a copy for pitch guidance and protection
+            feats0 = feats.clone() if pitch_guidance else None
+            if (
+                index
+            ):  # set by parent function, only true if index is available, loaded, and index rate > 0
+                feats = self._retrieve_speaker_embeddings(
+                    feats, index, big_npy, index_rate
+                )
+            # feature upsampling
+            feats = F.interpolate(feats.permute(0, 2, 1), scale_factor=2).permute(
+                0, 2, 1
+            )
+            # adjust the length if the audio is short
+            p_len = min(audio0.shape[0] // self.window, feats.shape[1])
+            if pitch_guidance:
+                feats0 = F.interpolate(feats0.permute(0, 2, 1), scale_factor=2).permute(
+                    0, 2, 1
+                )
+                pitch, pitchf = pitch[:, :p_len], pitchf[:, :p_len]
+                # Pitch protection blending
+                if protect < 0.5:
+                    pitchff = pitchf.clone()
+                    pitchff[pitchf > 0] = 1
+                    pitchff[pitchf < 1] = protect
+                    feats = feats * pitchff.unsqueeze(-1) + feats0 * (
+                        1 - pitchff.unsqueeze(-1)
+                    )
+                    feats = feats.to(feats0.dtype)
+            else:
+                pitch, pitchf = None, None
+            p_len = torch.tensor([p_len], device=self.device).long()
+            audio1 = (
+                (net_g.infer(feats.float(), p_len, pitch, pitchf.float(), sid)[0][0, 0])
+                .data.cpu()
+                .float()
+                .numpy()
+            )
+            # clean up
+            del feats, feats0, p_len
+            if torch.cuda.is_available():
+                torch.cuda.empty_cache()
+            return audio1
+
     def pipeline(
         self,
         model,
@@ -578,8 +714,6 @@
         f0_min=50,
         f0_max=1100,
     ):
-        import torch
-        import numpy as np

         device = self.device
         audio = signal.filtfilt(bh, ah, audio)
@@ -798,7 +932,17 @@
         # Для CPU или в случае ошибки используем консервативный размер
         return min(base_chunk_size, audio_length)

-
+    def _retrieve_speaker_embeddings(self, feats, index, big_npy, index_rate): # для Transformers
+        npy = feats[0].cpu().numpy()
+        score, ix = index.search(npy, k=8)
+        weight = np.square(1 / score)
+        weight /= weight.sum(axis=1, keepdims=True)
+        npy = np.sum(big_npy[ix] * np.expand_dims(weight, axis=2), axis=1)
+        feats = (
+            torch.from_numpy(npy).unsqueeze(0).to(self.device) * index_rate
+            + (1 - index_rate) * feats
+        )
+        return feats

 def overlay_mono_on_stereo(monoaudio, stereoaudio, gain=0.5):
     if monoaudio is None or stereoaudio is None:
@@ -992,35 +1136,128 @@ def load_hubert(device, is_half, model_path):
     hubert.eval()
     return hubert

-
-
-
-
-
-
-
+def get_vc(
+    device: torch.device,
+    is_half: bool,
+    config: Any,
+    model_path: str,
+    stack: Any
+) -> Tuple[Dict[str, Any], str, torch.nn.Module, int, VC, int]:
+    """
+    Загружает модель RVC для голосовой конвертации.
+
+    Args:
+        device: Устройство для загрузки модели (CPU/GPU)
+        is_half: Использовать ли половинную точность (float16)
+        config: Конфигурация модели
+        model_path: Путь к файлу модели (.pth)
+        stack: Объект стека для инициализации VC
+
+    Returns:
+        Tuple containing:
+        - cpt: Checkpoint модели
+        - version: Версия модели
+        - net_g: Сетевой генератор
+        - tgt_sr: Целевая частота дискретизации
+        - vc: Объект голосовой конвертации
+        - use_f0: Использование F0 (0/1)
+
+    Raises:
+        ValueError: Если файл модели имеет некорректный формат
+        FileNotFoundError: Если файл модели не найден
+    """
+
+    # Проверка существования файла
+    if not os.path.isfile(model_path):
+        raise FileNotFoundError(f"Файл модели не найден: {model_path}")
+
+    try:
+        # Загружаем состояние модели
+        cpt = torch.load(model_path, map_location="cpu", weights_only=True)
+
+        # Проверяем структуру загруженного файла
+        required_keys = ["config", "weight"]
+        missing_keys = [key for key in required_keys if key not in cpt]
+
+        if missing_keys:
+            raise ValueError(
+                f"Некорректный формат модели {model_path}. "
+                f"Отсутствующие ключи: {missing_keys}. "
+                "Используйте модель RVC формата."
+            )
+
+        # Извлекаем параметры модели
+        tgt_sr = cpt["config"][-1]
+
+        # Обновляем размерность в config на основе весов
+        emb_weight_shape = cpt["weight"]["emb_g.weight"].shape
+        cpt["config"][-3] = emb_weight_shape[0]  # Количество спикеров
+
+        # Получаем дополнительные параметры модели
+        use_f0 = cpt.get("f0", 1)
+        version = cpt.get("version", "v1")
+        vocoder = cpt.get("vocoder", "HiFi-GAN")
+
+        # Определяем размерность входных данных в зависимости от версии
+        text_enc_hidden_dim = 768 if version == "v2" else 256
+
+        print(f"Загружаем модель: {os.path.basename(model_path)}")
+        print(f"Версия: {version}, F0: {use_f0}, Частота: {tgt_sr}Hz")
+        print(f"Количество спикеров: {emb_weight_shape[0]}")
+
+        # Инициализируем синтезатор
+        net_g = Synthesizer(
+            *cpt["config"],
+            use_f0=use_f0,
+            text_enc_hidden_dim=text_enc_hidden_dim,
+            vocoder=vocoder
         )
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+
+        # Удаляем ненужный слой enc_q
+        if hasattr(net_g, 'enc_q'):
+            del net_g.enc_q
+        else:
+            print("Предупреждение: слой enc_q не найден в модели")
+
+        # Загружаем веса с проверкой
+        missing_keys, unexpected_keys = net_g.load_state_dict(
+            cpt["weight"],
+            strict=False
+        )
+
+        if missing_keys:
+            print(f"Предупреждение: отсутствующие ключи при загрузке модели: {missing_keys}")
+
+        if unexpected_keys:
+            print(f"Предупреждение: неожиданные ключи при загрузке модели: {unexpected_keys}")
+
+        # Настройка модели для inference
+        net_g.eval()
+
+        # Перемещаем модель на нужное устройство и устанавливаем точность
+        net_g = net_g.to(device)
+        if is_half:
+            net_g = net_g.half()
+            print("Модель переведена в половинную точность (float16)")
+        else:
+            net_g = net_g.float()
+            print("Модель использует полную точность (float32)")
+
+        # Инициализируем объект конвертера голоса
+        vc = VC(tgt_sr, config, stack)
+
+        # Очистка памяти CPU
+        if torch.cuda.is_available():
+            torch.cuda.empty_cache()
+
+        print(f"Модель успешно загружена на устройство: {device}")
+
+        return cpt, version, net_g, tgt_sr, vc, use_f0
+
+    except torch.serialization.pickle.UnpicklingError as e:
+        raise ValueError(f"Файл {model_path} поврежден или имеет неверный формат") from e
+    except Exception as e:
+        raise RuntimeError(f"Ошибка при загрузке модели: {str(e)}") from e

 def rvc_infer(
     index_path,
@@ -1039,6 +1276,7 @@ def rvc_infer(
     hop_length,
     vc,
     hubert_model,
+    pitch_guidance,
     f0_min=50,
     f0_max=1100,
     format_output="wav",
@@ -1053,7 +1291,6 @@
     pipeline = vc.pipeline

     mid, left, right = loadaudio(input_path, 16000, stereo_mode)
-    pitch_guidance = cpt.get("f0", 1)

     if stereo_mode == "mono":
         if mid is None:
@@ -1263,15 +1500,87 @@ def voice_conversion(
     format_output,
     output_bitrate,
     stereo_mode,
-
+    embedder_name="hubert_base",
+    pipeline_mode="orig"
+):
+    rvc_model_path, rvc_index_path = load_rvc_model(voice_model)
+
+    config = Config()
+    hubert_path = model_manager.check_hubert(embedder_name)
+    if not hubert_path:
+        raise ValueError(
+            f"\033[91mЭмбеддера {embedder_name} не существует. "
+            "Возможно, вы неправильно ввели имя.\033[0m"
+        )
+    hubert_model = load_hubert(config.device, config.is_half, hubert_path)
+    cpt, version, net_g, tgt_sr, vc, use_f0 = get_vc(
+        config.device, config.is_half, config, rvc_model_path, "fairseq"
+    )
+
+    outputaudio = rvc_infer(
+        rvc_index_path,
+        index_rate,
+        vocals_path,
+        output_path,
+        pitch,
+        f0_method,
+        cpt,
+        version,
+        net_g,
+        filter_radius,
+        tgt_sr,
+        volume_envelope,
+        protect,
+        hop_length,
+        vc,
+        hubert_model,
+        use_f0,
+        f0_min,
+        f0_max,
+        format_output,
+        output_bitrate,
+        stereo_mode,
+        pipeline_mode
+    )
+
+    del hubert_model, cpt, net_g, vc
+    gc.collect()
+    torch.cuda.empty_cache()
+    return outputaudio
+
+def voice_conversion_transformers(
+    voice_model,
+    vocals_path,
+    output_path,
+    pitch,
+    f0_method,
+    index_rate,
+    filter_radius,
+    volume_envelope,
+    protect,
+    hop_length,
+    f0_min,
+    f0_max,
+    format_output,
+    output_bitrate,
+    stereo_mode,
+    embedder_name="contentvec",
     pipeline_mode="orig"
 ):
     rvc_model_path, rvc_index_path = load_rvc_model(voice_model)

     config = Config()
-
-
-
+    hubert_path = model_manager.check_hubert_transformers(embedder_name)
+    if not hubert_path:
+        raise ValueError(
+            f"\033[91mЭмбеддера {embedder_name} не существует. "
+            "Возможно, вы неправильно ввели имя.\033[0m"
+        )
+    #hubert_model = load_hubert(config.device, config.is_half, hubert_path)
+    hubert_model = HubertModelWithFinalProj.from_pretrained(hubert_path)
+    hubert_model = hubert_model.to(config.device)
+    cpt, version, net_g, tgt_sr, vc, use_f0 = get_vc(
+        config.device, config.is_half, config, rvc_model_path, "transformers"
     )

     outputaudio = rvc_infer(
@@ -1291,6 +1600,7 @@
         hop_length,
         vc,
         hubert_model,
+        use_f0,
         f0_min,
         f0_max,
         format_output,
@@ -1304,6 +1614,7 @@
     torch.cuda.empty_cache()
     return outputaudio

+
 def vbach_inference(
     input_file: str,
     model_name: str,
@@ -1315,6 +1626,8 @@
     method_pitch: str,
     format_name: bool = False,
     pipeline_mode: str = "orig",
+    embedder_name: str | None = "hubert_base",
+    stack: str = "fairseq",
     add_params: dict = {
         "index_rate": 0,
         "filter_radius": 3,
@@ -1326,6 +1639,13 @@
         "stereo_mode": "mono"
     }
 ):
+
+    if stack == "fairseq":
+        vbach_convert = voice_conversion
+    elif stack == "transformers":
+        vbach_convert = voice_conversion_transformers
+
+
     stereo_mode = add_params.get("stereo_mode", "mono")
     index_rate = add_params.get("index_rate", 0)
     filter_radius = add_params.get("filter_radius", 3)
@@ -1355,7 +1675,7 @@
     final_output_name = output_name

     final_output_path = os.path.join(output_dir, f"{final_output_name}.{output_format}")
-    output_converted_voice =
+    output_converted_voice = vbach_convert(voice_model=model_name, vocals_path=input_file, output_path=final_output_path, pitch=pitch, f0_method=method_pitch, index_rate=index_rate, filter_radius=filter_radius, volume_envelope=rms, protect=protect, hop_length=mangio_crepe_hop_length, f0_min=f0_min, f0_max=f0_max, format_output=output_format, output_bitrate=output_bitrate, stereo_mode=stereo_mode, pipeline_mode=pipeline_mode, embedder_name=embedder_name)
     print(f"Инференс завершен\nПуть к выходному файлу: \"{output_converted_voice}\"")
     return output_converted_voice

@@ -1463,13 +1783,24 @@ if __name__ == "__main__":
         action="store_true",
         help="Альтернативный пайплайн",
     )
+    parser.add_argument(
+        "--use_transformers",
+        action="store_true",
+        help="Использовать transformers",
+    )
+    parser.add_argument(
+        "--embedder_name",
+        type=str,
+        default="hubert_base",
+        help="Имя Hubert модели",
+    )

     args = parser.parse_args()

     if args.input:
         if os.path.exists(args.input) and os.path.isfile(args.input):
             if audio.check(args.input):
-                vbach_inference(input_file=args.input, model_name=args.model_name, output_dir=args.output_dir, output_name=args.output_name, output_bitrate=args.output_bitrate, output_format=args.output_format, pitch=args.pitch, method_pitch=args.method_pitch, format_name=args.format_name, add_params={ "index_rate": args.index_rate,"filter_radius": args.filter_radius,"protect": args.protect,"rms": args.rms,"mangio_crepe_hop_length": args.hop_length,"f0_min": args.f0_min,"f0_max": args.f0_max,"stereo_mode": args.stereo_mode}, pipeline_mode="alt" if args.alt_pipeline == True else "orig")
+                vbach_inference(input_file=args.input, model_name=args.model_name, output_dir=args.output_dir, output_name=args.output_name, output_bitrate=args.output_bitrate, output_format=args.output_format, pitch=args.pitch, method_pitch=args.method_pitch, format_name=args.format_name, add_params={ "index_rate": args.index_rate,"filter_radius": args.filter_radius,"protect": args.protect,"rms": args.rms,"mangio_crepe_hop_length": args.hop_length,"f0_min": args.f0_min,"f0_max": args.f0_max,"stereo_mode": args.stereo_mode}, pipeline_mode="alt" if args.alt_pipeline == True else "orig", embedder_name=args.embedder_name, stack="transformers" if args.use_transformers else "fairseq")
         elif os.path.exists(args.input) and os.path.isdir(args.input):
             list_valid_files = []
             for file in os.listdir(args.input):
@@ -1479,4 +1810,4 @@ if __name__ == "__main__":
     if list_valid_files:
         for i, vocals_file in enumerate(list_valid_files, start=1):
             print(f"Файл {i} из {len(list_valid_files)}: {vocals_file}")
-            vbach_inference(input_file=vocals_file, model_name=args.model_name, output_dir=args.output_dir, output_name=args.output_name, output_bitrate=args.output_bitrate, output_format=args.output_format, pitch=args.pitch, method_pitch=args.method_pitch, format_name=True if len(list_valid_files) > 1 else args.format_name, add_params={ "index_rate": args.index_rate,"filter_radius": args.filter_radius,"protect": args.protect,"rms": args.rms,"mangio_crepe_hop_length": args.hop_length,"f0_min": args.f0_min,"f0_max": args.f0_max,"stereo_mode": args.stereo_mode}, pipeline_mode="alt" if args.alt_pipeline == True else "orig")
+            vbach_inference(input_file=vocals_file, model_name=args.model_name, output_dir=args.output_dir, output_name=args.output_name, output_bitrate=args.output_bitrate, output_format=args.output_format, pitch=args.pitch, method_pitch=args.method_pitch, format_name=True if len(list_valid_files) > 1 else args.format_name, add_params={ "index_rate": args.index_rate,"filter_radius": args.filter_radius,"protect": args.protect,"rms": args.rms,"mangio_crepe_hop_length": args.hop_length,"f0_min": args.f0_min,"f0_max": args.f0_max,"stereo_mode": args.stereo_mode}, pipeline_mode="alt" if args.alt_pipeline == True else "orig", embedder_name=args.embedder_name, stack="transformers" if args.use_transformers else "fairseq")
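The FAISS retrieval added in `_retrieve_speaker_embeddings` blends each frame's feature vector with an inverse-square-distance weighted average of its 8 nearest training-set features. A NumPy-only sketch of that arithmetic, with FAISS's `index.search` output stubbed by random distances and indices:

```python
# NumPy-only sketch of the blending math in _retrieve_speaker_embeddings.
import numpy as np

feats = np.random.randn(10, 768).astype(np.float32)      # per-frame features
big_npy = np.random.randn(1000, 768).astype(np.float32)  # training-set features
score = np.random.rand(10, 8) + 0.1                       # stub: L2 distances to 8 neighbours
ix = np.random.randint(0, 1000, size=(10, 8))             # stub: neighbour row indices
index_rate = 0.5

weight = np.square(1 / score)                  # closer neighbours weigh more
weight /= weight.sum(axis=1, keepdims=True)    # normalise per frame
retrieved = np.sum(big_npy[ix] * np.expand_dims(weight, axis=2), axis=1)
blended = retrieved * index_rate + (1 - index_rate) * feats  # index_rate mixes retrieved vs. original
```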
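On the transformers stack, feature extraction goes through `HubertModelWithFinalProj` instead of fairseq: v2 models consume the 768-dim `last_hidden_state` directly, while v1 models take the 256-dim `final_proj` output. A hedged sketch of that split; the local directory mirrors the `check_hubert_transformers` layout and is an assumption:

```python
# Sketch of the transformers-stack feature extraction used by _vc_transformers.
import torch
from torch import nn
from transformers import HubertModel

class HubertModelWithFinalProj(HubertModel):
    def __init__(self, config):
        super().__init__(config)
        # v1 RVC models expect 256-dim features: final_proj maps hidden_size -> classifier_proj_size
        self.final_proj = nn.Linear(config.hidden_size, config.classifier_proj_size)

model = HubertModelWithFinalProj.from_pretrained("custom_transformers_embedders/contentvec")  # assumed path
wav = torch.randn(1, 16000)                          # one second of 16 kHz audio
feats = model(wav)["last_hidden_state"]              # (1, frames, 768), fed to v2 models
feats_v1 = model.final_proj(feats[0]).unsqueeze(0)   # (1, frames, 256), fed to v1 models
```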
mvsepless/vbach_lib/algorithm/__init__.py
ADDED (empty file)
mvsepless/vbach_lib/algorithm/attentions.py
ADDED

@@ -0,0 +1,243 @@
import math
import torch
from .commons import convert_pad_shape


class MultiHeadAttention(torch.nn.Module):
    """
    Multi-head attention module with optional relative positional encoding and proximal bias.

    Args:
        channels (int): Number of input channels.
        out_channels (int): Number of output channels.
        n_heads (int): Number of attention heads.
        p_dropout (float, optional): Dropout probability. Defaults to 0.0.
        window_size (int, optional): Window size for relative positional encoding. Defaults to None.
        heads_share (bool, optional): Whether to share relative positional embeddings across heads. Defaults to True.
        block_length (int, optional): Block length for local attention. Defaults to None.
        proximal_bias (bool, optional): Whether to use proximal bias in self-attention. Defaults to False.
        proximal_init (bool, optional): Whether to initialize the key projection weights the same as query projection weights. Defaults to False.
    """

    def __init__(
        self,
        channels: int,
        out_channels: int,
        n_heads: int,
        p_dropout: float = 0.0,
        window_size: int = None,
        heads_share: bool = True,
        block_length: int = None,
        proximal_bias: bool = False,
        proximal_init: bool = False,
    ):
        super().__init__()
        assert (
            channels % n_heads == 0
        ), "Channels must be divisible by the number of heads."

        self.channels = channels
        self.out_channels = out_channels
        self.n_heads = n_heads
        self.k_channels = channels // n_heads
        self.window_size = window_size
        self.block_length = block_length
        self.proximal_bias = proximal_bias

        # Define projections
        self.conv_q = torch.nn.Conv1d(channels, channels, 1)
        self.conv_k = torch.nn.Conv1d(channels, channels, 1)
        self.conv_v = torch.nn.Conv1d(channels, channels, 1)
        self.conv_o = torch.nn.Conv1d(channels, out_channels, 1)

        self.drop = torch.nn.Dropout(p_dropout)

        # Relative positional encodings
        if window_size:
            n_heads_rel = 1 if heads_share else n_heads
            rel_stddev = self.k_channels**-0.5
            self.emb_rel_k = torch.nn.Parameter(
                torch.randn(n_heads_rel, 2 * window_size + 1, self.k_channels)
                * rel_stddev
            )
            self.emb_rel_v = torch.nn.Parameter(
                torch.randn(n_heads_rel, 2 * window_size + 1, self.k_channels)
                * rel_stddev
            )

        # Initialize weights
        torch.nn.init.xavier_uniform_(self.conv_q.weight)
        torch.nn.init.xavier_uniform_(self.conv_k.weight)
        torch.nn.init.xavier_uniform_(self.conv_v.weight)
        torch.nn.init.xavier_uniform_(self.conv_o.weight)

        if proximal_init:
            with torch.no_grad():
                self.conv_k.weight.copy_(self.conv_q.weight)
                self.conv_k.bias.copy_(self.conv_q.bias)

    def forward(self, x, c, attn_mask=None):
        # Compute query, key, value projections
        q, k, v = self.conv_q(x), self.conv_k(c), self.conv_v(c)

        # Compute attention
        x, self.attn = self.attention(q, k, v, mask=attn_mask)

        # Final output projection
        return self.conv_o(x)

    def attention(self, query, key, value, mask=None):
        # Reshape and compute scaled dot-product attention
        b, d, t_s, t_t = (*key.size(), query.size(2))
        query = query.view(b, self.n_heads, self.k_channels, t_t).transpose(2, 3)
        key = key.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3)
        value = value.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3)

        scores = torch.matmul(query / math.sqrt(self.k_channels), key.transpose(-2, -1))

        if self.window_size:
            assert t_s == t_t, "Relative attention only supports self-attention."
            scores += self._compute_relative_scores(query, t_s)

        if self.proximal_bias:
            assert t_s == t_t, "Proximal bias only supports self-attention."
            scores += self._attention_bias_proximal(t_s).to(scores.device, scores.dtype)

        if mask is not None:
            scores = scores.masked_fill(mask == 0, -1e4)
            if self.block_length:
                block_mask = (
                    torch.ones_like(scores)
                    .triu(-self.block_length)
                    .tril(self.block_length)
                )
                scores = scores.masked_fill(block_mask == 0, -1e4)

        # Apply softmax and dropout
        p_attn = self.drop(torch.nn.functional.softmax(scores, dim=-1))

        # Compute attention output
        output = torch.matmul(p_attn, value)

        if self.window_size:
            output += self._apply_relative_values(p_attn, t_s)

        return output.transpose(2, 3).contiguous().view(b, d, t_t), p_attn

    def _compute_relative_scores(self, query, length):
        rel_emb = self._get_relative_embeddings(self.emb_rel_k, length)
        rel_logits = self._matmul_with_relative_keys(
            query / math.sqrt(self.k_channels), rel_emb
        )
        return self._relative_position_to_absolute_position(rel_logits)

    def _apply_relative_values(self, p_attn, length):
        rel_weights = self._absolute_position_to_relative_position(p_attn)
        rel_emb = self._get_relative_embeddings(self.emb_rel_v, length)
        return self._matmul_with_relative_values(rel_weights, rel_emb)

    # Helper methods
    def _matmul_with_relative_values(self, x, y):
        return torch.matmul(x, y.unsqueeze(0))

    def _matmul_with_relative_keys(self, x, y):
        return torch.matmul(x, y.unsqueeze(0).transpose(-2, -1))

    def _get_relative_embeddings(self, embeddings, length):
        pad_length = max(length - (self.window_size + 1), 0)
        start = max((self.window_size + 1) - length, 0)
        end = start + 2 * length - 1

        if pad_length > 0:
            embeddings = torch.nn.functional.pad(
                embeddings,
                convert_pad_shape([[0, 0], [pad_length, pad_length], [0, 0]]),
            )
        return embeddings[:, start:end]

    def _relative_position_to_absolute_position(self, x):
        batch, heads, length, _ = x.size()
        x = torch.nn.functional.pad(
            x, convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, 1]])
        )
        x_flat = x.view(batch, heads, length * 2 * length)
        x_flat = torch.nn.functional.pad(
            x_flat, convert_pad_shape([[0, 0], [0, 0], [0, length - 1]])
        )
        return x_flat.view(batch, heads, length + 1, 2 * length - 1)[
            :, :, :length, length - 1 :
        ]

    def _absolute_position_to_relative_position(self, x):
        batch, heads, length, _ = x.size()
        x = torch.nn.functional.pad(
            x, convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, length - 1]])
        )

(diff view truncated at line 176 of 243)
+
x_flat = x.view(batch, heads, length**2 + length * (length - 1))
|
| 177 |
+
x_flat = torch.nn.functional.pad(
|
| 178 |
+
x_flat, convert_pad_shape([[0, 0], [0, 0], [length, 0]])
|
| 179 |
+
)
|
| 180 |
+
return x_flat.view(batch, heads, length, 2 * length)[:, :, :, 1:]
|
| 181 |
+
|
| 182 |
+
def _attention_bias_proximal(self, length):
|
| 183 |
+
r = torch.arange(length, dtype=torch.float32)
|
| 184 |
+
diff = r.unsqueeze(0) - r.unsqueeze(1)
|
| 185 |
+
return -torch.log1p(torch.abs(diff)).unsqueeze(0).unsqueeze(0)
|
| 186 |
+
|
| 187 |
+
|
| 188 |
+
class FFN(torch.nn.Module):
|
| 189 |
+
"""
|
| 190 |
+
Feed-forward network module.
|
| 191 |
+
|
| 192 |
+
Args:
|
| 193 |
+
in_channels (int): Number of input channels.
|
| 194 |
+
out_channels (int): Number of output channels.
|
| 195 |
+
filter_channels (int): Number of filter channels in the convolution layers.
|
| 196 |
+
kernel_size (int): Kernel size of the convolution layers.
|
| 197 |
+
p_dropout (float, optional): Dropout probability. Defaults to 0.0.
|
| 198 |
+
activation (str, optional): Activation function to use. Defaults to None.
|
| 199 |
+
causal (bool, optional): Whether to use causal padding in the convolution layers. Defaults to False.
|
| 200 |
+
"""
|
| 201 |
+
|
| 202 |
+
def __init__(
|
| 203 |
+
self,
|
| 204 |
+
in_channels: int,
|
| 205 |
+
out_channels: int,
|
| 206 |
+
filter_channels: int,
|
| 207 |
+
kernel_size: int,
|
| 208 |
+
p_dropout: float = 0.0,
|
| 209 |
+
activation: str = None,
|
| 210 |
+
causal: bool = False,
|
| 211 |
+
):
|
| 212 |
+
super().__init__()
|
| 213 |
+
self.padding_fn = self._causal_padding if causal else self._same_padding
|
| 214 |
+
|
| 215 |
+
self.conv_1 = torch.nn.Conv1d(in_channels, filter_channels, kernel_size)
|
| 216 |
+
self.conv_2 = torch.nn.Conv1d(filter_channels, out_channels, kernel_size)
|
| 217 |
+
self.drop = torch.nn.Dropout(p_dropout)
|
| 218 |
+
|
| 219 |
+
self.activation = activation
|
| 220 |
+
|
| 221 |
+
def forward(self, x, x_mask):
|
| 222 |
+
x = self.conv_1(self.padding_fn(x * x_mask))
|
| 223 |
+
x = self._apply_activation(x)
|
| 224 |
+
x = self.drop(x)
|
| 225 |
+
x = self.conv_2(self.padding_fn(x * x_mask))
|
| 226 |
+
return x * x_mask
|
| 227 |
+
|
| 228 |
+
def _apply_activation(self, x):
|
| 229 |
+
if self.activation == "gelu":
|
| 230 |
+
return x * torch.sigmoid(1.702 * x)
|
| 231 |
+
return torch.relu(x)
|
| 232 |
+
|
| 233 |
+
def _causal_padding(self, x):
|
| 234 |
+
pad_l, pad_r = self.conv_1.kernel_size[0] - 1, 0
|
| 235 |
+
return torch.nn.functional.pad(
|
| 236 |
+
x, convert_pad_shape([[0, 0], [0, 0], [pad_l, pad_r]])
|
| 237 |
+
)
|
| 238 |
+
|
| 239 |
+
def _same_padding(self, x):
|
| 240 |
+
pad = (self.conv_1.kernel_size[0] - 1) // 2
|
| 241 |
+
return torch.nn.functional.pad(
|
| 242 |
+
x, convert_pad_shape([[0, 0], [0, 0], [pad, pad]])
|
| 243 |
+
)
|
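A minimal shape check for the two modules above, as a sketch; the import path follows this diff's file layout, and every size below is an illustrative assumption:

# Sketch: self-attention + FFN pass on dummy features (all sizes illustrative).
import torch
from mvsepless.vbach_lib.algorithm.attentions import FFN, MultiHeadAttention

attn = MultiHeadAttention(channels=192, out_channels=192, n_heads=2, window_size=10)
ffn = FFN(in_channels=192, out_channels=192, filter_channels=768, kernel_size=3)

x = torch.randn(1, 192, 50)                  # [batch, channels, time]
mask = torch.ones(1, 1, 50)                  # every frame is valid
attn_mask = mask.unsqueeze(2) * mask.unsqueeze(-1)

y = attn(x, x, attn_mask=attn_mask)          # query and context are the same tensor
y = ffn(y, mask)
print(y.shape)                               # torch.Size([1, 192, 50])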
mvsepless/vbach_lib/algorithm/commons.py
ADDED
@@ -0,0 +1,138 @@
import torch
from typing import Optional


def init_weights(m, mean=0.0, std=0.01):
    """
    Initialize the weights of a module.

    Args:
        m: The module to initialize.
        mean: The mean of the normal distribution.
        std: The standard deviation of the normal distribution.
    """
    classname = m.__class__.__name__
    if classname.find("Conv") != -1:
        m.weight.data.normal_(mean, std)


def get_padding(kernel_size, dilation=1):
    """
    Calculate the padding needed for a convolution.

    Args:
        kernel_size: The size of the kernel.
        dilation: The dilation of the convolution.
    """
    return int((kernel_size * dilation - dilation) / 2)


def convert_pad_shape(pad_shape):
    """
    Convert the pad shape to a flat list of integers.

    Args:
        pad_shape: The pad shape.
    """
    l = pad_shape[::-1]
    pad_shape = [item for sublist in l for item in sublist]
    return pad_shape


def slice_segments(
    x: torch.Tensor, ids_str: torch.Tensor, segment_size: int = 4, dim: int = 2
):
    """
    Slice segments from a tensor, handling tensors with different numbers of dimensions.

    Args:
        x (torch.Tensor): The tensor to slice.
        ids_str (torch.Tensor): The starting indices of the segments.
        segment_size (int, optional): The size of each segment. Defaults to 4.
        dim (int, optional): The dimension to slice across (2D or 3D tensors). Defaults to 2.
    """
    if dim == 2:
        ret = torch.zeros_like(x[:, :segment_size])
    elif dim == 3:
        ret = torch.zeros_like(x[:, :, :segment_size])

    for i in range(x.size(0)):
        idx_str = ids_str[i].item()
        idx_end = idx_str + segment_size
        if dim == 2:
            ret[i] = x[i, idx_str:idx_end]
        else:
            ret[i] = x[i, :, idx_str:idx_end]

    return ret


def rand_slice_segments(x, x_lengths=None, segment_size=4):
    """
    Randomly slice segments from a tensor.

    Args:
        x: The tensor to slice.
        x_lengths: The lengths of the sequences.
        segment_size: The size of each segment.
    """
    b, d, t = x.size()
    if x_lengths is None:
        x_lengths = t
    ids_str_max = x_lengths - segment_size + 1
    ids_str = (torch.rand([b], device=x.device) * ids_str_max).to(dtype=torch.long)
    ret = slice_segments(x, ids_str, segment_size, dim=3)
    return ret, ids_str


@torch.jit.script
def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels):
    """
    Fused add-tanh-sigmoid-multiply operation.

    Args:
        input_a: The first input tensor.
        input_b: The second input tensor.
        n_channels: The number of channels.
    """
    n_channels_int = n_channels[0]
    in_act = input_a + input_b
    t_act = torch.tanh(in_act[:, :n_channels_int, :])
    s_act = torch.sigmoid(in_act[:, n_channels_int:, :])
    acts = t_act * s_act
    return acts


def sequence_mask(length: torch.Tensor, max_length: Optional[int] = None):
    """
    Generate a sequence mask.

    Args:
        length: The lengths of the sequences.
        max_length: The maximum length of the sequences.
    """
    if max_length is None:
        max_length = length.max()
    x = torch.arange(max_length, dtype=length.dtype, device=length.device)
    return x.unsqueeze(0) < length.unsqueeze(1)


def grad_norm(parameters, norm_type: float = 2.0):
    """
    Calculate the norm of parameter gradients.

    Args:
        parameters: The parameters whose gradient norm is computed.
        norm_type: The type of norm to use.
    """
    if isinstance(parameters, torch.Tensor):
        parameters = [parameters]

    parameters = [p for p in parameters if p.grad is not None]

    if not parameters:
        return 0.0

    return torch.linalg.vector_norm(
        torch.stack([p.grad.norm(norm_type) for p in parameters]), ord=norm_type
    ).item()
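A quick sketch of how two of the most widely used helpers behave; the printed values follow directly from the definitions above:

# Sketch: convert_pad_shape flattens pad specs into the reversed flat list
# that torch.nn.functional.pad expects (last dimension first).
import torch
from mvsepless.vbach_lib.algorithm.commons import convert_pad_shape, sequence_mask

print(convert_pad_shape([[0, 0], [0, 0], [2, 3]]))   # [2, 3, 0, 0, 0, 0]

# sequence_mask turns per-item lengths into a boolean [batch, max_length] mask.
print(sequence_mask(torch.tensor([2, 4]), max_length=5))
# tensor([[ True,  True, False, False, False],
#         [ True,  True,  True,  True, False]])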
mvsepless/vbach_lib/algorithm/discriminators.py
ADDED
@@ -0,0 +1,262 @@
import torch
import torch.nn.functional as F
from torch.utils.checkpoint import checkpoint
from torch.nn.utils.parametrizations import spectral_norm, weight_norm

from .commons import get_padding
from .residuals import LRELU_SLOPE


class MultiPeriodDiscriminator(torch.nn.Module):
    """
    Multi-period discriminator.

    This class implements a multi-period discriminator, which is used to
    discriminate between real and fake audio signals. The discriminator
    is composed of a series of convolutional layers that are applied to
    the input signal at different periods.

    Args:
        use_spectral_norm (bool): Whether to use spectral normalization.
            Defaults to False.
    """

    def __init__(
        self,
        use_spectral_norm: bool = False,
        checkpointing: bool = False,
        version: str = "v2",
    ):
        super().__init__()

        if version == "v1":
            periods = [2, 3, 5, 7, 11, 17]
            resolutions = []
        elif version == "v2":
            periods = [2, 3, 5, 7, 11, 17, 23, 37]
            resolutions = []
        elif version == "v3":
            periods = [2, 3, 5, 7, 11]
            resolutions = [[1024, 120, 600], [2048, 240, 1200], [512, 50, 240]]

        self.checkpointing = checkpointing
        self.discriminators = torch.nn.ModuleList(
            [DiscriminatorS(use_spectral_norm=use_spectral_norm)]
            + [DiscriminatorP(p, use_spectral_norm=use_spectral_norm) for p in periods]
            + [
                DiscriminatorR(r, use_spectral_norm=use_spectral_norm)
                for r in resolutions
            ]
        )

    def forward(self, y, y_hat):
        y_d_rs, y_d_gs, fmap_rs, fmap_gs = [], [], [], []
        for d in self.discriminators:
            if self.training and self.checkpointing:
                y_d_r, fmap_r = checkpoint(d, y, use_reentrant=False)
                y_d_g, fmap_g = checkpoint(d, y_hat, use_reentrant=False)
            else:
                y_d_r, fmap_r = d(y)
                y_d_g, fmap_g = d(y_hat)
            y_d_rs.append(y_d_r)
            y_d_gs.append(y_d_g)
            fmap_rs.append(fmap_r)
            fmap_gs.append(fmap_g)

        return y_d_rs, y_d_gs, fmap_rs, fmap_gs


class DiscriminatorS(torch.nn.Module):
    """
    Discriminator for the short-term component.

    This class implements a discriminator for the short-term component
    of the audio signal. The discriminator is composed of a series of
    convolutional layers that are applied to the input signal.
    """

    def __init__(self, use_spectral_norm: bool = False):
        super().__init__()

        norm_f = spectral_norm if use_spectral_norm else weight_norm
        self.convs = torch.nn.ModuleList(
            [
                norm_f(torch.nn.Conv1d(1, 16, 15, 1, padding=7)),
                norm_f(torch.nn.Conv1d(16, 64, 41, 4, groups=4, padding=20)),
                norm_f(torch.nn.Conv1d(64, 256, 41, 4, groups=16, padding=20)),
                norm_f(torch.nn.Conv1d(256, 1024, 41, 4, groups=64, padding=20)),
                norm_f(torch.nn.Conv1d(1024, 1024, 41, 4, groups=256, padding=20)),
                norm_f(torch.nn.Conv1d(1024, 1024, 5, 1, padding=2)),
            ]
        )
        self.conv_post = norm_f(torch.nn.Conv1d(1024, 1, 3, 1, padding=1))
        self.lrelu = torch.nn.LeakyReLU(LRELU_SLOPE)

    def forward(self, x):
        fmap = []
        for conv in self.convs:
            x = self.lrelu(conv(x))
            fmap.append(x)
        x = self.conv_post(x)
        fmap.append(x)
        x = torch.flatten(x, 1, -1)
        return x, fmap


class DiscriminatorP(torch.nn.Module):
    """
    Discriminator for the long-term component.

    This class implements a discriminator for the long-term component
    of the audio signal. The discriminator is composed of a series of
    convolutional layers that are applied to the input signal at a given
    period.

    Args:
        period (int): Period of the discriminator.
        kernel_size (int): Kernel size of the convolutional layers. Defaults to 5.
        stride (int): Stride of the convolutional layers. Defaults to 3.
        use_spectral_norm (bool): Whether to use spectral normalization. Defaults to False.
    """

    def __init__(
        self,
        period: int,
        kernel_size: int = 5,
        stride: int = 3,
        use_spectral_norm: bool = False,
    ):
        super().__init__()
        self.period = period
        norm_f = spectral_norm if use_spectral_norm else weight_norm

        in_channels = [1, 32, 128, 512, 1024]
        out_channels = [32, 128, 512, 1024, 1024]
        strides = [3, 3, 3, 3, 1]

        self.convs = torch.nn.ModuleList(
            [
                norm_f(
                    torch.nn.Conv2d(
                        in_ch,
                        out_ch,
                        (kernel_size, 1),
                        (s, 1),
                        padding=(get_padding(kernel_size, 1), 0),
                    )
                )
                for in_ch, out_ch, s in zip(in_channels, out_channels, strides)
            ]
        )

        self.conv_post = norm_f(torch.nn.Conv2d(1024, 1, (3, 1), 1, padding=(1, 0)))
        self.lrelu = torch.nn.LeakyReLU(LRELU_SLOPE)

    def forward(self, x):
        fmap = []
        b, c, t = x.shape
        if t % self.period != 0:
            n_pad = self.period - (t % self.period)
            x = torch.nn.functional.pad(x, (0, n_pad), "reflect")
        x = x.view(b, c, -1, self.period)

        for conv in self.convs:
            x = self.lrelu(conv(x))
            fmap.append(x)
        x = self.conv_post(x)
        fmap.append(x)
        x = torch.flatten(x, 1, -1)
        return x, fmap


class DiscriminatorR(torch.nn.Module):
    """
    Multi-resolution spectrogram discriminator.

    Computes an STFT magnitude at a given (n_fft, hop_length, win_length)
    resolution and applies a stack of 2D convolutions to it.
    """

    def __init__(self, resolution, use_spectral_norm=False):
        super().__init__()

        self.resolution = resolution
        self.lrelu_slope = 0.1
        norm_f = spectral_norm if use_spectral_norm else weight_norm

        self.convs = torch.nn.ModuleList(
            [
                norm_f(torch.nn.Conv2d(1, 32, (3, 9), padding=(1, 4))),
                norm_f(torch.nn.Conv2d(32, 32, (3, 9), stride=(1, 2), padding=(1, 4))),
                norm_f(torch.nn.Conv2d(32, 32, (3, 9), stride=(1, 2), padding=(1, 4))),
                norm_f(torch.nn.Conv2d(32, 32, (3, 9), stride=(1, 2), padding=(1, 4))),
                norm_f(torch.nn.Conv2d(32, 32, (3, 3), padding=(1, 1))),
            ]
        )
        self.conv_post = norm_f(torch.nn.Conv2d(32, 1, (3, 3), padding=(1, 1)))

    def forward(self, x):
        fmap = []

        x = self.spectrogram(x).unsqueeze(1)

        for layer in self.convs:
            x = F.leaky_relu(layer(x), self.lrelu_slope)
            fmap.append(x)
        x = self.conv_post(x)
        fmap.append(x)

        return torch.flatten(x, 1, -1), fmap

    def spectrogram(self, x):
        n_fft, hop_length, win_length = self.resolution
        pad = int((n_fft - hop_length) / 2)
        x = F.pad(
            x,
            (pad, pad),
            mode="reflect",
        ).squeeze(1)
        x = torch.stft(
            x,
            n_fft=n_fft,
            hop_length=hop_length,
            win_length=win_length,
            window=torch.ones(win_length, device=x.device),
            center=False,
            return_complex=True,
        )

        mag = torch.norm(torch.view_as_real(x), p=2, dim=-1)  # [B, F, TT]

        return mag
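A sketch of a single discriminator pass; the input length is an illustrative assumption, and with version="v3" the ensemble is one DiscriminatorS, five DiscriminatorP instances, and three DiscriminatorR instances:

# Sketch: real/fake pass through the v3 ensemble (9 sub-discriminators).
import torch
from mvsepless.vbach_lib.algorithm.discriminators import MultiPeriodDiscriminator

mpd = MultiPeriodDiscriminator(version="v3")
real = torch.randn(1, 1, 8192)               # [batch, 1, samples], length illustrative
fake = torch.randn(1, 1, 8192)

y_d_rs, y_d_gs, fmap_rs, fmap_gs = mpd(real, fake)
print(len(y_d_rs))                           # 9, one output per sub-discriminator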
mvsepless/vbach_lib/algorithm/encoders.py
ADDED
@@ -0,0 +1,209 @@
import math
import torch
from typing import Optional

from .commons import sequence_mask
from .modules import WaveNet
from .normalization import LayerNorm
from .attentions import FFN, MultiHeadAttention


class Encoder(torch.nn.Module):
    """
    Encoder module for the Transformer model.

    Args:
        hidden_channels (int): Number of hidden channels in the encoder.
        filter_channels (int): Number of filter channels in the feed-forward network.
        n_heads (int): Number of attention heads.
        n_layers (int): Number of encoder layers.
        kernel_size (int, optional): Kernel size of the convolution layers in the feed-forward network. Defaults to 1.
        p_dropout (float, optional): Dropout probability. Defaults to 0.0.
        window_size (int, optional): Window size for relative positional encoding. Defaults to 10.
    """

    def __init__(
        self,
        hidden_channels: int,
        filter_channels: int,
        n_heads: int,
        n_layers: int,
        kernel_size: int = 1,
        p_dropout: float = 0.0,
        window_size: int = 10,
    ):
        super().__init__()

        self.hidden_channels = hidden_channels
        self.n_layers = n_layers
        self.drop = torch.nn.Dropout(p_dropout)

        self.attn_layers = torch.nn.ModuleList(
            [
                MultiHeadAttention(
                    hidden_channels,
                    hidden_channels,
                    n_heads,
                    p_dropout=p_dropout,
                    window_size=window_size,
                )
                for _ in range(n_layers)
            ]
        )
        self.norm_layers_1 = torch.nn.ModuleList(
            [LayerNorm(hidden_channels) for _ in range(n_layers)]
        )
        self.ffn_layers = torch.nn.ModuleList(
            [
                FFN(
                    hidden_channels,
                    hidden_channels,
                    filter_channels,
                    kernel_size,
                    p_dropout=p_dropout,
                )
                for _ in range(n_layers)
            ]
        )
        self.norm_layers_2 = torch.nn.ModuleList(
            [LayerNorm(hidden_channels) for _ in range(n_layers)]
        )

    def forward(self, x, x_mask):
        attn_mask = x_mask.unsqueeze(2) * x_mask.unsqueeze(-1)
        x = x * x_mask

        for i in range(self.n_layers):
            y = self.attn_layers[i](x, x, attn_mask)
            y = self.drop(y)
            x = self.norm_layers_1[i](x + y)

            y = self.ffn_layers[i](x, x_mask)
            y = self.drop(y)
            x = self.norm_layers_2[i](x + y)

        return x * x_mask


class TextEncoder(torch.nn.Module):
    """
    Text Encoder with configurable embedding dimension.

    Args:
        out_channels (int): Output channels of the encoder.
        hidden_channels (int): Hidden channels of the encoder.
        filter_channels (int): Filter channels of the encoder.
        n_heads (int): Number of attention heads.
        n_layers (int): Number of encoder layers.
        kernel_size (int): Kernel size of the convolutional layers.
        p_dropout (float): Dropout probability.
        embedding_dim (int): Embedding dimension for phone embeddings (v1 = 256, v2 = 768).
        f0 (bool, optional): Whether to use F0 embedding. Defaults to True.
    """

    def __init__(
        self,
        out_channels: int,
        hidden_channels: int,
        filter_channels: int,
        n_heads: int,
        n_layers: int,
        kernel_size: int,
        p_dropout: float,
        embedding_dim: int,
        f0: bool = True,
    ):
        super().__init__()
        self.hidden_channels = hidden_channels
        self.out_channels = out_channels
        self.emb_phone = torch.nn.Linear(embedding_dim, hidden_channels)
        self.lrelu = torch.nn.LeakyReLU(0.1, inplace=True)
        self.emb_pitch = torch.nn.Embedding(256, hidden_channels) if f0 else None

        self.encoder = Encoder(
            hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout
        )
        self.proj = torch.nn.Conv1d(hidden_channels, out_channels * 2, 1)

    def forward(
        self, phone: torch.Tensor, pitch: Optional[torch.Tensor], lengths: torch.Tensor
    ):
        x = self.emb_phone(phone)
        if pitch is not None and self.emb_pitch:
            x += self.emb_pitch(pitch)

        x *= math.sqrt(self.hidden_channels)
        x = self.lrelu(x)
        x = x.transpose(1, -1)  # [B, H, T]

        x_mask = sequence_mask(lengths, x.size(2)).unsqueeze(1).to(x.dtype)
        x = self.encoder(x, x_mask)
        stats = self.proj(x) * x_mask

        m, logs = torch.split(stats, self.out_channels, dim=1)
        return m, logs, x_mask


class PosteriorEncoder(torch.nn.Module):
    """
    Posterior Encoder for inferring latent representation.

    Args:
        in_channels (int): Number of channels in the input.
        out_channels (int): Number of channels in the output.
        hidden_channels (int): Number of hidden channels in the encoder.
        kernel_size (int): Kernel size of the convolutional layers.
        dilation_rate (int): Dilation rate of the convolutional layers.
        n_layers (int): Number of layers in the encoder.
        gin_channels (int, optional): Number of channels for the global conditioning input. Defaults to 0.
    """

    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        hidden_channels: int,
        kernel_size: int,
        dilation_rate: int,
        n_layers: int,
        gin_channels: int = 0,
    ):
        super().__init__()
        self.out_channels = out_channels
        self.pre = torch.nn.Conv1d(in_channels, hidden_channels, 1)
        self.enc = WaveNet(
            hidden_channels,
            kernel_size,
            dilation_rate,
            n_layers,
            gin_channels=gin_channels,
        )
        self.proj = torch.nn.Conv1d(hidden_channels, out_channels * 2, 1)

    def forward(
        self, x: torch.Tensor, x_lengths: torch.Tensor, g: Optional[torch.Tensor] = None
    ):
        x_mask = sequence_mask(x_lengths, x.size(2)).unsqueeze(1).to(x.dtype)

        x = self.pre(x) * x_mask
        x = self.enc(x, x_mask, g=g)

        stats = self.proj(x) * x_mask
        m, logs = torch.split(stats, self.out_channels, dim=1)

        z = m + torch.randn_like(m) * torch.exp(logs)
        z *= x_mask

        return z, m, logs, x_mask

    def remove_weight_norm(self):
        self.enc.remove_weight_norm()

    def __prepare_scriptable__(self):
        for hook in self.enc._forward_pre_hooks.values():
            if (
                hook.__module__ == "torch.nn.utils.parametrizations.weight_norm"
                and hook.__class__.__name__ == "WeightNorm"
            ):
                torch.nn.utils.remove_weight_norm(self.enc)
        return self
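A sketch of the TextEncoder contract on dummy 768-dim (v2-style) features; all other sizes are illustrative assumptions:

import torch
from mvsepless.vbach_lib.algorithm.encoders import TextEncoder

enc = TextEncoder(
    out_channels=192, hidden_channels=192, filter_channels=768,
    n_heads=2, n_layers=6, kernel_size=3, p_dropout=0.0,
    embedding_dim=768, f0=True,
)
phone = torch.randn(1, 100, 768)             # [batch, frames, embedding_dim]
pitch = torch.randint(0, 256, (1, 100))      # coarse F0 buckets for emb_pitch
lengths = torch.tensor([100])

m, logs, x_mask = enc(phone, pitch, lengths)
print(m.shape, logs.shape, x_mask.shape)     # [1, 192, 100], [1, 192, 100], [1, 1, 100]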
mvsepless/vbach_lib/algorithm/generators/__init__.py
ADDED
File without changes
mvsepless/vbach_lib/algorithm/generators/hifigan.py
ADDED
@@ -0,0 +1,228 @@
import torch
import numpy as np
from torch.nn.utils import remove_weight_norm
from torch.nn.utils.parametrizations import weight_norm
from typing import Optional

from ..residuals import LRELU_SLOPE, ResBlock
from ..commons import init_weights


class HiFiGANGenerator(torch.nn.Module):
    """
    HiFi-GAN Generator module for audio synthesis.

    This module implements the generator part of the HiFi-GAN architecture,
    which uses transposed convolutions for upsampling and residual blocks for
    refining the audio output. It can also incorporate global conditioning.

    Args:
        initial_channel (int): Number of input channels to the initial convolutional layer.
        resblock_kernel_sizes (list): List of kernel sizes for the residual blocks.
        resblock_dilation_sizes (list): List of lists of dilation rates for the residual blocks, corresponding to each kernel size.
        upsample_rates (list): List of upsampling factors for each upsampling layer.
        upsample_initial_channel (int): Number of output channels from the initial convolutional layer, which is also the input to the first upsampling layer.
        upsample_kernel_sizes (list): List of kernel sizes for the transposed convolutional layers used for upsampling.
        gin_channels (int, optional): Number of input channels for the global conditioning. If 0, no global conditioning is used. Defaults to 0.
    """

    def __init__(
        self,
        initial_channel: int,
        resblock_kernel_sizes: list,
        resblock_dilation_sizes: list,
        upsample_rates: list,
        upsample_initial_channel: int,
        upsample_kernel_sizes: list,
        gin_channels: int = 0,
    ):
        super(HiFiGANGenerator, self).__init__()
        self.num_kernels = len(resblock_kernel_sizes)
        self.num_upsamples = len(upsample_rates)
        self.conv_pre = torch.nn.Conv1d(
            initial_channel, upsample_initial_channel, 7, 1, padding=3
        )

        self.ups = torch.nn.ModuleList()
        self.resblocks = torch.nn.ModuleList()

        for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)):
            self.ups.append(
                weight_norm(
                    torch.nn.ConvTranspose1d(
                        upsample_initial_channel // (2**i),
                        upsample_initial_channel // (2 ** (i + 1)),
                        k,
                        u,
                        padding=(k - u) // 2,
                    )
                )
            )
            ch = upsample_initial_channel // (2 ** (i + 1))
            for k, d in zip(resblock_kernel_sizes, resblock_dilation_sizes):
                self.resblocks.append(ResBlock(ch, k, d))

        self.conv_post = torch.nn.Conv1d(ch, 1, 7, 1, padding=3, bias=False)
        self.ups.apply(init_weights)

        if gin_channels != 0:
            self.cond = torch.nn.Conv1d(gin_channels, upsample_initial_channel, 1)

    def forward(self, x: torch.Tensor, g: Optional[torch.Tensor] = None):
        x = self.conv_pre(x)

        if g is not None:
            x = x + self.cond(g)

        for i in range(self.num_upsamples):
            x = torch.nn.functional.leaky_relu(x, LRELU_SLOPE)
            x = self.ups[i](x)
            xs = None
            for j in range(self.num_kernels):
                if xs is None:
                    xs = self.resblocks[i * self.num_kernels + j](x)
                else:
                    xs += self.resblocks[i * self.num_kernels + j](x)
            x = xs / self.num_kernels  # average the parallel residual branches
        x = torch.nn.functional.leaky_relu(x)
        x = self.conv_post(x)
        x = torch.tanh(x)

        return x

    def __prepare_scriptable__(self):
        # strip weight-norm hooks from both the upsampling and residual blocks
        for l in list(self.ups) + list(self.resblocks):
            for hook in l._forward_pre_hooks.values():
                if (
                    hook.__module__ == "torch.nn.utils.parametrizations.weight_norm"
                    and hook.__class__.__name__ == "WeightNorm"
                ):
                    torch.nn.utils.remove_weight_norm(l)
        return self

    def remove_weight_norm(self):
        for l in self.ups:
            remove_weight_norm(l)
        for l in self.resblocks:
            l.remove_weight_norm()


class SineGenerator(torch.nn.Module):
    """
    Sine wave generator with optional harmonic overtones and noise.

    This module generates sine waves for a fundamental frequency and its harmonics.
    It can also add Gaussian noise and apply a voiced/unvoiced mask.

    Args:
        sampling_rate (int): The sampling rate of the audio in Hz.
        num_harmonics (int, optional): The number of harmonic overtones to generate. Defaults to 0.
        sine_amplitude (float, optional): The amplitude of the sine wave components. Defaults to 0.1.
        noise_stddev (float, optional): The standard deviation of the additive Gaussian noise. Defaults to 0.003.
        voiced_threshold (float, optional): The threshold for the fundamental frequency (F0) to determine if a frame is voiced. Defaults to 0.0.
    """

    def __init__(
        self,
        sampling_rate: int,
        num_harmonics: int = 0,
        sine_amplitude: float = 0.1,
        noise_stddev: float = 0.003,
        voiced_threshold: float = 0.0,
    ):
        super(SineGenerator, self).__init__()
        self.sampling_rate = sampling_rate
        self.num_harmonics = num_harmonics
        self.sine_amplitude = sine_amplitude
        self.noise_stddev = noise_stddev
        self.voiced_threshold = voiced_threshold
        self.waveform_dim = self.num_harmonics + 1  # fundamental + harmonics

    def _compute_voiced_unvoiced(self, f0: torch.Tensor):
        """
        Generates a binary mask indicating voiced/unvoiced frames based on the fundamental frequency.

        Args:
            f0 (torch.Tensor): Fundamental frequency tensor of shape (batch_size, length).
        """
        uv_mask = (f0 > self.voiced_threshold).float()
        return uv_mask

    def _generate_sine_wave(self, f0: torch.Tensor, upsampling_factor: int):
        """
        Generates sine waves for the fundamental frequency and its harmonics.

        Args:
            f0 (torch.Tensor): Fundamental frequency tensor of shape (batch_size, length, 1).
            upsampling_factor (int): The factor by which to upsample the sine wave.
        """
        batch_size, length, _ = f0.shape

        # Create an upsampling grid
        upsampling_grid = torch.arange(
            1, upsampling_factor + 1, dtype=f0.dtype, device=f0.device
        )

        # Calculate phase increments
        phase_increments = (f0 / self.sampling_rate) * upsampling_grid
        phase_remainder = torch.fmod(phase_increments[:, :-1, -1:] + 0.5, 1.0) - 0.5
        cumulative_phase = phase_remainder.cumsum(dim=1).fmod(1.0).to(f0.dtype)
        phase_increments += torch.nn.functional.pad(
            cumulative_phase, (0, 0, 1, 0), mode="constant"
        )

        # Reshape to match the sine wave shape
        phase_increments = phase_increments.reshape(batch_size, -1, 1)

        # Scale for harmonics
        harmonic_scale = torch.arange(
            1, self.waveform_dim + 1, dtype=f0.dtype, device=f0.device
        ).reshape(1, 1, -1)
        phase_increments *= harmonic_scale

        # Add random phase offset (except for the fundamental)
        random_phase = torch.rand(1, 1, self.waveform_dim, device=f0.device)
        random_phase[..., 0] = 0  # Fundamental frequency has no random offset
        phase_increments += random_phase

        # Generate sine waves
        sine_waves = torch.sin(2 * np.pi * phase_increments)
        return sine_waves

    def forward(self, f0: torch.Tensor, upsampling_factor: int):
        with torch.no_grad():
            # Expand `f0` to include waveform dimensions
            f0 = f0.unsqueeze(-1)

            # Generate sine waves
            sine_waves = (
                self._generate_sine_wave(f0, upsampling_factor) * self.sine_amplitude
            )

            # Compute voiced/unvoiced mask
            voiced_mask = self._compute_voiced_unvoiced(f0)

            # Upsample voiced/unvoiced mask
            voiced_mask = torch.nn.functional.interpolate(
                voiced_mask.transpose(2, 1),
                scale_factor=float(upsampling_factor),
                mode="nearest",
            ).transpose(2, 1)

            # Compute noise amplitude
            noise_amplitude = voiced_mask * self.noise_stddev + (1 - voiced_mask) * (
                self.sine_amplitude / 3
            )

            # Add Gaussian noise
            noise = noise_amplitude * torch.randn_like(sine_waves)

            # Combine sine waves and noise
            sine_waveforms = sine_waves * voiced_mask + noise

        return sine_waveforms, voiced_mask, noise
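The product of upsample_rates fixes how many output samples each input frame becomes, so it must match the hop length of the upstream features. A sketch with assumed 32 kHz RVC-style settings (all values illustrative; ResBlock from residuals.py is assumed length-preserving, as in standard HiFi-GAN):

import torch
from mvsepless.vbach_lib.algorithm.generators.hifigan import HiFiGANGenerator

gen = HiFiGANGenerator(
    initial_channel=192,
    resblock_kernel_sizes=[3, 7, 11],
    resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5], [1, 3, 5]],
    upsample_rates=[10, 8, 2, 2],            # 10 * 8 * 2 * 2 = 320 samples per frame
    upsample_initial_channel=512,
    upsample_kernel_sizes=[20, 16, 4, 4],
)
feats = torch.randn(1, 192, 50)              # 50 feature frames
wave = gen(feats)
print(wave.shape)                            # torch.Size([1, 1, 16000]) = 50 * 320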
mvsepless/vbach_lib/algorithm/generators/hifigan_mrf.py
ADDED
@@ -0,0 +1,374 @@
| 1 |
+
import math
|
| 2 |
+
from typing import Optional
|
| 3 |
+
|
| 4 |
+
import numpy as np
|
| 5 |
+
import torch
|
| 6 |
+
from torch.nn.utils import remove_weight_norm
|
| 7 |
+
from torch.nn.utils.parametrizations import weight_norm
|
| 8 |
+
from torch.utils.checkpoint import checkpoint
|
| 9 |
+
|
| 10 |
+
LRELU_SLOPE = 0.1
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
class MRFLayer(torch.nn.Module):
|
| 14 |
+
"""
|
| 15 |
+
A single layer of the Multi-Receptive Field (MRF) block.
|
| 16 |
+
|
| 17 |
+
This layer consists of two 1D convolutional layers with weight normalization
|
| 18 |
+
and Leaky ReLU activation in between. The first convolution has a dilation,
|
| 19 |
+
while the second has a dilation of 1. A skip connection is added from the input
|
| 20 |
+
to the output.
|
| 21 |
+
|
| 22 |
+
Args:
|
| 23 |
+
channels (int): The number of input and output channels.
|
| 24 |
+
kernel_size (int): The kernel size of the convolutional layers.
|
| 25 |
+
dilation (int): The dilation rate for the first convolutional layer.
|
| 26 |
+
"""
|
| 27 |
+
|
| 28 |
+
def __init__(self, channels, kernel_size, dilation):
|
| 29 |
+
super().__init__()
|
| 30 |
+
self.conv1 = weight_norm(
|
| 31 |
+
torch.nn.Conv1d(
|
| 32 |
+
channels,
|
| 33 |
+
channels,
|
| 34 |
+
kernel_size,
|
| 35 |
+
padding=(kernel_size * dilation - dilation) // 2,
|
| 36 |
+
dilation=dilation,
|
| 37 |
+
)
|
| 38 |
+
)
|
| 39 |
+
self.conv2 = weight_norm(
|
| 40 |
+
torch.nn.Conv1d(
|
| 41 |
+
channels, channels, kernel_size, padding=kernel_size // 2, dilation=1
|
| 42 |
+
)
|
| 43 |
+
)
|
| 44 |
+
|
| 45 |
+
def forward(self, x: torch.Tensor):
|
| 46 |
+
y = torch.nn.functional.leaky_relu(x, LRELU_SLOPE)
|
| 47 |
+
y = self.conv1(y)
|
| 48 |
+
y = torch.nn.functional.leaky_relu(y, LRELU_SLOPE)
|
| 49 |
+
y = self.conv2(y)
|
| 50 |
+
return x + y
|
| 51 |
+
|
| 52 |
+
def remove_weight_norm(self):
|
| 53 |
+
remove_weight_norm(self.conv1)
|
| 54 |
+
remove_weight_norm(self.conv2)
|
| 55 |
+
|
| 56 |
+
|
| 57 |
+
class MRFBlock(torch.nn.Module):
|
| 58 |
+
"""
|
| 59 |
+
A Multi-Receptive Field (MRF) block.
|
| 60 |
+
|
| 61 |
+
This block consists of multiple MRFLayers with different dilation rates.
|
| 62 |
+
It applies each layer sequentially to the input.
|
| 63 |
+
|
| 64 |
+
Args:
|
| 65 |
+
channels (int): The number of input and output channels for the MRFLayers.
|
| 66 |
+
kernel_size (int): The kernel size for the convolutional layers in the MRFLayers.
|
| 67 |
+
dilations (list[int]): A list of dilation rates for the MRFLayers.
|
| 68 |
+
"""
|
| 69 |
+
|
| 70 |
+
def __init__(self, channels, kernel_size, dilations):
|
| 71 |
+
super().__init__()
|
| 72 |
+
self.layers = torch.nn.ModuleList()
|
| 73 |
+
for dilation in dilations:
|
| 74 |
+
self.layers.append(MRFLayer(channels, kernel_size, dilation))
|
| 75 |
+
|
| 76 |
+
def forward(self, x: torch.Tensor):
|
| 77 |
+
for layer in self.layers:
|
| 78 |
+
x = layer(x)
|
| 79 |
+
return x
|
| 80 |
+
|
| 81 |
+
def remove_weight_norm(self):
|
| 82 |
+
for layer in self.layers:
|
| 83 |
+
layer.remove_weight_norm()
|
| 84 |
+
|
| 85 |
+
|
| 86 |
+
class SineGenerator(torch.nn.Module):
|
| 87 |
+
"""
|
| 88 |
+
Definition of sine generator
|
| 89 |
+
|
| 90 |
+
Generates sine waveforms with optional harmonics and additive noise.
|
| 91 |
+
Can be used to create harmonic noise source for neural vocoders.
|
| 92 |
+
|
| 93 |
+
Args:
|
| 94 |
+
samp_rate (int): Sampling rate in Hz.
|
| 95 |
+
harmonic_num (int): Number of harmonic overtones (default 0).
|
| 96 |
+
sine_amp (float): Amplitude of sine-waveform (default 0.1).
|
| 97 |
+
noise_std (float): Standard deviation of Gaussian noise (default 0.003).
|
| 98 |
+
voiced_threshold (float): F0 threshold for voiced/unvoiced classification (default 0).
|
| 99 |
+
"""
|
| 100 |
+
|
| 101 |
+
def __init__(
|
| 102 |
+
self,
|
| 103 |
+
samp_rate: int,
|
| 104 |
+
harmonic_num: int = 0,
|
| 105 |
+
sine_amp: float = 0.1,
|
| 106 |
+
noise_std: float = 0.003,
|
| 107 |
+
voiced_threshold: float = 0,
|
| 108 |
+
):
|
| 109 |
+
super(SineGenerator, self).__init__()
|
| 110 |
+
self.sine_amp = sine_amp
|
| 111 |
+
self.noise_std = noise_std
|
| 112 |
+
self.harmonic_num = harmonic_num
|
| 113 |
+
self.dim = self.harmonic_num + 1
|
| 114 |
+
self.sampling_rate = samp_rate
|
| 115 |
+
self.voiced_threshold = voiced_threshold
|
| 116 |
+
|
| 117 |
+
def _f02uv(self, f0: torch.Tensor):
|
| 118 |
+
"""
|
| 119 |
+
Generates voiced/unvoiced (UV) signal based on the fundamental frequency (F0).
|
| 120 |
+
|
| 121 |
+
Args:
|
| 122 |
+
f0 (torch.Tensor): Fundamental frequency tensor of shape (batch_size, length, 1).
|
| 123 |
+
"""
|
| 124 |
+
# generate uv signal
|
| 125 |
+
uv = torch.ones_like(f0)
|
| 126 |
+
uv = uv * (f0 > self.voiced_threshold)
|
| 127 |
+
return uv
|
| 128 |
+
|
| 129 |
+
def _f02sine(self, f0_values: torch.Tensor):
|
| 130 |
+
"""
|
| 131 |
+
Generates sine waveforms based on the fundamental frequency (F0) and its harmonics.
|
| 132 |
+
|
| 133 |
+
Args:
|
| 134 |
+
f0_values (torch.Tensor): Tensor of fundamental frequency and its harmonics,
|
| 135 |
+
shape (batch_size, length, dim), where dim indicates
|
| 136 |
+
the fundamental tone and overtones.
|
| 137 |
+
"""
|
| 138 |
+
# convert to F0 in rad. The integer part n can be ignored
|
| 139 |
+
# because 2 * np.pi * n doesn't affect phase
|
| 140 |
+
rad_values = (f0_values / self.sampling_rate) % 1
|
| 141 |
+
|
| 142 |
+
# initial phase noise (no noise for fundamental component)
|
| 143 |
+
rand_ini = torch.rand(
|
| 144 |
+
f0_values.shape[0], f0_values.shape[2], device=f0_values.device
|
| 145 |
+
)
|
| 146 |
+
rand_ini[:, 0] = 0
|
| 147 |
+
rad_values[:, 0, :] = rad_values[:, 0, :] + rand_ini
|
| 148 |
+
|
| 149 |
+
# instantanouse phase sine[t] = sin(2*pi \sum_i=1 ^{t} rad)
|
| 150 |
+
tmp_over_one = torch.cumsum(rad_values, 1) % 1
|
| 151 |
+
tmp_over_one_idx = (tmp_over_one[:, 1:, :] - tmp_over_one[:, :-1, :]) < 0
|
| 152 |
+
cumsum_shift = torch.zeros_like(rad_values)
|
| 153 |
+
cumsum_shift[:, 1:, :] = tmp_over_one_idx * -1.0
|
| 154 |
+
|
| 155 |
+
sines = torch.sin(torch.cumsum(rad_values + cumsum_shift, dim=1) * 2 * np.pi)
|
| 156 |
+
|
| 157 |
+
return sines
|
| 158 |
+
|
| 159 |
+
def forward(self, f0: torch.Tensor):
|
| 160 |
+
with torch.no_grad():
|
| 161 |
+
f0_buf = torch.zeros(f0.shape[0], f0.shape[1], self.dim, device=f0.device)
|
| 162 |
+
# fundamental component
|
| 163 |
+
f0_buf[:, :, 0] = f0[:, :, 0]
|
| 164 |
+
for idx in np.arange(self.harmonic_num):
|
| 165 |
+
f0_buf[:, :, idx + 1] = f0_buf[:, :, 0] * (idx + 2)
|
| 166 |
+
|
| 167 |
+
sine_waves = self._f02sine(f0_buf) * self.sine_amp
|
| 168 |
+
|
| 169 |
+
uv = self._f02uv(f0)
|
| 170 |
+
|
| 171 |
+
noise_amp = uv * self.noise_std + (1 - uv) * self.sine_amp / 3
|
| 172 |
+
noise = noise_amp * torch.randn_like(sine_waves)
|
| 173 |
+
|
| 174 |
+
sine_waves = sine_waves * uv + noise
|
| 175 |
+
return sine_waves, uv, noise
|
| 176 |
+
|
| 177 |
+
|
| 178 |
+
class SourceModuleHnNSF(torch.nn.Module):
|
| 179 |
+
"""
|
| 180 |
+
Generates harmonic and noise source features.
|
| 181 |
+
|
| 182 |
+
This module uses the SineGenerator to create harmonic signals based on the
|
| 183 |
+
fundamental frequency (F0) and merges them into a single excitation signal.
|
| 184 |
+
|
| 185 |
+
Args:
|
| 186 |
+
sample_rate (int): Sampling rate in Hz.
|
| 187 |
+
harmonic_num (int, optional): Number of harmonics above F0. Defaults to 0.
|
| 188 |
+
sine_amp (float, optional): Amplitude of sine source signal. Defaults to 0.1.
|
| 189 |
+
add_noise_std (float, optional): Standard deviation of additive Gaussian noise. Defaults to 0.003.
|
| 190 |
+
voiced_threshod (float, optional): Threshold to set voiced/unvoiced given F0. Defaults to 0.
|
| 191 |
+
"""
|
| 192 |
+
|
| 193 |
+
def __init__(
|
| 194 |
+
self,
|
| 195 |
+
sampling_rate: int,
|
| 196 |
+
harmonic_num: int = 0,
|
| 197 |
+
sine_amp: float = 0.1,
|
| 198 |
+
add_noise_std: float = 0.003,
|
+        voiced_threshold: float = 0,
+    ):
+        super(SourceModuleHnNSF, self).__init__()
+
+        self.sine_amp = sine_amp
+        self.noise_std = add_noise_std
+
+        # to produce sine waveforms
+        self.l_sin_gen = SineGenerator(
+            sampling_rate, harmonic_num, sine_amp, add_noise_std, voiced_threshold
+        )
+
+        # to merge source harmonics into a single excitation
+        self.l_linear = torch.nn.Linear(harmonic_num + 1, 1)
+        self.l_tanh = torch.nn.Tanh()
+
+    def forward(self, x: torch.Tensor):
+        sine_wavs, uv, _ = self.l_sin_gen(x)
+        sine_wavs = sine_wavs.to(dtype=self.l_linear.weight.dtype)
+        sine_merge = self.l_tanh(self.l_linear(sine_wavs))
+
+        return sine_merge, None, None
+
+
+class HiFiGANMRFGenerator(torch.nn.Module):
+    """
+    HiFi-GAN generator with Multi-Receptive Field (MRF) blocks.
+
+    This generator takes an input feature sequence and fundamental frequency (F0)
+    as input and generates an audio waveform. It utilizes transposed convolutions
+    for upsampling and MRF blocks for feature refinement. It can also condition
+    on global conditioning features.
+
+    Args:
+        in_channel (int): Number of input channels.
+        upsample_initial_channel (int): Number of channels after the initial convolution.
+        upsample_rates (list[int]): List of upsampling rates for the transposed convolutions.
+        upsample_kernel_sizes (list[int]): List of kernel sizes for the transposed convolutions.
+        resblock_kernel_sizes (list[int]): List of kernel sizes for the convolutional layers in the MRF blocks.
+        resblock_dilations (list[list[int]]): List of lists of dilation rates for the MRF blocks.
+        gin_channels (int): Number of global conditioning input channels (0 if no global conditioning).
+        sample_rate (int): Sampling rate of the audio.
+        harmonic_num (int): Number of harmonics to generate.
+        checkpointing (bool): Whether to use checkpointing to save memory during training (default: False).
+    """
+
+    def __init__(
+        self,
+        in_channel: int,
+        upsample_initial_channel: int,
+        upsample_rates: list[int],
+        upsample_kernel_sizes: list[int],
+        resblock_kernel_sizes: list[int],
+        resblock_dilations: list[list[int]],
+        gin_channels: int,
+        sample_rate: int,
+        harmonic_num: int,
+        checkpointing: bool = False,
+    ):
+        super().__init__()
+        self.num_kernels = len(resblock_kernel_sizes)
+        self.checkpointing = checkpointing
+
+        self.f0_upsample = torch.nn.Upsample(scale_factor=np.prod(upsample_rates))
+        self.m_source = SourceModuleHnNSF(sample_rate, harmonic_num)
+
+        self.conv_pre = weight_norm(
+            torch.nn.Conv1d(
+                in_channel, upsample_initial_channel, kernel_size=7, stride=1, padding=3
+            )
+        )
+        self.upsamples = torch.nn.ModuleList()
+        self.noise_convs = torch.nn.ModuleList()
+
+        stride_f0s = [
+            math.prod(upsample_rates[i + 1 :]) if i + 1 < len(upsample_rates) else 1
+            for i in range(len(upsample_rates))
+        ]
+
+        for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)):
+            # handling odd upsampling rates
+            if u % 2 == 0:
+                # old method
+                padding = (k - u) // 2
+            else:
+                padding = u // 2 + u % 2
+
+            self.upsamples.append(
+                weight_norm(
+                    torch.nn.ConvTranspose1d(
+                        upsample_initial_channel // (2**i),
+                        upsample_initial_channel // (2 ** (i + 1)),
+                        kernel_size=k,
+                        stride=u,
+                        padding=padding,
+                        output_padding=u % 2,
+                    )
+                )
+            )
+            """ handling odd upsampling rates
+            #   s    k    p
+            #  40   80   20
+            #  32   64   16
+            #   4    8    2
+            #   2    3    1
+            #  63  125   31
+            #   9   17    4
+            #   3    5    1
+            #   1    1    0
+            """
+            stride = stride_f0s[i]
+            kernel = 1 if stride == 1 else stride * 2 - stride % 2
+            padding = 0 if stride == 1 else (kernel - stride) // 2
+
+            self.noise_convs.append(
+                torch.nn.Conv1d(
+                    1,
+                    upsample_initial_channel // (2 ** (i + 1)),
+                    kernel_size=kernel,
+                    stride=stride,
+                    padding=padding,
+                )
+            )
+        self.mrfs = torch.nn.ModuleList()
+        for i in range(len(self.upsamples)):
+            channel = upsample_initial_channel // (2 ** (i + 1))
+            self.mrfs.append(
+                torch.nn.ModuleList(
+                    [
+                        MRFBlock(channel, kernel_size=k, dilations=d)
+                        for k, d in zip(resblock_kernel_sizes, resblock_dilations)
+                    ]
+                )
+            )
+        self.conv_post = weight_norm(
+            torch.nn.Conv1d(channel, 1, kernel_size=7, stride=1, padding=3)
+        )
+        if gin_channels != 0:
+            self.cond = torch.nn.Conv1d(gin_channels, upsample_initial_channel, 1)
+
+    def forward(
+        self, x: torch.Tensor, f0: torch.Tensor, g: Optional[torch.Tensor] = None
+    ):
+        f0 = self.f0_upsample(f0[:, None, :]).transpose(-1, -2)
+        har_source, _, _ = self.m_source(f0)
+        har_source = har_source.transpose(-1, -2)
+        x = self.conv_pre(x)
+
+        if g is not None:
+            x = x + self.cond(g)
+
+        for ups, mrf, noise_conv in zip(self.upsamples, self.mrfs, self.noise_convs):
+            x = torch.nn.functional.leaky_relu(x, LRELU_SLOPE)
+
+            if self.training and self.checkpointing:
+                x = checkpoint(ups, x, use_reentrant=False)
+                x = x + noise_conv(har_source)
+                xs = sum([checkpoint(layer, x, use_reentrant=False) for layer in mrf])
+            else:
+                x = ups(x)
+                x = x + noise_conv(har_source)
+                xs = sum([layer(x) for layer in mrf])
+            x = xs / self.num_kernels
+
+        x = torch.nn.functional.leaky_relu(x)
+        x = torch.tanh(self.conv_post(x))
+
+        return x
+
+    def remove_weight_norm(self):
+        remove_weight_norm(self.conv_pre)
+        for up in self.upsamples:
+            remove_weight_norm(up)
+        for mrf in self.mrfs:
+            # each entry is a ModuleList of MRFBlocks, so unwrap it first
+            for block in mrf:
+                block.remove_weight_norm()
+        remove_weight_norm(self.conv_post)
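The `stride_f0s` bookkeeping above is what keeps the harmonic excitation aligned with the feature stream: after upsampling stage `i` the features have been stretched by `prod(upsample_rates[:i + 1])`, so the audio-rate source must be downsampled by the remaining factor before `noise_convs[i]` adds it in. A standalone sketch of that arithmetic with a hypothetical `upsample_rates` (values are illustrative, not from this commit):

import math

upsample_rates = [10, 6, 2, 2]  # hypothetical configuration
stride_f0s = [
    math.prod(upsample_rates[i + 1:]) if i + 1 < len(upsample_rates) else 1
    for i in range(len(upsample_rates))
]
# Downsampling the audio-rate source by stride_f0s[i] matches the feature
# length reached after stage i, whatever the rates are:
total = math.prod(upsample_rates)
for i, stride in enumerate(stride_f0s):
    assert math.prod(upsample_rates[: i + 1]) * stride == total
print(stride_f0s)  # -> [24, 4, 2, 1]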
mvsepless/vbach_lib/algorithm/generators/hifigan_nsf.py
ADDED
@@ -0,0 +1,235 @@
+import math
+from typing import Optional
+
+import torch
+from torch.nn.utils import remove_weight_norm
+from torch.nn.utils.parametrizations import weight_norm
+from torch.utils.checkpoint import checkpoint
+
+from ..commons import init_weights
+from .hifigan import SineGenerator
+from ..residuals import LRELU_SLOPE, ResBlock
+
+
+class SourceModuleHnNSF(torch.nn.Module):
+    """
+    Source Module for generating harmonic and noise components for audio synthesis.
+
+    This module generates a harmonic source signal using sine waves and adds
+    optional noise. It's often used in neural vocoders as a source of excitation.
+
+    Args:
+        sample_rate (int): Sampling rate of the audio in Hz.
+        harmonic_num (int, optional): Number of harmonic overtones to generate above the fundamental frequency (F0). Defaults to 0.
+        sine_amp (float, optional): Amplitude of the sine wave components. Defaults to 0.1.
+        add_noise_std (float, optional): Standard deviation of the additive white Gaussian noise. Defaults to 0.003.
+        voiced_threshold (float, optional): Threshold for the fundamental frequency (F0) to determine if a frame is voiced. If F0 is below this threshold, it's considered unvoiced. Defaults to 0.
+    """
+
+    def __init__(
+        self,
+        sample_rate: int,
+        harmonic_num: int = 0,
+        sine_amp: float = 0.1,
+        add_noise_std: float = 0.003,
+        voiced_threshold: float = 0,
+    ):
+        super(SourceModuleHnNSF, self).__init__()
+
+        self.sine_amp = sine_amp
+        self.noise_std = add_noise_std
+
+        self.l_sin_gen = SineGenerator(
+            sample_rate, harmonic_num, sine_amp, add_noise_std, voiced_threshold
+        )
+        self.l_linear = torch.nn.Linear(harmonic_num + 1, 1)
+        self.l_tanh = torch.nn.Tanh()
+
+    def forward(self, x: torch.Tensor, upsample_factor: int = 1):
+        sine_wavs, uv, _ = self.l_sin_gen(x, upsample_factor)
+        sine_wavs = sine_wavs.to(dtype=self.l_linear.weight.dtype)
+        sine_merge = self.l_tanh(self.l_linear(sine_wavs))
+        return sine_merge, None, None
+
+
+class HiFiGANNSFGenerator(torch.nn.Module):
+    """
+    Generator module based on the Neural Source Filter (NSF) architecture.
+
+    This generator synthesizes audio by first generating a source excitation signal
+    (harmonic and noise) and then filtering it through a series of upsampling and
+    residual blocks. Global conditioning can be applied to influence the generation.
+
+    Args:
+        initial_channel (int): Number of input channels to the initial convolutional layer.
+        resblock_kernel_sizes (list): List of kernel sizes for the residual blocks.
+        resblock_dilation_sizes (list): List of lists of dilation rates for the residual blocks, corresponding to each kernel size.
+        upsample_rates (list): List of upsampling factors for each upsampling layer.
+        upsample_initial_channel (int): Number of output channels from the initial convolutional layer, which is also the input to the first upsampling layer.
+        upsample_kernel_sizes (list): List of kernel sizes for the transposed convolutional layers used for upsampling.
+        gin_channels (int): Number of input channels for the global conditioning. If 0, no global conditioning is used.
+        sr (int): Sampling rate of the audio.
+        checkpointing (bool, optional): Whether to use gradient checkpointing to save memory during training. Defaults to False.
+    """
+
+    def __init__(
+        self,
+        initial_channel: int,
+        resblock_kernel_sizes: list,
+        resblock_dilation_sizes: list,
+        upsample_rates: list,
+        upsample_initial_channel: int,
+        upsample_kernel_sizes: list,
+        gin_channels: int,
+        sr: int,
+        checkpointing: bool = False,
+    ):
+        super(HiFiGANNSFGenerator, self).__init__()
+
+        self.num_kernels = len(resblock_kernel_sizes)
+        self.num_upsamples = len(upsample_rates)
+        self.checkpointing = checkpointing
+        self.f0_upsamp = torch.nn.Upsample(scale_factor=math.prod(upsample_rates))
+        self.m_source = SourceModuleHnNSF(sample_rate=sr, harmonic_num=0)
+
+        self.conv_pre = torch.nn.Conv1d(
+            initial_channel, upsample_initial_channel, 7, 1, padding=3
+        )
+
+        self.ups = torch.nn.ModuleList()
+        self.noise_convs = torch.nn.ModuleList()
+
+        channels = [
+            upsample_initial_channel // (2 ** (i + 1))
+            for i in range(len(upsample_rates))
+        ]
+        stride_f0s = [
+            math.prod(upsample_rates[i + 1 :]) if i + 1 < len(upsample_rates) else 1
+            for i in range(len(upsample_rates))
+        ]
+
+        for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)):
+            # handling odd upsampling rates
+            if u % 2 == 0:
+                # old method
+                padding = (k - u) // 2
+            else:
+                padding = u // 2 + u % 2
+
+            self.ups.append(
+                weight_norm(
+                    torch.nn.ConvTranspose1d(
+                        upsample_initial_channel // (2**i),
+                        channels[i],
+                        k,
+                        u,
+                        padding=padding,
+                        output_padding=u % 2,
+                    )
+                )
+            )
+            """ handling odd upsampling rates
+            #   s    k    p
+            #  40   80   20
+            #  32   64   16
+            #   4    8    2
+            #   2    3    1
+            #  63  125   31
+            #   9   17    4
+            #   3    5    1
+            #   1    1    0
+            """
+            stride = stride_f0s[i]
+            kernel = 1 if stride == 1 else stride * 2 - stride % 2
+            padding = 0 if stride == 1 else (kernel - stride) // 2
+
+            self.noise_convs.append(
+                torch.nn.Conv1d(
+                    1,
+                    channels[i],
+                    kernel_size=kernel,
+                    stride=stride,
+                    padding=padding,
+                )
+            )
+
+        self.resblocks = torch.nn.ModuleList(
+            [
+                ResBlock(channels[i], k, d)
+                for i in range(len(self.ups))
+                for k, d in zip(resblock_kernel_sizes, resblock_dilation_sizes)
+            ]
+        )
+
+        self.conv_post = torch.nn.Conv1d(channels[-1], 1, 7, 1, padding=3, bias=False)
+        self.ups.apply(init_weights)
+
+        if gin_channels != 0:
+            self.cond = torch.nn.Conv1d(gin_channels, upsample_initial_channel, 1)
+
+        self.upp = math.prod(upsample_rates)
+        self.lrelu_slope = LRELU_SLOPE
+
+    def forward(
+        self, x: torch.Tensor, f0: torch.Tensor, g: Optional[torch.Tensor] = None
+    ):
+        har_source, _, _ = self.m_source(f0, self.upp)
+        har_source = har_source.transpose(1, 2)
+        # new tensor
+        x = self.conv_pre(x)
+
+        if g is not None:
+            x = x + self.cond(g)
+
+        for i, (ups, noise_convs) in enumerate(zip(self.ups, self.noise_convs)):
+            x = torch.nn.functional.leaky_relu(x, self.lrelu_slope)
+            # Apply upsampling layer
+            if self.training and self.checkpointing:
+                x = checkpoint(ups, x, use_reentrant=False)
+                x = x + noise_convs(har_source)
+                xs = sum(
+                    [
+                        checkpoint(resblock, x, use_reentrant=False)
+                        for j, resblock in enumerate(self.resblocks)
+                        if j in range(i * self.num_kernels, (i + 1) * self.num_kernels)
+                    ]
+                )
+            else:
+                x = ups(x)
+                x = x + noise_convs(har_source)
+                xs = sum(
+                    [
+                        resblock(x)
+                        for j, resblock in enumerate(self.resblocks)
+                        if j in range(i * self.num_kernels, (i + 1) * self.num_kernels)
+                    ]
+                )
+            x = xs / self.num_kernels
+
+        x = torch.nn.functional.leaky_relu(x)
+        x = torch.tanh(self.conv_post(x))
+
+        return x
+
+    def remove_weight_norm(self):
+        for l in self.ups:
+            remove_weight_norm(l)
+        for l in self.resblocks:
+            l.remove_weight_norm()
+
+    def __prepare_scriptable__(self):
+        for l in self.ups:
+            for hook in l._forward_pre_hooks.values():
+                if (
+                    hook.__module__ == "torch.nn.utils.parametrizations.weight_norm"
+                    and hook.__class__.__name__ == "WeightNorm"
+                ):
+                    remove_weight_norm(l)
+        for l in self.resblocks:
+            for hook in l._forward_pre_hooks.values():
+                if (
+                    hook.__module__ == "torch.nn.utils.parametrizations.weight_norm"
+                    and hook.__class__.__name__ == "WeightNorm"
+                ):
+                    remove_weight_norm(l)
+        return self
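`HiFiGANNSFGenerator` stores its residual blocks in one flat `ModuleList`, `num_kernels` blocks per upsampling stage, and `forward` selects stage `i`'s slice with the `j in range(i * self.num_kernels, (i + 1) * self.num_kernels)` filter before averaging. A small sketch of that indexing with stand-in sizes (illustrative only, not from the commit):

num_upsamples, num_kernels = 4, 3  # stand-in values
resblocks = [f"block_{i}_{k}" for i in range(num_upsamples) for k in range(num_kernels)]
for i in range(num_upsamples):
    # the same filter the generator uses to pick stage i's blocks
    stage = [
        b for j, b in enumerate(resblocks)
        if j in range(i * num_kernels, (i + 1) * num_kernels)
    ]
    print(i, stage)
# 0 ['block_0_0', 'block_0_1', 'block_0_2']
# 1 ['block_1_0', 'block_1_1', 'block_1_2']  ... and so on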
mvsepless/vbach_lib/algorithm/generators/refinegan.py
ADDED
@@ -0,0 +1,451 @@
+import numpy as np
+import torch
+import torchaudio
+from torch import nn
+from torch.nn import functional as F
+from torch.nn.utils.parametrizations import weight_norm
+from torch.nn.utils import remove_weight_norm
+from torch.utils.checkpoint import checkpoint
+
+from ..commons import init_weights, get_padding
+
+
+class ResBlock(nn.Module):
+    """
+    Residual block with multiple dilated convolutions.
+
+    This block applies a sequence of dilated convolutional layers with Leaky ReLU activation.
+    It's designed to capture information at different scales due to the varying dilation rates.
+
+    Args:
+        channels (int): Number of input and output channels for the
+            convolution layers.
+        kernel_size (int, optional): Kernel size for the convolutional layers. Defaults to 7.
+        dilation (tuple[int], optional): Tuple of dilation rates for the convolutional layers. Defaults to (1, 3, 5).
+        leaky_relu_slope (float, optional): Slope for the Leaky ReLU activation. Defaults to 0.2.
+    """
+
+    def __init__(
+        self,
+        channels: int,
+        kernel_size: int = 7,
+        dilation: tuple[int] = (1, 3, 5),
+        leaky_relu_slope: float = 0.2,
+    ):
+        super().__init__()
+
+        self.leaky_relu_slope = leaky_relu_slope
+
+        self.convs1 = nn.ModuleList(
+            [
+                weight_norm(
+                    nn.Conv1d(
+                        channels,
+                        channels,
+                        kernel_size,
+                        stride=1,
+                        dilation=d,
+                        padding=get_padding(kernel_size, d),
+                    )
+                )
+                for d in dilation
+            ]
+        )
+        self.convs1.apply(init_weights)
+
+        self.convs2 = nn.ModuleList(
+            [
+                weight_norm(
+                    nn.Conv1d(
+                        channels,
+                        channels,
+                        kernel_size,
+                        stride=1,
+                        dilation=1,
+                        padding=get_padding(kernel_size, 1),
+                    )
+                )
+                for d in dilation
+            ]
+        )
+        self.convs2.apply(init_weights)
+
+    def forward(self, x: torch.Tensor):
+        for c1, c2 in zip(self.convs1, self.convs2):
+            xt = F.leaky_relu(x, self.leaky_relu_slope)
+            xt = c1(xt)
+            xt = F.leaky_relu(xt, self.leaky_relu_slope)
+            xt = c2(xt)
+            x = xt + x
+
+        return x
+
+    def remove_weight_norm(self):
+        for c1, c2 in zip(self.convs1, self.convs2):
+            remove_weight_norm(c1)
+            remove_weight_norm(c2)
+
+
+class AdaIN(nn.Module):
+    """
+    Adaptive Instance Normalization layer.
+
+    This layer applies a scaling factor to the input based on a learnable weight.
+
+    Args:
+        channels (int): Number of input channels.
+        leaky_relu_slope (float, optional): Slope for the Leaky ReLU activation applied after scaling. Defaults to 0.2.
+    """
+
+    def __init__(
+        self,
+        *,
+        channels: int,
+        leaky_relu_slope: float = 0.2,
+    ):
+        super().__init__()
+
+        self.weight = nn.Parameter(torch.ones(channels) * 1e-4)
+        # safe to use in-place as it is used on a new x+gaussian tensor
+        self.activation = nn.LeakyReLU(leaky_relu_slope)
+
+    def forward(self, x: torch.Tensor):
+        gaussian = torch.randn_like(x) * self.weight[None, :, None]
+
+        return self.activation(x + gaussian)
+
+
+class ParallelResBlock(nn.Module):
+    """
+    Parallel residual block that applies multiple residual blocks with different kernel sizes in parallel.
+
+    Args:
+        in_channels (int): Number of input channels.
+        out_channels (int): Number of output channels.
+        kernel_sizes (tuple[int], optional): Tuple of kernel sizes for the parallel residual blocks. Defaults to (3, 7, 11).
+        dilation (tuple[int], optional): Tuple of dilation rates for the convolutional layers within the residual blocks. Defaults to (1, 3, 5).
+        leaky_relu_slope (float, optional): Slope for the Leaky ReLU activation. Defaults to 0.2.
+    """
+
+    def __init__(
+        self,
+        *,
+        in_channels: int,
+        out_channels: int,
+        kernel_sizes: tuple[int] = (3, 7, 11),
+        dilation: tuple[int] = (1, 3, 5),
+        leaky_relu_slope: float = 0.2,
+    ):
+        super().__init__()
+
+        self.in_channels = in_channels
+        self.out_channels = out_channels
+
+        # wrapped in weight_norm so remove_weight_norm below has a norm to strip
+        self.input_conv = weight_norm(nn.Conv1d(
+            in_channels=in_channels,
+            out_channels=out_channels,
+            kernel_size=7,
+            stride=1,
+            padding=3,
+        ))
+
+        self.input_conv.apply(init_weights)
+
+        self.blocks = nn.ModuleList(
+            [
+                nn.Sequential(
+                    AdaIN(channels=out_channels),
+                    ResBlock(
+                        out_channels,
+                        kernel_size=kernel_size,
+                        dilation=dilation,
+                        leaky_relu_slope=leaky_relu_slope,
+                    ),
+                    AdaIN(channels=out_channels),
+                )
+                for kernel_size in kernel_sizes
+            ]
+        )
+
+    def forward(self, x: torch.Tensor):
+        x = self.input_conv(x)
+        return torch.stack([block(x) for block in self.blocks], dim=0).mean(dim=0)
+
+    def remove_weight_norm(self):
+        remove_weight_norm(self.input_conv)
+        for block in self.blocks:
+            block[1].remove_weight_norm()
+
+
+class SineGenerator(nn.Module):
+    """
+    Definition of sine generator
+
+    Generates sine waveforms with optional harmonics and additive noise.
+    Can be used to create harmonic noise source for neural vocoders.
+
+    Args:
+        samp_rate (int): Sampling rate in Hz.
+        harmonic_num (int): Number of harmonic overtones (default 0).
+        sine_amp (float): Amplitude of sine-waveform (default 0.1).
+        noise_std (float): Standard deviation of Gaussian noise (default 0.003).
+        voiced_threshold (float): F0 threshold for voiced/unvoiced classification (default 0).
+    """
+
+    def __init__(
+        self,
+        samp_rate,
+        harmonic_num=0,
+        sine_amp=0.1,
+        noise_std=0.003,
+        voiced_threshold=0,
+    ):
+        super(SineGenerator, self).__init__()
+        self.sine_amp = sine_amp
+        self.noise_std = noise_std
+        self.harmonic_num = harmonic_num
+        self.dim = self.harmonic_num + 1
+        self.sampling_rate = samp_rate
+        self.voiced_threshold = voiced_threshold
+
+        self.merge = nn.Sequential(
+            nn.Linear(self.dim, 1, bias=False),
+            nn.Tanh(),
+        )
+
+    def _f02uv(self, f0):
+        # generate uv signal
+        uv = torch.ones_like(f0)
+        uv = uv * (f0 > self.voiced_threshold)
+        return uv
+
+    def _f02sine(self, f0_values):
+        """f0_values: (batchsize, length, dim)
+        where dim indicates fundamental tone and overtones
+        """
+        # convert to F0 in rad. The integer part n can be ignored
+        # because 2 * np.pi * n doesn't affect phase
+        rad_values = (f0_values / self.sampling_rate) % 1
+
+        # initial phase noise (no noise for fundamental component)
+        rand_ini = torch.rand(
+            f0_values.shape[0], f0_values.shape[2], device=f0_values.device
+        )
+        rand_ini[:, 0] = 0
+        rad_values[:, 0, :] = rad_values[:, 0, :] + rand_ini
+
+        # instantaneous phase sine[t] = sin(2*pi \sum_i=1 ^{t} rad)
+        tmp_over_one = torch.cumsum(rad_values, 1) % 1
+        tmp_over_one_idx = (tmp_over_one[:, 1:, :] - tmp_over_one[:, :-1, :]) < 0
+        cumsum_shift = torch.zeros_like(rad_values)
+        cumsum_shift[:, 1:, :] = tmp_over_one_idx * -1.0
+
+        sines = torch.sin(torch.cumsum(rad_values + cumsum_shift, dim=1) * 2 * np.pi)
+
+        return sines
+
+    def forward(self, f0):
+        with torch.no_grad():
+            f0_buf = torch.zeros(f0.shape[0], f0.shape[1], self.dim, device=f0.device)
+            # fundamental component
+            f0_buf[:, :, 0] = f0[:, :, 0]
+            for idx in np.arange(self.harmonic_num):
+                f0_buf[:, :, idx + 1] = f0_buf[:, :, 0] * (idx + 2)
+
+            sine_waves = self._f02sine(f0_buf) * self.sine_amp
+
+            uv = self._f02uv(f0)
+
+            noise_amp = uv * self.noise_std + (1 - uv) * self.sine_amp / 3
+            noise = noise_amp * torch.randn_like(sine_waves)
+
+            sine_waves = sine_waves * uv + noise
+
+        # merge with grad
+        return self.merge(sine_waves)
+
+
+class RefineGANGenerator(nn.Module):
+    """
+    RefineGAN generator for audio synthesis.
+
+    This generator uses a combination of downsampling, residual blocks, and parallel residual blocks
+    to refine an input mel-spectrogram and fundamental frequency (F0) into an audio waveform.
+    It can also incorporate global conditioning.
+
+    Args:
+        sample_rate (int, optional): Sampling rate of the audio. Defaults to 44100.
+        downsample_rates (tuple[int], optional): Downsampling rates for the downsampling blocks. Defaults to (2, 2, 8, 8).
+        upsample_rates (tuple[int], optional): Upsampling rates for the upsampling blocks. Defaults to (8, 8, 2, 2).
+        leaky_relu_slope (float, optional): Slope for the Leaky ReLU activation. Defaults to 0.2.
+        num_mels (int, optional): Number of mel-frequency bins in the input mel-spectrogram. Defaults to 128.
+        start_channels (int, optional): Number of channels in the initial convolutional layer. Defaults to 16.
+        gin_channels (int, optional): Number of channels for the global conditioning input. Defaults to 256.
+        checkpointing (bool, optional): Whether to use checkpointing for memory efficiency. Defaults to False.
+    """
+
+    def __init__(
+        self,
+        *,
+        sample_rate: int = 44100,
+        downsample_rates: tuple[int] = (2, 2, 8, 8),  # unused
+        upsample_rates: tuple[int] = (8, 8, 2, 2),
+        leaky_relu_slope: float = 0.2,
+        num_mels: int = 128,
+        start_channels: int = 16,
+        gin_channels: int = 256,
+        checkpointing: bool = False,
+        upsample_initial_channel=512,
+    ):
+        super().__init__()
+        self.upsample_rates = upsample_rates
+        self.leaky_relu_slope = leaky_relu_slope
+        self.checkpointing = checkpointing
+
+        self.upp = np.prod(upsample_rates)
+        self.m_source = SineGenerator(sample_rate)
+
+        # expanded f0 sinegen -> match mel_conv
+        # (8, 1, 17280) -> (8, 16, 17280)
+        self.pre_conv = weight_norm(
+            nn.Conv1d(
+                1,
+                16,
+                7,
+                1,
+                padding=3,
+            )
+        )
+
+        # (8, 16, 17280) = 4th upscale
+        # (8, 32, 8640) = 3rd upscale
+        # (8, 64, 4320) = 2nd upscale
+        # (8, 128, 432) = 1st upscale
+        # (8, 256, 36) merged to mel
+
+        # f0 downsampling and upchanneling
+        channels = start_channels
+        size = self.upp
+        self.downsample_blocks = nn.ModuleList([])
+        self.df0 = []
+        for i, u in enumerate(upsample_rates):
+
+            new_size = int(size / upsample_rates[-i - 1])
+            # T dimension factors for torchaudio.functional.resample
+            self.df0.append([size, new_size])
+            size = new_size
+
+            new_channels = channels * 2
+            self.downsample_blocks.append(
+                weight_norm(nn.Conv1d(channels, new_channels, 7, 1, padding=3))
+            )
+            channels = new_channels
+
+        # mel handling
+        channels = upsample_initial_channel
+
+        self.mel_conv = weight_norm(
+            nn.Conv1d(
+                num_mels,
+                channels // 2,
+                7,
+                1,
+                padding=3,
+            )
+        )
+
+        self.mel_conv.apply(init_weights)
+
+        if gin_channels != 0:
+            self.cond = nn.Conv1d(256, channels // 2, 1)
+
+        self.upsample_blocks = nn.ModuleList([])
+        self.upsample_conv_blocks = nn.ModuleList([])
+
+        for rate in upsample_rates:
+            new_channels = channels // 2
+
+            self.upsample_blocks.append(nn.Upsample(scale_factor=rate, mode="linear"))
+
+            self.upsample_conv_blocks.append(
+                ParallelResBlock(
+                    in_channels=channels + channels // 4,
+                    out_channels=new_channels,
+                    kernel_sizes=(3, 7, 11),
+                    dilation=(1, 3, 5),
+                    leaky_relu_slope=leaky_relu_slope,
+                )
+            )
+
+            channels = new_channels
+
+        self.conv_post = weight_norm(
+            nn.Conv1d(channels, 1, 7, 1, padding=3, bias=False)
+        )
+        self.conv_post.apply(init_weights)
+
+    def forward(self, mel: torch.Tensor, f0: torch.Tensor, g: torch.Tensor = None):
+        f0_size = mel.shape[-1]
+        # change f0 helper to full size
+        f0 = F.interpolate(f0.unsqueeze(1), size=f0_size * self.upp, mode="linear")
+        # turn f0 into sine harmonics
+        har_source = self.m_source(f0.transpose(1, 2)).transpose(1, 2)
+        # prepare for fusion to mel
+        x = self.pre_conv(har_source)
+        # downsampled/upchanneled versions for each upscale
+        downs = []
+        for block, (old_size, new_size) in zip(self.downsample_blocks, self.df0):
+            x = F.leaky_relu(x, self.leaky_relu_slope)
+            downs.append(x)
+            # attempt to cancel spectral aliasing
+            x = torchaudio.functional.resample(
+                x.contiguous(),
+                orig_freq=int(f0_size * old_size),
+                new_freq=int(f0_size * new_size),
+                lowpass_filter_width=64,
+                rolloff=0.9475937167399596,
+                resampling_method="sinc_interp_kaiser",
+                beta=14.769656459379492,
+            )
+            x = block(x)
+
+        # expanding spectrogram from 192 to 256 channels
+        mel = self.mel_conv(mel)
+        if g is not None:
+            # adding expanded speaker embedding
+            mel = mel + self.cond(g)
+
+        x = torch.cat([mel, x], dim=1)
+
+        for ups, res, down in zip(
+            self.upsample_blocks,
+            self.upsample_conv_blocks,
+            reversed(downs),
+        ):
+            x = F.leaky_relu(x, self.leaky_relu_slope)
+
+            if self.training and self.checkpointing:
+                x = checkpoint(ups, x, use_reentrant=False)
+                x = torch.cat([x, down], dim=1)
+                x = checkpoint(res, x, use_reentrant=False)
+            else:
+                x = ups(x)
+                x = torch.cat([x, down], dim=1)
+                x = res(x)
+
+        x = F.leaky_relu(x, self.leaky_relu_slope)
+        x = self.conv_post(x)
+        x = torch.tanh(x)
+
+        return x
+
+    def remove_weight_norm(self):
+        remove_weight_norm(self.pre_conv)
+        remove_weight_norm(self.mel_conv)
+        remove_weight_norm(self.conv_post)
+
+        for block in self.downsample_blocks:
+            # weight-normed Conv1d layers, so use the functional form
+            remove_weight_norm(block)
+
+        for block in self.upsample_conv_blocks:
+            block.remove_weight_norm()
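`SineGenerator._f02sine` builds the waveform by integrating the per-sample phase increment `f0 / sampling_rate`; the `% 1` wrapping and `cumsum_shift` only shift the accumulated phase by whole cycles, which leaves the sine unchanged. For a constant F0 (and the fundamental, whose initial phase is forced to zero) this reduces to a plain tone, which a short NumPy check can confirm (standalone illustration, not part of the committed file):

import numpy as np

sr, f0, n = 16000, 220.0, 1600       # assumed sample rate / pitch / length
rad = np.full(n, f0 / sr)            # per-sample phase increment, in cycles
sine = np.sin(2 * np.pi * np.cumsum(rad))
ref = np.sin(2 * np.pi * f0 * np.arange(1, n + 1) / sr)
assert np.allclose(sine, ref)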
mvsepless/vbach_lib/algorithm/modules.py
ADDED
@@ -0,0 +1,117 @@
+import torch
+from .commons import fused_add_tanh_sigmoid_multiply
+
+
+class WaveNet(torch.nn.Module):
+    """
+    WaveNet residual blocks as used in WaveGlow.
+
+    Args:
+        hidden_channels (int): Number of hidden channels.
+        kernel_size (int): Size of the convolutional kernel.
+        dilation_rate (int): Dilation rate of the convolution.
+        n_layers (int): Number of convolutional layers.
+        gin_channels (int, optional): Number of conditioning channels. Defaults to 0.
+        p_dropout (float, optional): Dropout probability. Defaults to 0.
+    """
+
+    def __init__(
+        self,
+        hidden_channels: int,
+        kernel_size: int,
+        dilation_rate: int,
+        n_layers: int,
+        gin_channels: int = 0,
+        p_dropout: float = 0,
+    ):
+        super().__init__()
+        assert kernel_size % 2 == 1, "Kernel size must be odd for proper padding."
+
+        self.hidden_channels = hidden_channels
+        self.kernel_size = (kernel_size,)
+        self.dilation_rate = dilation_rate
+        self.n_layers = n_layers
+        self.gin_channels = gin_channels
+        self.p_dropout = p_dropout
+        self.n_channels_tensor = torch.IntTensor([hidden_channels])  # Static tensor
+
+        self.in_layers = torch.nn.ModuleList()
+        self.res_skip_layers = torch.nn.ModuleList()
+        self.drop = torch.nn.Dropout(p_dropout)
+
+        # Conditional layer for global conditioning
+        if gin_channels:
+            self.cond_layer = torch.nn.utils.parametrizations.weight_norm(
+                torch.nn.Conv1d(gin_channels, 2 * hidden_channels * n_layers, 1),
+                name="weight",
+            )
+
+        # Precompute dilations and paddings
+        dilations = [dilation_rate**i for i in range(n_layers)]
+        paddings = [(kernel_size * d - d) // 2 for d in dilations]
+
+        # Initialize layers
+        for i in range(n_layers):
+            self.in_layers.append(
+                torch.nn.utils.parametrizations.weight_norm(
+                    torch.nn.Conv1d(
+                        hidden_channels,
+                        2 * hidden_channels,
+                        kernel_size,
+                        dilation=dilations[i],
+                        padding=paddings[i],
+                    ),
+                    name="weight",
+                )
+            )
+
+            res_skip_channels = (
+                hidden_channels if i == n_layers - 1 else 2 * hidden_channels
+            )
+            self.res_skip_layers.append(
+                torch.nn.utils.parametrizations.weight_norm(
+                    torch.nn.Conv1d(hidden_channels, res_skip_channels, 1),
+                    name="weight",
+                )
+            )
+
+    def forward(self, x, x_mask, g=None):
+        output = x.clone().zero_()
+
+        # Apply conditional layer if global conditioning is provided
+        g = self.cond_layer(g) if g is not None else None
+
+        for i in range(self.n_layers):
+            x_in = self.in_layers[i](x)
+            g_l = (
+                g[
+                    :,
+                    i * 2 * self.hidden_channels : (i + 1) * 2 * self.hidden_channels,
+                    :,
+                ]
+                if g is not None
+                else 0
+            )
+
+            # Activation with fused Tanh-Sigmoid
+            acts = fused_add_tanh_sigmoid_multiply(x_in, g_l, self.n_channels_tensor)
+            acts = self.drop(acts)
+
+            # Residual and skip connections
+            res_skip_acts = self.res_skip_layers[i](acts)
+            if i < self.n_layers - 1:
+                res_acts = res_skip_acts[:, : self.hidden_channels, :]
+                x = (x + res_acts) * x_mask
+                output = output + res_skip_acts[:, self.hidden_channels :, :]
+            else:
+                output = output + res_skip_acts
+
+        return output * x_mask
+
+    def remove_weight_norm(self):
+        if self.gin_channels:
+            torch.nn.utils.remove_weight_norm(self.cond_layer)
+        for layer in self.in_layers:
+            torch.nn.utils.remove_weight_norm(layer)
+        for layer in self.res_skip_layers:
+            torch.nn.utils.remove_weight_norm(layer)
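`WaveNet.forward` relies on `fused_add_tanh_sigmoid_multiply` from `commons.py`, which is added elsewhere in this commit but not shown here. It is presumably the usual WaveGlow/VITS gated activation that splits the 2C-channel pre-activation into a tanh half and a sigmoid gate; a reference sketch under that assumption:

import torch

def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels):
    # assumed conventional form; the committed commons.py may differ
    n_channels_int = n_channels[0]
    in_act = input_a + input_b                            # conv output + conditioning
    t_act = torch.tanh(in_act[:, :n_channels_int, :])     # first C channels
    s_act = torch.sigmoid(in_act[:, n_channels_int:, :])  # gating half
    return t_act * s_act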
mvsepless/vbach_lib/algorithm/normalization.py
ADDED
@@ -0,0 +1,26 @@
+import torch
+
+
+class LayerNorm(torch.nn.Module):
+    """
+    Layer normalization module.
+
+    Args:
+        channels (int): Number of channels.
+        eps (float, optional): Epsilon value for numerical stability. Defaults to 1e-5.
+    """
+
+    def __init__(self, channels: int, eps: float = 1e-5):
+        super().__init__()
+        self.eps = eps
+        self.gamma = torch.nn.Parameter(torch.ones(channels))
+        self.beta = torch.nn.Parameter(torch.zeros(channels))
+
+    def forward(self, x):
+        # Transpose to (batch_size, time_steps, channels) for layer_norm
+        x = x.transpose(1, -1)
+        x = torch.nn.functional.layer_norm(
+            x, (x.size(-1),), self.gamma, self.beta, self.eps
+        )
+        # Transpose back to (batch_size, channels, time_steps)
+        return x.transpose(1, -1)
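This channel-first `LayerNorm` should behave identically to `torch.nn.LayerNorm` applied to a transposed tensor: both default to `eps=1e-5` with unit gain and zero bias. A quick equivalence check (illustrative only, not part of the commit):

import torch

ln = LayerNorm(channels=8)
ref = torch.nn.LayerNorm(8)          # same eps and affine initialization
x = torch.randn(2, 8, 50)            # (batch, channels, time)
expected = ref(x.transpose(1, -1)).transpose(1, -1)
assert torch.allclose(ln(x), expected, atol=1e-6)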
mvsepless/vbach_lib/algorithm/residuals.py
ADDED
@@ -0,0 +1,261 @@
+import torch
+from itertools import chain
+from typing import Optional, Tuple
+from torch.nn.utils import remove_weight_norm
+from torch.nn.utils.parametrizations import weight_norm
+
+from .modules import WaveNet
+from .commons import get_padding, init_weights
+
+LRELU_SLOPE = 0.1
+
+
+def create_conv1d_layer(channels, kernel_size, dilation):
+    return weight_norm(
+        torch.nn.Conv1d(
+            channels,
+            channels,
+            kernel_size,
+            1,
+            dilation=dilation,
+            padding=get_padding(kernel_size, dilation),
+        )
+    )
+
+
+def apply_mask(tensor: torch.Tensor, mask: Optional[torch.Tensor]):
+    return tensor * mask if mask is not None else tensor
+
+
+def apply_mask_(tensor: torch.Tensor, mask: Optional[torch.Tensor]):
+    return tensor.mul_(mask) if mask is not None else tensor
+
+
+class ResBlock(torch.nn.Module):
+    """
+    A residual block module that applies a series of 1D convolutional layers with residual connections.
+    """
+
+    def __init__(
+        self, channels: int, kernel_size: int = 3, dilations: Tuple[int] = (1, 3, 5)
+    ):
+        """
+        Initializes the ResBlock.
+
+        Args:
+            channels (int): Number of input and output channels for the convolution layers.
+            kernel_size (int): Size of the convolution kernel. Defaults to 3.
+            dilations (Tuple[int]): Tuple of dilation rates for the convolution layers in the first set.
+        """
+        super().__init__()
+        # Create convolutional layers with specified dilations and initialize weights
+        self.convs1 = self._create_convs(channels, kernel_size, dilations)
+        self.convs2 = self._create_convs(channels, kernel_size, [1] * len(dilations))
+
+    @staticmethod
+    def _create_convs(channels: int, kernel_size: int, dilations: Tuple[int]):
+        """
+        Creates a list of 1D convolutional layers with specified dilations.
+
+        Args:
+            channels (int): Number of input and output channels for the convolution layers.
+            kernel_size (int): Size of the convolution kernel.
+            dilations (Tuple[int]): Tuple of dilation rates for each convolution layer.
+        """
+        layers = torch.nn.ModuleList(
+            [create_conv1d_layer(channels, kernel_size, d) for d in dilations]
+        )
+        layers.apply(init_weights)
+        return layers
+
+    def forward(self, x: torch.Tensor, x_mask: Optional[torch.Tensor] = None):
+        for conv1, conv2 in zip(self.convs1, self.convs2):
+            x_residual = x
+            x = torch.nn.functional.leaky_relu(x, LRELU_SLOPE)
+            x = apply_mask(x, x_mask)
+            x = torch.nn.functional.leaky_relu(conv1(x), LRELU_SLOPE)
+            x = apply_mask(x, x_mask)
+            x = conv2(x)
+            x = x + x_residual
+        return apply_mask(x, x_mask)
+
+    def remove_weight_norm(self):
+        for conv in chain(self.convs1, self.convs2):
+            remove_weight_norm(conv)
+
+
+class Flip(torch.nn.Module):
+    """
+    Flip module for flow-based models.
+
+    This module flips the input along the channel dimension.
+    """
+
+    def forward(self, x, *args, reverse=False, **kwargs):
+        x = torch.flip(x, [1])
+        if not reverse:
+            logdet = torch.zeros(x.size(0), dtype=x.dtype, device=x.device)
+            return x, logdet
+        else:
+            return x
+
+
+class ResidualCouplingBlock(torch.nn.Module):
+    """
+    Residual Coupling Block for normalizing flow.
+
+    Args:
+        channels (int): Number of channels in the input.
+        hidden_channels (int): Number of hidden channels in the coupling layer.
+        kernel_size (int): Kernel size of the convolutional layers.
+        dilation_rate (int): Dilation rate of the convolutional layers.
+        n_layers (int): Number of layers in the coupling layer.
+        n_flows (int, optional): Number of coupling layers in the block. Defaults to 4.
+        gin_channels (int, optional): Number of channels for the global conditioning input. Defaults to 0.
+    """
+
+    def __init__(
+        self,
+        channels: int,
+        hidden_channels: int,
+        kernel_size: int,
+        dilation_rate: int,
+        n_layers: int,
+        n_flows: int = 4,
+        gin_channels: int = 0,
+    ):
+        super(ResidualCouplingBlock, self).__init__()
+        self.channels = channels
+        self.hidden_channels = hidden_channels
+        self.kernel_size = kernel_size
+        self.dilation_rate = dilation_rate
+        self.n_layers = n_layers
+        self.n_flows = n_flows
+        self.gin_channels = gin_channels
+
+        self.flows = torch.nn.ModuleList()
+        for _ in range(n_flows):
+            self.flows.append(
+                ResidualCouplingLayer(
+                    channels,
+                    hidden_channels,
+                    kernel_size,
+                    dilation_rate,
+                    n_layers,
+                    gin_channels=gin_channels,
+                    mean_only=True,
+                )
+            )
+            self.flows.append(Flip())
+
+    def forward(
+        self,
+        x: torch.Tensor,
+        x_mask: torch.Tensor,
+        g: Optional[torch.Tensor] = None,
+        reverse: bool = False,
+    ):
+        if not reverse:
+            for flow in self.flows:
+                x, _ = flow(x, x_mask, g=g, reverse=reverse)
+        else:
+            for flow in reversed(self.flows):
+                x = flow.forward(x, x_mask, g=g, reverse=reverse)
+        return x
+
+    def remove_weight_norm(self):
+        for i in range(self.n_flows):
+            self.flows[i * 2].remove_weight_norm()
+
+    def __prepare_scriptable__(self):
+        for i in range(self.n_flows):
+            for hook in self.flows[i * 2]._forward_pre_hooks.values():
+                if (
+                    hook.__module__ == "torch.nn.utils.parametrizations.weight_norm"
+                    and hook.__class__.__name__ == "WeightNorm"
+                ):
+                    torch.nn.utils.remove_weight_norm(self.flows[i * 2])
+
+        return self
+
+
+class ResidualCouplingLayer(torch.nn.Module):
+    """
+    Residual coupling layer for flow-based models.
+
+    Args:
+        channels (int): Number of channels.
+        hidden_channels (int): Number of hidden channels.
+        kernel_size (int): Size of the convolutional kernel.
+        dilation_rate (int): Dilation rate of the convolution.
+        n_layers (int): Number of convolutional layers.
+        p_dropout (float, optional): Dropout probability. Defaults to 0.
+        gin_channels (int, optional): Number of conditioning channels. Defaults to 0.
+        mean_only (bool, optional): Whether to use mean-only coupling. Defaults to False.
+    """
+
+    def __init__(
+        self,
+        channels: int,
+        hidden_channels: int,
+        kernel_size: int,
+        dilation_rate: int,
+        n_layers: int,
+        p_dropout: float = 0,
+        gin_channels: int = 0,
+        mean_only: bool = False,
+    ):
+        assert channels % 2 == 0, "channels should be divisible by 2"
+        super().__init__()
+        self.channels = channels
+        self.hidden_channels = hidden_channels
+        self.kernel_size = kernel_size
+        self.dilation_rate = dilation_rate
+        self.n_layers = n_layers
+        self.half_channels = channels // 2
+        self.mean_only = mean_only
+
+        self.pre = torch.nn.Conv1d(self.half_channels, hidden_channels, 1)
+        self.enc = WaveNet(
+            hidden_channels,
+            kernel_size,
+            dilation_rate,
+            n_layers,
+            p_dropout=p_dropout,
+            gin_channels=gin_channels,
+        )
+        self.post = torch.nn.Conv1d(
+            hidden_channels, self.half_channels * (2 - mean_only), 1
+        )
+        self.post.weight.data.zero_()
+        self.post.bias.data.zero_()
+
+    def forward(
+        self,
+        x: torch.Tensor,
+        x_mask: torch.Tensor,
+        g: Optional[torch.Tensor] = None,
+        reverse: bool = False,
+    ):
+        x0, x1 = torch.split(x, [self.half_channels] * 2, 1)
+        h = self.pre(x0) * x_mask
+        h = self.enc(h, x_mask, g=g)
+        stats = self.post(h) * x_mask
+        if not self.mean_only:
+            m, logs = torch.split(stats, [self.half_channels] * 2, 1)
+        else:
+            m = stats
+            logs = torch.zeros_like(m)
+
+        if not reverse:
+            x1 = m + x1 * torch.exp(logs) * x_mask
+            x = torch.cat([x0, x1], 1)
+            logdet = torch.sum(logs, [1, 2])
+            return x, logdet
+        else:
+            x1 = (x1 - m) * torch.exp(-logs) * x_mask
+            x = torch.cat([x0, x1], 1)
+            return x
+
+    def remove_weight_norm(self):
+        self.enc.remove_weight_norm()
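The coupling transform is exactly invertible: the forward pass maps `x1` to `m + x1 * exp(logs)` and the reverse pass undoes it, so a round trip through one layer should reproduce the input. A self-contained check with arbitrary small sizes (illustrative; it assumes the repo's `commons` helpers are importable):

import torch

layer = ResidualCouplingLayer(
    channels=4, hidden_channels=8, kernel_size=3, dilation_rate=1, n_layers=2
)
x = torch.randn(1, 4, 20)
x_mask = torch.ones(1, 1, 20)
with torch.no_grad():
    y, logdet = layer(x, x_mask)             # forward direction
    x_back = layer(y, x_mask, reverse=True)  # inverse direction
assert torch.allclose(x, x_back, atol=1e-5)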
mvsepless/vbach_lib/algorithm/synthesizers.py
ADDED
@@ -0,0 +1,243 @@
+import torch
+from typing import Optional
+from .generators.hifigan_mrf import HiFiGANMRFGenerator
+from .generators.hifigan_nsf import HiFiGANNSFGenerator
+from .generators.hifigan import HiFiGANGenerator
+from .generators.refinegan import RefineGANGenerator
+from .commons import slice_segments, rand_slice_segments
+from .residuals import ResidualCouplingBlock
+from .encoders import TextEncoder, PosteriorEncoder
+
+
+class Synthesizer(torch.nn.Module):
+    """
+    Base Synthesizer model.
+
+    Args:
+        spec_channels (int): Number of channels in the spectrogram.
+        segment_size (int): Size of the audio segment.
+        inter_channels (int): Number of channels in the intermediate layers.
+        hidden_channels (int): Number of channels in the hidden layers.
+        filter_channels (int): Number of channels in the filter layers.
+        n_heads (int): Number of attention heads.
+        n_layers (int): Number of layers in the encoder.
+        kernel_size (int): Size of the convolution kernel.
+        p_dropout (float): Dropout probability.
+        resblock (str): Type of residual block.
+        resblock_kernel_sizes (list): Kernel sizes for the residual blocks.
+        resblock_dilation_sizes (list): Dilation sizes for the residual blocks.
+        upsample_rates (list): Upsampling rates for the decoder.
+        upsample_initial_channel (int): Number of channels in the initial upsampling layer.
+        upsample_kernel_sizes (list): Kernel sizes for the upsampling layers.
+        spk_embed_dim (int): Dimension of the speaker embedding.
+        gin_channels (int): Number of channels in the global conditioning vector.
+        sr (int): Sampling rate of the audio.
+        use_f0 (bool): Whether to use F0 information.
+        text_enc_hidden_dim (int): Hidden dimension for the text encoder.
+        kwargs: Additional keyword arguments.
+    """
+
+    def __init__(
+        self,
+        spec_channels: int,
+        segment_size: int,
+        inter_channels: int,
+        hidden_channels: int,
+        filter_channels: int,
+        n_heads: int,
+        n_layers: int,
+        kernel_size: int,
+        p_dropout: float,
+        resblock: str,
+        resblock_kernel_sizes: list,
+        resblock_dilation_sizes: list,
+        upsample_rates: list,
+        upsample_initial_channel: int,
+        upsample_kernel_sizes: list,
+        spk_embed_dim: int,
+        gin_channels: int,
+        sr: int,
+        use_f0: bool,
+        text_enc_hidden_dim: int = 768,
+        vocoder: str = "HiFi-GAN",
+        randomized: bool = True,
+        checkpointing: bool = False,
+        **kwargs,
+    ):
+        super().__init__()
+        self.segment_size = segment_size
+        self.use_f0 = use_f0
+        self.randomized = randomized
+
+        self.enc_p = TextEncoder(
+            inter_channels,
+            hidden_channels,
+            filter_channels,
+            n_heads,
+            n_layers,
+            kernel_size,
+            p_dropout,
+            text_enc_hidden_dim,
+            f0=use_f0,
+        )
+        print(f"Using {vocoder} vocoder")
+        if use_f0:
+            if vocoder == "MRF HiFi-GAN":
+                self.dec = HiFiGANMRFGenerator(
+                    in_channel=inter_channels,
+                    upsample_initial_channel=upsample_initial_channel,
+                    upsample_rates=upsample_rates,
+                    upsample_kernel_sizes=upsample_kernel_sizes,
+                    resblock_kernel_sizes=resblock_kernel_sizes,
+                    resblock_dilations=resblock_dilation_sizes,
+                    gin_channels=gin_channels,
+                    sample_rate=sr,
+                    harmonic_num=8,
+                    checkpointing=checkpointing,
+                )
+            elif vocoder == "RefineGAN":
+                self.dec = RefineGANGenerator(
+                    sample_rate=sr,
+                    downsample_rates=upsample_rates[::-1],
+                    upsample_rates=upsample_rates,
+                    start_channels=16,
+                    num_mels=inter_channels,
+                    checkpointing=checkpointing,
+                )
+            else:
+                self.dec = HiFiGANNSFGenerator(
+                    inter_channels,
+                    resblock_kernel_sizes,
+                    resblock_dilation_sizes,
+                    upsample_rates,
+                    upsample_initial_channel,
+                    upsample_kernel_sizes,
+                    gin_channels=gin_channels,
+                    sr=sr,
+                    checkpointing=checkpointing,
+                )
+        else:
+            if vocoder == "MRF HiFi-GAN":
+                print("MRF HiFi-GAN does not support training without pitch guidance.")
+                self.dec = None
+            elif vocoder == "RefineGAN":
+                print("RefineGAN does not support training without pitch guidance.")
+                self.dec = None
+            else:
+                self.dec = HiFiGANGenerator(
+                    inter_channels,
+                    resblock_kernel_sizes,
+                    resblock_dilation_sizes,
+                    upsample_rates,
+                    upsample_initial_channel,
+                    upsample_kernel_sizes,
+                    gin_channels=gin_channels,
+                )
+        self.enc_q = PosteriorEncoder(
+            spec_channels,
+            inter_channels,
+            hidden_channels,
+            5,
+            1,
+            16,
+            gin_channels=gin_channels,
+        )
+        self.flow = ResidualCouplingBlock(
+            inter_channels,
+            hidden_channels,
+            5,
+            1,
+            3,
+            gin_channels=gin_channels,
+        )
+        self.emb_g = torch.nn.Embedding(spk_embed_dim, gin_channels)
+
+    def _remove_weight_norm_from(self, module):
+        for hook in module._forward_pre_hooks.values():
+            if getattr(hook, "__class__", None).__name__ == "WeightNorm":
+                torch.nn.utils.remove_weight_norm(module)
+
+    def remove_weight_norm(self):
+        for module in [self.dec, self.flow, self.enc_q]:
+            self._remove_weight_norm_from(module)
+
+    def __prepare_scriptable__(self):
+        self.remove_weight_norm()
+        return self
+
+    def forward(
+        self,
+        phone: torch.Tensor,
+        phone_lengths: torch.Tensor,
+        pitch: Optional[torch.Tensor] = None,
+        pitchf: Optional[torch.Tensor] = None,
+        y: Optional[torch.Tensor] = None,
+        y_lengths: Optional[torch.Tensor] = None,
+        ds: Optional[torch.Tensor] = None,
+    ):
+        g = self.emb_g(ds).unsqueeze(-1)
+        m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths)
+
+        if y is not None:
+            z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)
+            z_p = self.flow(z, y_mask, g=g)
+            # regular old training method using random slices
+            if self.randomized:
+                z_slice, ids_slice = rand_slice_segments(
+                    z, y_lengths, self.segment_size
+                )
| 189 |
+
if self.use_f0:
|
| 190 |
+
pitchf = slice_segments(pitchf, ids_slice, self.segment_size, 2)
|
| 191 |
+
o = self.dec(z_slice, pitchf, g=g)
|
| 192 |
+
else:
|
| 193 |
+
o = self.dec(z_slice, g=g)
|
| 194 |
+
return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q)
|
| 195 |
+
# future use for finetuning using the entire dataset each pass
|
| 196 |
+
else:
|
| 197 |
+
if self.use_f0:
|
| 198 |
+
o = self.dec(z, pitchf, g=g)
|
| 199 |
+
else:
|
| 200 |
+
o = self.dec(z, g=g)
|
| 201 |
+
return o, None, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q)
|
| 202 |
+
else:
|
| 203 |
+
return None, None, x_mask, None, (None, None, m_p, logs_p, None, None)
|
| 204 |
+
|
| 205 |
+
@torch.jit.export
|
| 206 |
+
def infer(
|
| 207 |
+
self,
|
| 208 |
+
phone: torch.Tensor,
|
| 209 |
+
phone_lengths: torch.Tensor,
|
| 210 |
+
pitch: Optional[torch.Tensor] = None,
|
| 211 |
+
nsff0: Optional[torch.Tensor] = None,
|
| 212 |
+
sid: torch.Tensor = None,
|
| 213 |
+
rate: Optional[torch.Tensor] = None,
|
| 214 |
+
):
|
| 215 |
+
"""
|
| 216 |
+
Inference of the model.
|
| 217 |
+
|
| 218 |
+
Args:
|
| 219 |
+
phone (torch.Tensor): Phoneme sequence.
|
| 220 |
+
phone_lengths (torch.Tensor): Lengths of the phoneme sequences.
|
| 221 |
+
pitch (torch.Tensor, optional): Pitch sequence.
|
| 222 |
+
nsff0 (torch.Tensor, optional): Fine-grained pitch sequence.
|
| 223 |
+
sid (torch.Tensor): Speaker embedding.
|
| 224 |
+
rate (torch.Tensor, optional): Rate for time-stretching.
|
| 225 |
+
"""
|
| 226 |
+
g = self.emb_g(sid).unsqueeze(-1)
|
| 227 |
+
m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths)
|
| 228 |
+
z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask
|
| 229 |
+
|
| 230 |
+
if rate is not None:
|
| 231 |
+
head = int(z_p.shape[2] * (1.0 - rate.item()))
|
| 232 |
+
z_p, x_mask = z_p[:, :, head:], x_mask[:, :, head:]
|
| 233 |
+
if self.use_f0 and nsff0 is not None:
|
| 234 |
+
nsff0 = nsff0[:, head:]
|
| 235 |
+
|
| 236 |
+
z = self.flow(z_p, x_mask, g=g, reverse=True)
|
| 237 |
+
o = (
|
| 238 |
+
self.dec(z * x_mask, nsff0, g=g)
|
| 239 |
+
if self.use_f0
|
| 240 |
+
else self.dec(z * x_mask, g=g)
|
| 241 |
+
)
|
| 242 |
+
|
| 243 |
+
return o, x_mask, (z, z_p, m_p, logs_p)
|