Upload 55 files

This view is limited to 50 files because it contains too many changes. See the raw diff for the full change set.
- .gitignore +153 -0
- LICENSE +21 -0
- README.md +13 -13
- README_zh_CN.md +185 -0
- app.py +69 -0
- baidu.py +29 -0
- cluster/__init__.py +29 -0
- cluster/train_cluster.py +89 -0
- configs/config.json +0 -0
- configs/momoi.json +93 -0
- configs_template/config_template.json +65 -0
- data_utils.py +142 -0
- dataset_raw/wav_structure.txt +20 -0
- filelists/test.txt +4 -0
- filelists/train.txt +15 -0
- filelists/val.txt +2 -0
- flask_api.py +56 -0
- flask_api_full_song.py +55 -0
- hubert/__init__.py +0 -0
- hubert/checkpoint_best_legacy_500.pt +3 -0
- hubert/hubert_model.py +222 -0
- hubert/hubert_model_onnx.py +217 -0
- hubert/put_hubert_ckpt_here +0 -0
- inference/__init__.py +0 -0
- inference/infer_tool.py +251 -0
- inference/infer_tool_grad.py +160 -0
- inference/slicer.py +142 -0
- inference_main.py +101 -0
- logs/44k/momoi_E13_G40.pth +3 -0
- logs/44k/put_pretrained_model_here +0 -0
- models.py +420 -0
- modules/__init__.py +0 -0
- modules/attentions.py +349 -0
- modules/commons.py +188 -0
- modules/losses.py +61 -0
- modules/mel_processing.py +112 -0
- modules/modules.py +342 -0
- onnx_export.py +53 -0
- onnxexport/model_onnx.py +335 -0
- preprocess_flist_config.py +83 -0
- preprocess_hubert_f0.py +62 -0
- raw/put_raw_wav_here +0 -0
- requirements.txt +18 -0
- requirements_win.txt +21 -0
- resample.py +48 -0
- spec_gen.py +22 -0
- train.py +310 -0
- utils.py +502 -0
- vdecoder/__init__.py +0 -0
- vdecoder/hifigan/env.py +15 -0
.gitignore
ADDED
@@ -0,0 +1,153 @@
+
+# Created by https://www.toptal.com/developers/gitignore/api/python
+# Edit at https://www.toptal.com/developers/gitignore?templates=python
+
+### Python ###
+# Byte-compiled / optimized / DLL files
+__pycache__/
+*.py[cod]
+*$py.class
+
+# C extensions
+*.so
+
+# Distribution / packaging
+.Python
+build/
+develop-eggs/
+dist/
+downloads/
+eggs/
+.eggs/
+lib/
+lib64/
+parts/
+sdist/
+var/
+wheels/
+pip-wheel-metadata/
+share/python-wheels/
+*.egg-info/
+.installed.cfg
+*.egg
+MANIFEST
+
+# PyInstaller
+# Usually these files are written by a python script from a template
+# before PyInstaller builds the exe, so as to inject date/other infos into it.
+*.manifest
+*.spec
+
+# Installer logs
+pip-log.txt
+pip-delete-this-directory.txt
+
+# Unit test / coverage reports
+htmlcov/
+.tox/
+.nox/
+.coverage
+.coverage.*
+.cache
+nosetests.xml
+coverage.xml
+*.cover
+*.py,cover
+.hypothesis/
+.pytest_cache/
+pytestdebug.log
+
+# Translations
+*.mo
+*.pot
+
+# Django stuff:
+*.log
+local_settings.py
+db.sqlite3
+db.sqlite3-journal
+
+# Flask stuff:
+instance/
+.webassets-cache
+
+# Scrapy stuff:
+.scrapy
+
+# Sphinx documentation
+docs/_build/
+doc/_build/
+
+# PyBuilder
+target/
+
+# Jupyter Notebook
+.ipynb_checkpoints
+
+# IPython
+profile_default/
+ipython_config.py
+
+# pyenv
+.python-version
+
+# pipenv
+# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
+# However, in case of collaboration, if having platform-specific dependencies or dependencies
+# having no cross-platform support, pipenv may install dependencies that don't work, or not
+# install all needed dependencies.
+#Pipfile.lock
+
+# PEP 582; used by e.g. github.com/David-OConnor/pyflow
+__pypackages__/
+
+# Celery stuff
+celerybeat-schedule
+celerybeat.pid
+
+# SageMath parsed files
+*.sage.py
+
+# Environments
+.env
+.venv
+env/
+venv/
+ENV/
+env.bak/
+venv.bak/
+
+# Spyder project settings
+.spyderproject
+.spyproject
+
+# Rope project settings
+.ropeproject
+
+# mkdocs documentation
+/site
+
+# mypy
+.mypy_cache/
+.dmypy.json
+dmypy.json
+
+# Pyre type checker
+.pyre/
+
+# pytype static type analyzer
+.pytype/
+
+# End of https://www.toptal.com/developers/gitignore/api/python
+
+dataset
+dataset_raw
+raw
+results
+inference/chunks_temp.json
+logs
+hubert/checkpoint_best_legacy_500.pt
+configs/config.json
+filelists/test.txt
+filelists/train.txt
+filelists/val.txt
LICENSE
ADDED
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2021 Jingyi Li
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
README.md
CHANGED
@@ -1,13 +1,13 @@
----
-title:
-emoji:
-colorFrom: green
-colorTo:
-sdk: gradio
-sdk_version:
-app_file: app.py
-pinned: false
-license: mit
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
+---
+title: qwe
+emoji: ⚡
+colorFrom: green
+colorTo: yellow
+sdk: gradio
+sdk_version: 4.38.1
+app_file: app.py
+pinned: false
+license: mit
+---
+
+Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
README_zh_CN.md
ADDED
@@ -0,0 +1,185 @@
+# SoftVC VITS Singing Voice Conversion
+
+[**English**](./README.md) | [**中文简体**](./README_zh_CN.md)
+
+## Terms of Use
+
+1. This project was established for academic exchange and is intended for communication and learning only; it is not prepared for production environments. Please resolve dataset licensing yourself: you bear full responsibility and all consequences for any problem caused by training on unlicensed datasets!
+2. Any video published to a video platform that was made with sovits must clearly state in its description the input source vocals or audio used for the conversion. For example, if you convert vocals separated from someone else's video or audio, you must give a clear link to the original video or music; if the input source is your own voice, or audio synthesized by another singing-voice synthesis engine, you must also state this in the description.
+3. You bear full responsibility and all consequences for any infringement caused by the input source. When using another commercial singing-voice synthesis product as the input source, make sure you comply with its terms of use; note that many singing-voice synthesis engines explicitly forbid using their output as a conversion input source!
+4. Continued use constitutes agreement to the terms described in this repository's README. This README has fulfilled its duty of advisement and is not responsible for problems that may arise afterwards.
+5. If you redistribute the code of this repository, or publicly release any result produced by this project (including but not limited to video-platform uploads), please credit the original author and the code source (this repository).
+6. If you use this project for any other plan, please contact and inform the author of this repository in advance. Thank you very much.
+
+### A recommended fork with improved interaction: [34j/so-vits-svc-fork](https://github.com/34j/so-vits-svc-fork)
+
+## update
+
+> The 4.0-v2 model has been released; the whole pipeline is the same as 4.0. Compared with 4.0 it improves in some scenarios but regresses in others; see the [4.0-v2 branch](https://github.com/svc-develop-team/so-vits-svc/tree/4.0-v2) for details
+
+## Model Overview
+
+A singing-voice timbre conversion model: speech features of the source audio are extracted with the SoftVC content encoder and fed into VITS together with F0, replacing the original text input, which achieves singing-voice conversion. The vocoder is also replaced with [NSF HiFiGAN](https://github.com/openvpi/DiffSinger/tree/refactor/modules/nsf_hifigan) to fix interrupted audio
+
+### What's new in 4.0
+
++ The feature input has been replaced with [Content Vec](https://github.com/auspicious3000/contentvec)
++ The sampling rate is now uniformly 44100 Hz
++ Because the hop size and other parameters were changed and parts of the model structure were simplified, the VRAM needed for inference is **greatly reduced**; the 44 kHz 4.0 model uses even less VRAM than the 32 kHz 3.0 model
++ Parts of the code structure were adjusted
++ Dataset preparation and training are the same as in 3.0, but the models are completely incompatible, and the dataset must be fully re-preprocessed
++ Added option 1: automatic F0 prediction in VC mode, i.e. no manual pitch key is needed when converting speech, and male/female keys are converted automatically. Speech conversion only: this mode goes badly off-pitch on singing
++ Added option 2: a k-means clustering scheme to reduce timbre leakage, i.e. make the timbre closer to the target
+
+## Pretrained Model Files
+
+#### **Required**
+
++ contentvec: [checkpoint_best_legacy_500.pt](https://ibm.box.com/s/z1wgl1stco8ffooyatzdwsqn2psd9lrr)
+  + Place it under the `hubert` directory
+
+```shell
+# contentvec
+http://obs.cstcloud.cn/share/obs/sankagenkeshi/checkpoint_best_legacy_500.pt
+# Or download it manually and place it in the hubert directory
+```
+
+#### **Optional (strongly recommended)**
+
++ Pretrained base model files: `G_0.pth` `D_0.pth`
+  + Place them under the `logs/44k` directory
+
+Obtain them from svc-develop-team (TBD) or anywhere else
+
+Although base models generally do not cause copyright problems, please still be careful: for example, ask the author in advance, or check that the author has clearly stated the permitted uses in the model description
+
+## Dataset Preparation
+
+Simply place the dataset under the dataset_raw directory with the following file structure
+
+```shell
+dataset_raw
+├───speaker0
+│   ├───xxx1-xxx1.wav
+│   ├───...
+│   └───Lxx-0xx8.wav
+└───speaker1
+    ├───xx2-0xxx2.wav
+    ├───...
+    └───xxx7-xxx007.wav
+```
+
+## Preprocessing
+
+1. Resample to 44100 Hz
+
+```shell
+python resample.py
+```
+
+2. Automatically split the training, validation, and test sets, and generate the configuration file
+
+```shell
+python preprocess_flist_config.py
+```
+
+3. Generate hubert features and F0
+
+```shell
+python preprocess_hubert_f0.py
+```
+
+After the steps above, the dataset directory contains the preprocessed data and the dataset_raw folder can be deleted
+
+## Training
+
+```shell
+python train.py -c configs/config.json -m 44k
+```
+Note: training automatically deletes old models and keeps only the latest 3. If you want to guard against overfitting, back up the checkpoints manually, or change keep_ckpts in the configuration file to 0 to never delete
+
+## Inference
+
+Use [inference_main.py](inference_main.py)
+
+Up to this point, 4.0 is used exactly the same way as 3.0 for training and inference, with no changes (inference now additionally supports the command line)
+
+```shell
+# Example
+python inference_main.py -m "logs/44k/G_30400.pth" -c "configs/config.json" -n "君の知らない物語-src.wav" -t 0 -s "nen"
+```
+
+Required arguments
++ -m, --model_path: path to the model.
++ -c, --config_path: path to the configuration file.
++ -n, --clean_names: list of wav file names, placed under the raw folder.
++ -t, --trans: pitch shift, positive or negative (semitones).
++ -s, --spk_list: name of the target speaker to synthesize.
+
+Optional arguments: see the next section
++ -a, --auto_predict_f0: automatic pitch prediction for speech conversion; do not enable this when converting singing, it will go badly off-pitch.
++ -cm, --cluster_model_path: path to the cluster model; fill in anything if no cluster model was trained.
++ -cr, --cluster_infer_ratio: ratio of the clustering scheme, in the range 0-1; set it to 0 if no cluster model was trained.
+
+## Optional Features
+
+If you are already satisfied with the results, or don't understand what follows, you can ignore the rest; it does not affect model use (these options have a fairly small effect; they may help on some specific data, but in most cases the difference is barely noticeable)
+
+### Automatic F0 prediction
+
+Training the 4.0 model also trains an F0 predictor. For speech conversion you can enable automatic pitch prediction, and fall back to manual pitch if the result is poor. But do not enable this feature when converting singing!!! It will go badly off-pitch!!
++ Set auto_predict_f0 to true in inference_main
+
+### Cluster-based timbre-leakage control
+
+Introduction: the clustering scheme can reduce timbre leakage and make the trained model sound more like the target timbre (though the effect is not especially obvious), but clustering alone degrades the model's articulation (it becomes slurred, which is very noticeable). This model uses a fusion approach
+that linearly controls the ratio of the clustered to the non-clustered scheme, i.e. you can manually tune the balance between "sounds like the target timbre" and "clear articulation" to find a good trade-off.
+
+None of the earlier steps need to change to use clustering; you only need to train an extra cluster model. The effect is fairly limited, but so is the training cost
+
++ Training:
+  + Train on a machine with a good CPU; in my experience, on a 6-core CPU on Tencent Cloud it takes about 4 minutes per speaker
+  + Run python cluster/train_cluster.py; the model is written to logs/44k/kmeans_10000.pt
++ Inference:
+  + Specify cluster_model_path in inference_main
+  + Specify cluster_infer_ratio in inference_main: 0 means no clustering at all, 1 means clustering only; 0.5 is usually fine
+
+### [](https://colab.research.google.com/drive/1kv-3y2DmZo0uya8pEr1xk7cSB-4e_Pct?usp=sharing) [sovits4_for_colab.ipynb](https://colab.research.google.com/drive/1kv-3y2DmZo0uya8pEr1xk7cSB-4e_Pct?usp=sharing)
+
+#### [23/03/16] Manually downloading hubert is no longer required
+
+## Onnx Export
+
+Use [onnx_export.py](onnx_export.py)
++ Create a new folder: `checkpoints`, and open it
++ Inside the `checkpoints` folder, create a project folder named after your project, e.g. `aziplayer`
++ Rename your model to `model.pth` and your configuration file to `config.json`, and place them in the `aziplayer` folder you just created
++ In [onnx_export.py](onnx_export.py), change `"NyaruTaffy"` in `path = "NyaruTaffy"` to your project name, i.e. `path = "aziplayer"`
++ Run [onnx_export.py](onnx_export.py)
++ Wait for it to finish; a `model.onnx` is generated in your project folder, which is the exported model
+
+### UIs that support Onnx models
+
++ [MoeSS](https://github.com/NaruseMioShirakana/MoeSS)
+  + I removed all training functions and every complicated transpose, keeping not a single line, because I believe that only by removing them do you know you are really using Onnx
+  + Note: for the Hubert Onnx model, use the one provided by MoeSS; it currently cannot be exported yourself (Hubert in fairseq uses many operators unsupported by onnx, plus things involving constants; exporting either errors out, or the exported model's input/output shapes and results are broken)
+  [Hubert4.0](https://huggingface.co/NaruseMioShirakana/MoeSS-SUBModel)
+
+## Some Legal References
+
+#### Civil Code of the People's Republic of China
+
+##### Article 1019
+
+No organization or individual may infringe another person's portrait rights by defacing or defaming the portrait, or by forging it with information technology. Without the consent of the portrait-rights holder, no one may produce, use, or publish the holder's portrait, except as otherwise provided by law.
+Without the consent of the portrait-rights holder, the rights holder of a portrait work may not use or publish the portrait by means of publication, reproduction, distribution, rental, exhibition, or otherwise.
+The protection of a natural person's voice is governed by reference to the relevant provisions on the protection of portrait rights.
+
+##### Article 1024
+
+[Right of reputation] Civil-law subjects enjoy the right of reputation. No organization or individual may infringe another's right of reputation by insult, defamation, or other means.
+
+##### Article 1027
+
+[Works infringing reputation] Where a literary or artistic work published by a person describes real people and events, or a specific person, and contains insulting or defamatory content that infringes another's right of reputation, the injured party has the right to demand that the person bear civil liability according to law.
+Where a literary or artistic work published by a person does not describe a specific person, and merely some of its circumstances resemble that person's situation, the person does not bear civil liability.
app.py
ADDED
@@ -0,0 +1,69 @@
+import io
+import os
+
+# os.system("wget -P cvec/ https://huggingface.co/spaces/innnky/nanami/resolve/main/checkpoint_best_legacy_500.pt")
+import gradio as gr
+import librosa
+import numpy as np
+import soundfile
+from inference.infer_tool import Svc
+import logging
+
+logging.getLogger('numba').setLevel(logging.WARNING)
+logging.getLogger('markdown_it').setLevel(logging.WARNING)
+logging.getLogger('urllib3').setLevel(logging.WARNING)
+logging.getLogger('matplotlib').setLevel(logging.WARNING)
+
+config_path = "configs/momoi.json"
+
+model = Svc("logs/44k/momoi_E13_G40.pth", "configs/momoi.json", cluster_model_path="logs/44k/kmeans_10000.pt")
+
+
+def vc_fn(sid, input_audio, vc_transform, auto_f0, cluster_ratio, slice_db, noise_scale):
+    if input_audio is None:
+        return "You need to upload an audio", None
+    sampling_rate, audio = input_audio
+    # print(audio.shape, sampling_rate)
+    duration = audio.shape[0] / sampling_rate
+    if duration > 90:  # cap uploads at 90 s, matching the UI label and error message
+        return "Please upload audio shorter than 90 s; convert longer audio locally", None
+    audio = (audio / np.iinfo(audio.dtype).max).astype(np.float32)
+    if len(audio.shape) > 1:
+        audio = librosa.to_mono(audio.transpose(1, 0))
+    if sampling_rate != 16000:
+        audio = librosa.resample(audio, orig_sr=sampling_rate, target_sr=16000)
+    print(audio.shape)
+    out_wav_path = "temp.wav"
+    soundfile.write(out_wav_path, audio, 16000, format="wav")
+    print(cluster_ratio, auto_f0, noise_scale)
+    _audio = model.slice_inference(out_wav_path, sid, vc_transform, slice_db, cluster_ratio, auto_f0, noise_scale)
+    return "Success", (44100, _audio)
+
+
+app = gr.Blocks()
+with app:
+    with gr.Tabs():
+        with gr.TabItem("Basic"):
+            gr.Markdown(value="""
+                sovits4.0 online demo
+
+                This is an online demo of the pretrained base model; training data: 云灏 即霜 辉宇·星AI 派蒙 绫地宁宁
+                """)
+            spks = list(model.spk2id.keys())
+            sid = gr.Dropdown(label="Voice", choices=spks, value=spks[0])
+            vc_input3 = gr.Audio(label="Upload audio (shorter than 90 seconds)")
+            vc_transform = gr.Number(label="Pitch shift (integer, positive or negative, in semitones; one octave up is 12)", value=0)
+            cluster_ratio = gr.Number(label="Cluster-model mix ratio, 0-1; default 0 disables clustering. Improves timbre similarity but degrades articulation (around 0.5 recommended if used)", value=0)
+            auto_f0 = gr.Checkbox(label="Automatic F0 prediction; works better together with the cluster model and disables the pitch-shift control (speech conversion only; do not check this for singing, it will go badly off-pitch)", value=False)
+            slice_db = gr.Number(label="Slicing threshold", value=-40)
+            noise_scale = gr.Number(label="noise_scale: better left alone, affects audio quality; a black-magic parameter", value=0.4)
+            vc_submit = gr.Button("Convert", variant="primary")
+            vc_output1 = gr.Textbox(label="Output Message")
+            vc_output2 = gr.Audio(label="Output Audio")
+            vc_submit.click(vc_fn, [sid, vc_input3, vc_transform, auto_f0, cluster_ratio, slice_db, noise_scale], [vc_output1, vc_output2])
+
+app.launch()
+
+
+
baidu.py
ADDED
@@ -0,0 +1,29 @@
+from inference_main import main  # import the voice-inference entry point
+import os
+
+# 1. Install the library: pip install baidu-aip
+from aip import AipSpeech
+
+if __name__ == "__main__":
+    # 2. Initialize the AipSpeech object
+    App_ID = '31464582'
+    API_Key = 'uNi0i8CDyLKpqgtQx1pBA6Pi'
+    Secret_Key = '3WNDDfPyx2ChrYmUnmPL6zRa4gyK2m8y'
+
+    # These act as the three keys
+    client = AipSpeech(App_ID, API_Key, Secret_Key)
+
+    # 3. Call the speech-synthesis method
+    text = '大家好,我是人工智能静芬,可以给你们唱歌哦'  # "Hello everyone, I am the AI Jingfen, and I can sing for you"
+    # Audio byte stream
+    result = client.synthesis(text, "zh", 1, {"per": 0})  # per voices: 度小宇=1, 度小美=0, 度逍遥(基础)=3, 度丫丫=4; premium voices: 度逍遥(精品)=5003, 度小鹿=5118, 度博文=106, 度小童=110, 度小萌=111, 度米朵=103, 度小娇=5
+    # print(result)
+
+    # On success, raw audio bytes are returned; on error a dict is returned (see the error codes)
+    # 4. Save the audio file
+    if not isinstance(result, dict):
+        with open('./raw/audio.mp3', 'wb') as f:
+            f.write(result)
+
+    # 5. Convert the Baidu TTS audio into Jingfen's voice
+    os.system('python inference_main.py -m "logs/44k/G_24000.pth" -c "configs/config.json" -n "audio.mp3" -t 2 -s "jingfen"')
cluster/__init__.py
ADDED
@@ -0,0 +1,29 @@
+import numpy as np
+import torch
+from sklearn.cluster import KMeans
+
+def get_cluster_model(ckpt_path):
+    checkpoint = torch.load(ckpt_path)
+    kmeans_dict = {}
+    for spk, ckpt in checkpoint.items():
+        km = KMeans(ckpt["n_features_in_"])
+        km.__dict__["n_features_in_"] = ckpt["n_features_in_"]
+        km.__dict__["_n_threads"] = ckpt["_n_threads"]
+        km.__dict__["cluster_centers_"] = ckpt["cluster_centers_"]
+        kmeans_dict[spk] = km
+    return kmeans_dict
+
+def get_cluster_result(model, x, speaker):
+    """
+    x: np.array [t, 256]
+    return cluster class result
+    """
+    return model[speaker].predict(x)
+
+def get_cluster_center_result(model, x, speaker):
+    """x: np.array [t, 256]"""
+    predict = model[speaker].predict(x)
+    return model[speaker].cluster_centers_[predict]
+
+def get_center(model, x, speaker):
+    return model[speaker].cluster_centers_[x]
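
Note: the helpers above rebuild one scikit-learn KMeans per speaker from a saved dict of fitted attributes. A minimal usage sketch follows; the checkpoint path matches the repository layout, while the random feature array and the speaker name are assumptions for illustration only.

```python
import numpy as np
from cluster import get_cluster_model, get_cluster_center_result

# One KMeans object per speaker, rebuilt from the saved attribute dict
kmeans_dict = get_cluster_model("logs/44k/kmeans_10000.pt")

# Stand-in for [t, 256] ContentVec frames (illustrative only)
features = np.random.randn(100, 256).astype(np.float32)

# Snap each frame to its nearest cluster center for the "momoi" speaker
centers = get_cluster_center_result(kmeans_dict, features, "momoi")
print(centers.shape)  # (100, 256)
```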
cluster/train_cluster.py
ADDED
@@ -0,0 +1,89 @@
+import os
+from glob import glob
+from pathlib import Path
+import torch
+import logging
+import argparse
+import numpy as np
+from sklearn.cluster import KMeans, MiniBatchKMeans
+import tqdm
+logging.basicConfig(level=logging.INFO)
+logger = logging.getLogger(__name__)
+import time
+import random
+
+def train_cluster(in_dir, n_clusters, use_minibatch=True, verbose=False):
+
+    logger.info(f"Loading features from {in_dir}")
+    features = []
+    nums = 0
+    for path in tqdm.tqdm(in_dir.glob("*.soft.pt")):
+        features.append(torch.load(path).squeeze(0).numpy().T)
+        # print(features[-1].shape)
+    features = np.concatenate(features, axis=0)
+    print(nums, features.nbytes / 1024**2, "MB , shape:", features.shape, features.dtype)
+    features = features.astype(np.float32)
+    logger.info(f"Clustering features of shape: {features.shape}")
+    t = time.time()
+    if use_minibatch:
+        kmeans = MiniBatchKMeans(n_clusters=n_clusters, verbose=verbose, batch_size=4096, max_iter=80).fit(features)
+    else:
+        kmeans = KMeans(n_clusters=n_clusters, verbose=verbose).fit(features)
+    print(time.time() - t, "s")
+
+    x = {
+        "n_features_in_": kmeans.n_features_in_,
+        "_n_threads": kmeans._n_threads,
+        "cluster_centers_": kmeans.cluster_centers_,
+    }
+    print("end")
+
+    return x
+
+
+if __name__ == "__main__":
+
+    parser = argparse.ArgumentParser()
+    parser.add_argument('--dataset', type=Path, default="./dataset/44k",
+                        help='path of training data directory')
+    parser.add_argument('--output', type=Path, default="logs/44k",
+                        help='path of model output directory')
+
+    args = parser.parse_args()
+
+    checkpoint_dir = args.output
+    dataset = args.dataset
+    n_clusters = 10000
+
+    ckpt = {}
+    for spk in os.listdir(dataset):
+        if os.path.isdir(dataset/spk):
+            print(f"train kmeans for {spk}...")
+            in_dir = dataset/spk
+            x = train_cluster(in_dir, n_clusters, verbose=False)
+            ckpt[spk] = x
+
+    checkpoint_path = checkpoint_dir / f"kmeans_{n_clusters}.pt"
+    checkpoint_path.parent.mkdir(exist_ok=True, parents=True)
+    torch.save(
+        ckpt,
+        checkpoint_path,
+    )
+
+
+# import cluster
+# for spk in tqdm.tqdm(os.listdir("dataset")):
+#     if os.path.isdir(f"dataset/{spk}"):
+#         print(f"start kmeans inference for {spk}...")
+#         for feature_path in tqdm.tqdm(glob(f"dataset/{spk}/*.discrete.npy", recursive=True)):
+#             mel_path = feature_path.replace(".discrete.npy", ".mel.npy")
+#             mel_spectrogram = np.load(mel_path)
+#             feature_len = mel_spectrogram.shape[-1]
+#             c = np.load(feature_path)
+#             c = utils.tools.repeat_expand_2d(torch.FloatTensor(c), feature_len).numpy()
+#             feature = c.T
+#             feature_class = cluster.get_cluster_result(feature, spk)
+#             np.save(feature_path.replace(".discrete.npy", ".discrete_class.npy"), feature_class)
configs/config.json
ADDED
File without changes
configs/momoi.json
ADDED
@@ -0,0 +1,93 @@
+{
+  "train": {
+    "log_interval": 200,
+    "eval_interval": 800,
+    "seed": 1234,
+    "epochs": 30000,
+    "learning_rate": 0.0001,
+    "betas": [
+      0.8,
+      0.99
+    ],
+    "eps": 1e-09,
+    "batch_size": 6,
+    "fp16_run": false,
+    "lr_decay": 0.999875,
+    "segment_size": 10240,
+    "init_lr_ratio": 1,
+    "warmup_epochs": 0,
+    "c_mel": 45,
+    "c_kl": 1.0,
+    "use_sr": true,
+    "max_speclen": 512,
+    "port": "8001",
+    "keep_ckpts": 8
+  },
+  "data": {
+    "training_files": "filelists/train.txt",
+    "validation_files": "filelists/val.txt",
+    "max_wav_value": 32768.0,
+    "sampling_rate": 44100,
+    "filter_length": 2048,
+    "hop_length": 512,
+    "win_length": 2048,
+    "n_mel_channels": 80,
+    "mel_fmin": 0.0,
+    "mel_fmax": 22050
+  },
+  "model": {
+    "inter_channels": 192,
+    "hidden_channels": 192,
+    "filter_channels": 768,
+    "n_heads": 2,
+    "n_layers": 6,
+    "kernel_size": 3,
+    "p_dropout": 0.1,
+    "resblock": "1",
+    "resblock_kernel_sizes": [
+      3,
+      7,
+      11
+    ],
+    "resblock_dilation_sizes": [
+      [
+        1,
+        3,
+        5
+      ],
+      [
+        1,
+        3,
+        5
+      ],
+      [
+        1,
+        3,
+        5
+      ]
+    ],
+    "upsample_rates": [
+      8,
+      8,
+      2,
+      2,
+      2
+    ],
+    "upsample_initial_channel": 512,
+    "upsample_kernel_sizes": [
+      16,
+      16,
+      4,
+      4,
+      4
+    ],
+    "n_layers_q": 3,
+    "use_spectral_norm": false,
+    "gin_channels": 256,
+    "ssl_dim": 256,
+    "n_speakers": 200
+  },
+  "spk": {
+    "momoi": 0
+  }
+}
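
Note: the `data` block above fixes the STFT geometry that the preprocessing and dataset code assume. A quick sanity check of what those numbers imply (plain arithmetic, not repository code):

```python
# Frame geometry implied by configs/momoi.json
sampling_rate = 44100
hop_length = 512
filter_length = 2048

print(sampling_rate / hop_length)             # ~86.1 spectrogram frames per second
print(1000 * filter_length / sampling_rate)   # ~46.4 ms analysis window
```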
configs_template/config_template.json
ADDED
@@ -0,0 +1,65 @@
+{
+  "train": {
+    "log_interval": 200,
+    "eval_interval": 800,
+    "seed": 1234,
+    "epochs": 10000,
+    "learning_rate": 0.0001,
+    "betas": [
+      0.8,
+      0.99
+    ],
+    "eps": 1e-09,
+    "batch_size": 6,
+    "fp16_run": false,
+    "lr_decay": 0.999875,
+    "segment_size": 10240,
+    "init_lr_ratio": 1,
+    "warmup_epochs": 0,
+    "c_mel": 45,
+    "c_kl": 1.0,
+    "use_sr": true,
+    "max_speclen": 512,
+    "port": "8001",
+    "keep_ckpts": 3
+  },
+  "data": {
+    "training_files": "filelists/train.txt",
+    "validation_files": "filelists/val.txt",
+    "max_wav_value": 32768.0,
+    "sampling_rate": 44100,
+    "filter_length": 2048,
+    "hop_length": 512,
+    "win_length": 2048,
+    "n_mel_channels": 80,
+    "mel_fmin": 0.0,
+    "mel_fmax": 22050
+  },
+  "model": {
+    "inter_channels": 192,
+    "hidden_channels": 192,
+    "filter_channels": 768,
+    "n_heads": 2,
+    "n_layers": 6,
+    "kernel_size": 3,
+    "p_dropout": 0.1,
+    "resblock": "1",
+    "resblock_kernel_sizes": [3,7,11],
+    "resblock_dilation_sizes": [[1,3,5], [1,3,5], [1,3,5]],
+    "upsample_rates": [ 8, 8, 2, 2, 2],
+    "upsample_initial_channel": 512,
+    "upsample_kernel_sizes": [16,16, 4, 4, 4],
+    "n_layers_q": 3,
+    "use_spectral_norm": false,
+    "gin_channels": 256,
+    "ssl_dim": 256,
+    "n_speakers": 200
+  },
+  "spk": {
+    "nyaru": 0,
+    "huiyu": 1,
+    "nen": 2,
+    "paimon": 3,
+    "yunhao": 4
+  }
+}
data_utils.py
ADDED
@@ -0,0 +1,142 @@
+import time
+import os
+import random
+import numpy as np
+import torch
+import torch.utils.data
+
+import modules.commons as commons
+import utils
+from modules.mel_processing import spectrogram_torch, spec_to_mel_torch
+from utils import load_wav_to_torch, load_filepaths_and_text
+
+# import h5py
+
+
+"""Multi speaker version"""
+
+
+class TextAudioSpeakerLoader(torch.utils.data.Dataset):
+    """
+    1) loads audio, speaker_id, text pairs
+    2) normalizes text and converts them to sequences of integers
+    3) computes spectrograms from audio files.
+    """
+
+    def __init__(self, audiopaths, hparams):
+        self.audiopaths = load_filepaths_and_text(audiopaths)
+        self.max_wav_value = hparams.data.max_wav_value
+        self.sampling_rate = hparams.data.sampling_rate
+        self.filter_length = hparams.data.filter_length
+        self.hop_length = hparams.data.hop_length
+        self.win_length = hparams.data.win_length
+        self.sampling_rate = hparams.data.sampling_rate
+        self.use_sr = hparams.train.use_sr
+        self.spec_len = hparams.train.max_speclen
+        self.spk_map = hparams.spk
+
+        random.seed(1234)
+        random.shuffle(self.audiopaths)
+
+    def get_audio(self, filename):
+        filename = filename.replace("\\", "/")
+        audio, sampling_rate = load_wav_to_torch(filename)
+        if sampling_rate != self.sampling_rate:
+            raise ValueError("{} SR doesn't match target {} SR".format(
+                sampling_rate, self.sampling_rate))
+        audio_norm = audio / self.max_wav_value
+        audio_norm = audio_norm.unsqueeze(0)
+        spec_filename = filename.replace(".wav", ".spec.pt")
+        if os.path.exists(spec_filename):
+            spec = torch.load(spec_filename)
+        else:
+            spec = spectrogram_torch(audio_norm, self.filter_length,
+                                     self.sampling_rate, self.hop_length, self.win_length,
+                                     center=False)
+            spec = torch.squeeze(spec, 0)
+            torch.save(spec, spec_filename)
+
+        spk = filename.split("/")[-2]
+        spk = torch.LongTensor([self.spk_map[spk]])
+
+        f0 = np.load(filename + ".f0.npy")
+        f0, uv = utils.interpolate_f0(f0)
+        f0 = torch.FloatTensor(f0)
+        uv = torch.FloatTensor(uv)
+
+        c = torch.load(filename + ".soft.pt")
+        c = utils.repeat_expand_2d(c.squeeze(0), f0.shape[0])
+
+
+        lmin = min(c.size(-1), spec.size(-1))
+        assert abs(c.size(-1) - spec.size(-1)) < 3, (c.size(-1), spec.size(-1), f0.shape, filename)
+        assert abs(audio_norm.shape[1] - lmin * self.hop_length) < 3 * self.hop_length
+        spec, c, f0, uv = spec[:, :lmin], c[:, :lmin], f0[:lmin], uv[:lmin]
+        audio_norm = audio_norm[:, :lmin * self.hop_length]
+        # if spec.shape[1] < 30:
+        #     print("skip too short audio:", filename)
+        #     return None
+        if spec.shape[1] > 800:
+            start = random.randint(0, spec.shape[1] - 800)
+            end = start + 790
+            spec, c, f0, uv = spec[:, start:end], c[:, start:end], f0[start:end], uv[start:end]
+            audio_norm = audio_norm[:, start * self.hop_length : end * self.hop_length]
+
+        return c, f0, spec, audio_norm, spk, uv
+
+    def __getitem__(self, index):
+        return self.get_audio(self.audiopaths[index][0])
+
+    def __len__(self):
+        return len(self.audiopaths)
+
+
+class TextAudioCollate:
+
+    def __call__(self, batch):
+        batch = [b for b in batch if b is not None]
+
+        input_lengths, ids_sorted_decreasing = torch.sort(
+            torch.LongTensor([x[0].shape[1] for x in batch]),
+            dim=0, descending=True)
+
+        max_c_len = max([x[0].size(1) for x in batch])
+        max_wav_len = max([x[3].size(1) for x in batch])
+
+        lengths = torch.LongTensor(len(batch))
+
+        c_padded = torch.FloatTensor(len(batch), batch[0][0].shape[0], max_c_len)
+        f0_padded = torch.FloatTensor(len(batch), max_c_len)
+        spec_padded = torch.FloatTensor(len(batch), batch[0][2].shape[0], max_c_len)
+        wav_padded = torch.FloatTensor(len(batch), 1, max_wav_len)
+        spkids = torch.LongTensor(len(batch), 1)
+        uv_padded = torch.FloatTensor(len(batch), max_c_len)
+
+        c_padded.zero_()
+        spec_padded.zero_()
+        f0_padded.zero_()
+        wav_padded.zero_()
+        uv_padded.zero_()
+
+        for i in range(len(ids_sorted_decreasing)):
+            row = batch[ids_sorted_decreasing[i]]
+
+            c = row[0]
+            c_padded[i, :, :c.size(1)] = c
+            lengths[i] = c.size(1)
+
+            f0 = row[1]
+            f0_padded[i, :f0.size(0)] = f0
+
+            spec = row[2]
+            spec_padded[i, :, :spec.size(1)] = spec
+
+            wav = row[3]
+            wav_padded[i, :, :wav.size(1)] = wav
+
+            spkids[i, 0] = row[4]
+
+            uv = row[5]
+            uv_padded[i, :uv.size(0)] = uv
+
+        return c_padded, f0_padded, spec_padded, wav_padded, spkids, lengths, uv_padded
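
Note: a minimal sketch of wiring the dataset and collate function above into a PyTorch DataLoader. It assumes hparams are loaded with a helper such as `utils.get_hparams_from_file` (present in upstream so-vits-svc; treat the name as an assumption here):

```python
import torch.utils.data
import utils
from data_utils import TextAudioSpeakerLoader, TextAudioCollate

hps = utils.get_hparams_from_file("configs/config.json")  # assumed helper
train_set = TextAudioSpeakerLoader(hps.data.training_files, hps)
loader = torch.utils.data.DataLoader(
    train_set,
    batch_size=hps.train.batch_size,
    shuffle=True,
    collate_fn=TextAudioCollate(),
)

# Each batch is zero-padded to the longest item, as built in __call__ above
c, f0, spec, wav, spk, lengths, uv = next(iter(loader))
```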
dataset_raw/wav_structure.txt
ADDED
@@ -0,0 +1,20 @@
+Dataset preparation
+
+raw
+├───speaker0
+│   ├───xxx1-xxx1.wav
+│   ├───...
+│   └───Lxx-0xx8.wav
+└───speaker1
+    ├───xx2-0xxx2.wav
+    ├───...
+    └───xxx7-xxx007.wav
+
+You also need to edit config.json
+
+"n_speakers": 10
+
+"spk":{
+    "speaker0": 0,
+    "speaker1": 1,
+}
filelists/test.txt
ADDED
@@ -0,0 +1,4 @@
+./dataset/44k/taffy/000562.wav
+./dataset/44k/nyaru/000011.wav
+./dataset/44k/nyaru/000008.wav
+./dataset/44k/taffy/000563.wav
filelists/train.txt
ADDED
@@ -0,0 +1,15 @@
+./dataset/44k/taffy/000549.wav
+./dataset/44k/nyaru/000004.wav
+./dataset/44k/nyaru/000006.wav
+./dataset/44k/taffy/000551.wav
+./dataset/44k/nyaru/000009.wav
+./dataset/44k/taffy/000561.wav
+./dataset/44k/nyaru/000001.wav
+./dataset/44k/taffy/000553.wav
+./dataset/44k/nyaru/000002.wav
+./dataset/44k/taffy/000560.wav
+./dataset/44k/taffy/000557.wav
+./dataset/44k/nyaru/000005.wav
+./dataset/44k/taffy/000554.wav
+./dataset/44k/taffy/000550.wav
+./dataset/44k/taffy/000559.wav
filelists/val.txt
ADDED
@@ -0,0 +1,2 @@
+./dataset/44k/jingfen/jingfen3_84.wav
+./dataset/44k/jingfen/jingfen2_1.wav
flask_api.py
ADDED
@@ -0,0 +1,56 @@
+import io
+import logging
+
+import soundfile
+import torch
+import torchaudio
+from flask import Flask, request, send_file
+from flask_cors import CORS
+
+from inference.infer_tool import Svc, RealTimeVC
+
+app = Flask(__name__)
+
+CORS(app)
+
+logging.getLogger('numba').setLevel(logging.WARNING)
+
+
+@app.route("/voiceChangeModel", methods=["POST"])
+def voice_change_model():
+    request_form = request.form
+    wave_file = request.files.get("sample", None)
+    # Pitch-shift information
+    f_pitch_change = float(request_form.get("fPitchChange", 0))
+    # Sampling rate required by the DAW
+    daw_sample = int(float(request_form.get("sampleRate", 0)))
+    speaker_id = int(float(request_form.get("sSpeakId", 0)))
+    # Receive the wav file over HTTP and convert it
+    input_wav_path = io.BytesIO(wave_file.read())
+
+    # Model inference
+    if raw_infer:
+        out_audio, out_sr = svc_model.infer(speaker_id, f_pitch_change, input_wav_path)
+        tar_audio = torchaudio.functional.resample(out_audio, svc_model.target_sample, daw_sample)
+    else:
+        out_audio = svc.process(svc_model, speaker_id, f_pitch_change, input_wav_path)
+        tar_audio = torchaudio.functional.resample(torch.from_numpy(out_audio), svc_model.target_sample, daw_sample)
+    # Return the audio
+    out_wav_path = io.BytesIO()
+    soundfile.write(out_wav_path, tar_audio.cpu().numpy(), daw_sample, format="wav")
+    out_wav_path.seek(0)
+    return send_file(out_wav_path, download_name="temp.wav", as_attachment=True)
+
+
+if __name__ == '__main__':
+    # If True, slices are synthesized directly; False uses cross-fading
+    # Setting the VST plugin to 0.3-0.5 s slices can reduce latency; direct slicing pops at the seams, cross-fading slightly overlaps the audio
+    # Choose whichever you can accept, or set the VST maximum slice time to 1 s; here it is set to True, for higher latency but more stable quality
+    raw_infer = True
+    # Each model corresponds to exactly one config
+    model_name = "logs/32k/G_174000-Copy1.pth"
+    config_name = "configs/config.json"
+    svc_model = Svc(model_name, config_name)
+    svc = RealTimeVC()
+    # This matches the VST plugin; changing it is not recommended
+    app.run(port=6842, host="0.0.0.0", debug=False, threaded=False)
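
Note: a hypothetical client for the `/voiceChangeModel` endpoint above. The form fields mirror the handler; the file paths are placeholders:

```python
import requests

with open("raw/clip.wav", "rb") as f:  # placeholder input clip
    resp = requests.post(
        "http://127.0.0.1:6842/voiceChangeModel",
        files={"sample": f},
        data={"fPitchChange": 0, "sampleRate": 44100, "sSpeakId": 0},
    )

with open("converted.wav", "wb") as out:
    out.write(resp.content)  # wav bytes returned by send_file
```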
flask_api_full_song.py
ADDED
@@ -0,0 +1,55 @@
+import io
+import numpy as np
+import soundfile
+from flask import Flask, request, send_file
+
+from inference import infer_tool
+from inference import slicer
+
+app = Flask(__name__)
+
+
+@app.route("/wav2wav", methods=["POST"])
+def wav2wav():
+    request_form = request.form
+    audio_path = request_form.get("audio_path", None)  # path of the wav file
+    tran = int(float(request_form.get("tran", 0)))  # pitch shift
+    spk = request_form.get("spk", 0)  # speaker (id or name both work, depending on your config)
+    wav_format = request_form.get("wav_format", 'wav')  # returned file format
+    infer_tool.format_wav(audio_path)
+    chunks = slicer.cut(audio_path, db_thresh=-40)
+    audio_data, audio_sr = slicer.chunks2audio(audio_path, chunks)
+
+    audio = []
+    for (slice_tag, data) in audio_data:
+        print(f'#=====segment start, {round(len(data) / audio_sr, 3)}s======')
+
+        length = int(np.ceil(len(data) / audio_sr * svc_model.target_sample))
+        if slice_tag:
+            print('jump empty segment')
+            _audio = np.zeros(length)
+        else:
+            # pad
+            pad_len = int(audio_sr * 0.5)
+            data = np.concatenate([np.zeros([pad_len]), data, np.zeros([pad_len])])
+            raw_path = io.BytesIO()
+            soundfile.write(raw_path, data, audio_sr, format="wav")
+            raw_path.seek(0)
+            out_audio, out_sr = svc_model.infer(spk, tran, raw_path)
+            svc_model.clear_empty()
+            _audio = out_audio.cpu().numpy()
+            pad_len = int(svc_model.target_sample * 0.5)
+            _audio = _audio[pad_len:-pad_len]
+
+        audio.extend(list(infer_tool.pad_array(_audio, length)))
+    out_wav_path = io.BytesIO()
+    soundfile.write(out_wav_path, audio, svc_model.target_sample, format=wav_format)
+    out_wav_path.seek(0)
+    return send_file(out_wav_path, download_name=f"temp.{wav_format}", as_attachment=True)
+
+
+if __name__ == '__main__':
+    model_name = "logs/44k/G_60000.pth"  # model path
+    config_name = "configs/config.json"  # config path
+    svc_model = infer_tool.Svc(model_name, config_name)
+    app.run(port=1145, host="0.0.0.0", debug=False, threaded=False)
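
Note: a hypothetical client call for the `/wav2wav` endpoint above. Unlike `/voiceChangeModel`, this handler reads the audio from a server-side path, so only form fields are sent; the path and speaker are placeholders:

```python
import requests

resp = requests.post(
    "http://127.0.0.1:1145/wav2wav",
    data={
        "audio_path": "raw/song.wav",  # placeholder: path on the server
        "tran": 0,                     # pitch shift in semitones
        "spk": "nyaru",                # speaker id or name from the config
        "wav_format": "wav",
    },
)
with open("result.wav", "wb") as f:
    f.write(resp.content)
```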
hubert/__init__.py
ADDED
File without changes
hubert/checkpoint_best_legacy_500.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:60d936ec5a566776fc392e69ad8b630d14eb588111233fe313436e200a7b187b
+size 1330114945
hubert/hubert_model.py
ADDED
@@ -0,0 +1,222 @@
+import copy
+import random
+from typing import Optional, Tuple
+
+import torch
+import torch.nn as nn
+import torch.nn.functional as t_func
+from torch.nn.modules.utils import consume_prefix_in_state_dict_if_present
+
+
+class Hubert(nn.Module):
+    def __init__(self, num_label_embeddings: int = 100, mask: bool = True):
+        super().__init__()
+        self._mask = mask
+        self.feature_extractor = FeatureExtractor()
+        self.feature_projection = FeatureProjection()
+        self.positional_embedding = PositionalConvEmbedding()
+        self.norm = nn.LayerNorm(768)
+        self.dropout = nn.Dropout(0.1)
+        self.encoder = TransformerEncoder(
+            nn.TransformerEncoderLayer(
+                768, 12, 3072, activation="gelu", batch_first=True
+            ),
+            12,
+        )
+        self.proj = nn.Linear(768, 256)
+
+        self.masked_spec_embed = nn.Parameter(torch.FloatTensor(768).uniform_())
+        self.label_embedding = nn.Embedding(num_label_embeddings, 256)
+
+    def mask(self, x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
+        mask = None
+        if self.training and self._mask:
+            mask = _compute_mask((x.size(0), x.size(1)), 0.8, 10, x.device, 2)
+            x[mask] = self.masked_spec_embed.to(x.dtype)
+        return x, mask
+
+    def encode(
+        self, x: torch.Tensor, layer: Optional[int] = None
+    ) -> Tuple[torch.Tensor, torch.Tensor]:
+        x = self.feature_extractor(x)
+        x = self.feature_projection(x.transpose(1, 2))
+        x, mask = self.mask(x)
+        x = x + self.positional_embedding(x)
+        x = self.dropout(self.norm(x))
+        x = self.encoder(x, output_layer=layer)
+        return x, mask
+
+    def logits(self, x: torch.Tensor) -> torch.Tensor:
+        logits = torch.cosine_similarity(
+            x.unsqueeze(2),
+            self.label_embedding.weight.unsqueeze(0).unsqueeze(0),
+            dim=-1,
+        )
+        return logits / 0.1
+
+    def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
+        x, mask = self.encode(x)
+        x = self.proj(x)
+        logits = self.logits(x)
+        return logits, mask
+
+
+class HubertSoft(Hubert):
+    def __init__(self):
+        super().__init__()
+
+    @torch.inference_mode()
+    def units(self, wav: torch.Tensor) -> torch.Tensor:
+        wav = t_func.pad(wav, ((400 - 320) // 2, (400 - 320) // 2))
+        x, _ = self.encode(wav)
+        return self.proj(x)
+
+
+class FeatureExtractor(nn.Module):
+    def __init__(self):
+        super().__init__()
+        self.conv0 = nn.Conv1d(1, 512, 10, 5, bias=False)
+        self.norm0 = nn.GroupNorm(512, 512)
+        self.conv1 = nn.Conv1d(512, 512, 3, 2, bias=False)
+        self.conv2 = nn.Conv1d(512, 512, 3, 2, bias=False)
+        self.conv3 = nn.Conv1d(512, 512, 3, 2, bias=False)
+        self.conv4 = nn.Conv1d(512, 512, 3, 2, bias=False)
+        self.conv5 = nn.Conv1d(512, 512, 2, 2, bias=False)
+        self.conv6 = nn.Conv1d(512, 512, 2, 2, bias=False)
+
+    def forward(self, x: torch.Tensor) -> torch.Tensor:
+        x = t_func.gelu(self.norm0(self.conv0(x)))
+        x = t_func.gelu(self.conv1(x))
+        x = t_func.gelu(self.conv2(x))
+        x = t_func.gelu(self.conv3(x))
+        x = t_func.gelu(self.conv4(x))
+        x = t_func.gelu(self.conv5(x))
+        x = t_func.gelu(self.conv6(x))
+        return x
+
+
+class FeatureProjection(nn.Module):
+    def __init__(self):
+        super().__init__()
+        self.norm = nn.LayerNorm(512)
+        self.projection = nn.Linear(512, 768)
+        self.dropout = nn.Dropout(0.1)
+
+    def forward(self, x: torch.Tensor) -> torch.Tensor:
+        x = self.norm(x)
+        x = self.projection(x)
+        x = self.dropout(x)
+        return x
+
+
+class PositionalConvEmbedding(nn.Module):
+    def __init__(self):
+        super().__init__()
+        self.conv = nn.Conv1d(
+            768,
+            768,
+            kernel_size=128,
+            padding=128 // 2,
+            groups=16,
+        )
+        self.conv = nn.utils.weight_norm(self.conv, name="weight", dim=2)
+
+    def forward(self, x: torch.Tensor) -> torch.Tensor:
+        x = self.conv(x.transpose(1, 2))
+        x = t_func.gelu(x[:, :, :-1])
+        return x.transpose(1, 2)
+
+
+class TransformerEncoder(nn.Module):
+    def __init__(
+        self, encoder_layer: nn.TransformerEncoderLayer, num_layers: int
+    ) -> None:
+        super(TransformerEncoder, self).__init__()
+        self.layers = nn.ModuleList(
+            [copy.deepcopy(encoder_layer) for _ in range(num_layers)]
+        )
+        self.num_layers = num_layers
+
+    def forward(
+        self,
+        src: torch.Tensor,
+        mask: torch.Tensor = None,
+        src_key_padding_mask: torch.Tensor = None,
+        output_layer: Optional[int] = None,
+    ) -> torch.Tensor:
+        output = src
+        for layer in self.layers[:output_layer]:
+            output = layer(
+                output, src_mask=mask, src_key_padding_mask=src_key_padding_mask
+            )
+        return output
+
+
+def _compute_mask(
+    shape: Tuple[int, int],
+    mask_prob: float,
+    mask_length: int,
+    device: torch.device,
+    min_masks: int = 0,
+) -> torch.Tensor:
+    batch_size, sequence_length = shape
+
+    if mask_length < 1:
+        raise ValueError("`mask_length` has to be bigger than 0.")
+
+    if mask_length > sequence_length:
+        raise ValueError(
+            f"`mask_length` has to be smaller than `sequence_length`, but got `mask_length`: {mask_length} and `sequence_length`: {sequence_length}`"
+        )
+
+    # compute number of masked spans in batch
+    num_masked_spans = int(mask_prob * sequence_length / mask_length + random.random())
+    num_masked_spans = max(num_masked_spans, min_masks)
+
+    # make sure num masked indices <= sequence_length
+    if num_masked_spans * mask_length > sequence_length:
+        num_masked_spans = sequence_length // mask_length
+
+    # SpecAugment mask to fill
+    mask = torch.zeros((batch_size, sequence_length), device=device, dtype=torch.bool)
+
+    # uniform distribution to sample from, make sure that offset samples are < sequence_length
+    uniform_dist = torch.ones(
+        (batch_size, sequence_length - (mask_length - 1)), device=device
+    )
+
+    # get random indices to mask
+    mask_indices = torch.multinomial(uniform_dist, num_masked_spans)
+
+    # expand masked indices to masked spans
+    mask_indices = (
+        mask_indices.unsqueeze(dim=-1)
+        .expand((batch_size, num_masked_spans, mask_length))
+        .reshape(batch_size, num_masked_spans * mask_length)
+    )
+    offsets = (
+        torch.arange(mask_length, device=device)[None, None, :]
+        .expand((batch_size, num_masked_spans, mask_length))
+        .reshape(batch_size, num_masked_spans * mask_length)
+    )
+    mask_idxs = mask_indices + offsets
+
+    # scatter indices to mask
+    mask = mask.scatter(1, mask_idxs, True)
+
+    return mask
+
+
+def hubert_soft(
+    path: str,
+) -> HubertSoft:
+    r"""HuBERT-Soft from `"A Comparison of Discrete and Soft Speech Units for Improved Voice Conversion"`.
+    Args:
+        path (str): path of a pretrained model
+    """
+    hubert = HubertSoft()
+    checkpoint = torch.load(path)
+    consume_prefix_in_state_dict_if_present(checkpoint, "module.")
+    hubert.load_state_dict(checkpoint)
+    hubert.eval()
+    return hubert
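
Note: a sketch of extracting soft speech units with the model above. `hubert_soft` expects a plain `HubertSoft` state dict, so the path below is a placeholder (the bundled fairseq-format checkpoint_best_legacy_500.pt is loaded elsewhere by other tooling); the wav handling is an assumption:

```python
import torch
import librosa
from hubert.hubert_model import hubert_soft

hubert = hubert_soft("path/to/hubert-soft.pt")  # placeholder: HubertSoft state dict

# HuBERT operates on 16 kHz mono audio, shaped [batch, 1, samples]
wav, _ = librosa.load("raw/example.wav", sr=16000, mono=True)
wav = torch.from_numpy(wav).unsqueeze(0).unsqueeze(0)

units = hubert.units(wav)  # [1, frames, 256]; @torch.inference_mode applies
print(units.shape)
```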
hubert/hubert_model_onnx.py
ADDED
@@ -0,0 +1,217 @@
+import copy
+import random
+from typing import Optional, Tuple
+
+import torch
+import torch.nn as nn
+import torch.nn.functional as t_func
+from torch.nn.modules.utils import consume_prefix_in_state_dict_if_present
+
+
+class Hubert(nn.Module):
+    def __init__(self, num_label_embeddings: int = 100, mask: bool = True):
+        super().__init__()
+        self._mask = mask
+        self.feature_extractor = FeatureExtractor()
+        self.feature_projection = FeatureProjection()
+        self.positional_embedding = PositionalConvEmbedding()
+        self.norm = nn.LayerNorm(768)
+        self.dropout = nn.Dropout(0.1)
+        self.encoder = TransformerEncoder(
+            nn.TransformerEncoderLayer(
+                768, 12, 3072, activation="gelu", batch_first=True
+            ),
+            12,
+        )
+        self.proj = nn.Linear(768, 256)
+
+        self.masked_spec_embed = nn.Parameter(torch.FloatTensor(768).uniform_())
+        self.label_embedding = nn.Embedding(num_label_embeddings, 256)
+
+    def mask(self, x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
+        mask = None
+        if self.training and self._mask:
+            mask = _compute_mask((x.size(0), x.size(1)), 0.8, 10, x.device, 2)
+            x[mask] = self.masked_spec_embed.to(x.dtype)
+        return x, mask
+
+    def encode(
+        self, x: torch.Tensor, layer: Optional[int] = None
+    ) -> Tuple[torch.Tensor, torch.Tensor]:
+        x = self.feature_extractor(x)
+        x = self.feature_projection(x.transpose(1, 2))
+        x, mask = self.mask(x)
+        x = x + self.positional_embedding(x)
+        x = self.dropout(self.norm(x))
+        x = self.encoder(x, output_layer=layer)
+        return x, mask
+
+    def logits(self, x: torch.Tensor) -> torch.Tensor:
+        logits = torch.cosine_similarity(
+            x.unsqueeze(2),
+            self.label_embedding.weight.unsqueeze(0).unsqueeze(0),
+            dim=-1,
+        )
+        return logits / 0.1
+
+
+class HubertSoft(Hubert):
+    def __init__(self):
+        super().__init__()
+
+    def units(self, wav: torch.Tensor) -> torch.Tensor:
+        wav = t_func.pad(wav, ((400 - 320) // 2, (400 - 320) // 2))
wav = t_func.pad(wav, ((400 - 320) // 2, (400 - 320) // 2))
|
| 64 |
+
x, _ = self.encode(wav)
|
| 65 |
+
return self.proj(x)
|
| 66 |
+
|
| 67 |
+
def forward(self, x):
|
| 68 |
+
return self.units(x)
|
| 69 |
+
|
| 70 |
+
class FeatureExtractor(nn.Module):
|
| 71 |
+
def __init__(self):
|
| 72 |
+
super().__init__()
|
| 73 |
+
self.conv0 = nn.Conv1d(1, 512, 10, 5, bias=False)
|
| 74 |
+
self.norm0 = nn.GroupNorm(512, 512)
|
| 75 |
+
self.conv1 = nn.Conv1d(512, 512, 3, 2, bias=False)
|
| 76 |
+
self.conv2 = nn.Conv1d(512, 512, 3, 2, bias=False)
|
| 77 |
+
self.conv3 = nn.Conv1d(512, 512, 3, 2, bias=False)
|
| 78 |
+
self.conv4 = nn.Conv1d(512, 512, 3, 2, bias=False)
|
| 79 |
+
self.conv5 = nn.Conv1d(512, 512, 2, 2, bias=False)
|
| 80 |
+
self.conv6 = nn.Conv1d(512, 512, 2, 2, bias=False)
|
| 81 |
+
|
| 82 |
+
def forward(self, x: torch.Tensor) -> torch.Tensor:
|
| 83 |
+
x = t_func.gelu(self.norm0(self.conv0(x)))
|
| 84 |
+
x = t_func.gelu(self.conv1(x))
|
| 85 |
+
x = t_func.gelu(self.conv2(x))
|
| 86 |
+
x = t_func.gelu(self.conv3(x))
|
| 87 |
+
x = t_func.gelu(self.conv4(x))
|
| 88 |
+
x = t_func.gelu(self.conv5(x))
|
| 89 |
+
x = t_func.gelu(self.conv6(x))
|
| 90 |
+
return x
|
| 91 |
+
|
| 92 |
+
|
| 93 |
+
class FeatureProjection(nn.Module):
|
| 94 |
+
def __init__(self):
|
| 95 |
+
super().__init__()
|
| 96 |
+
self.norm = nn.LayerNorm(512)
|
| 97 |
+
self.projection = nn.Linear(512, 768)
|
| 98 |
+
self.dropout = nn.Dropout(0.1)
|
| 99 |
+
|
| 100 |
+
def forward(self, x: torch.Tensor) -> torch.Tensor:
|
| 101 |
+
x = self.norm(x)
|
| 102 |
+
x = self.projection(x)
|
| 103 |
+
x = self.dropout(x)
|
| 104 |
+
return x
|
| 105 |
+
|
| 106 |
+
|
| 107 |
+
class PositionalConvEmbedding(nn.Module):
|
| 108 |
+
def __init__(self):
|
| 109 |
+
super().__init__()
|
| 110 |
+
self.conv = nn.Conv1d(
|
| 111 |
+
768,
|
| 112 |
+
768,
|
| 113 |
+
kernel_size=128,
|
| 114 |
+
padding=128 // 2,
|
| 115 |
+
groups=16,
|
| 116 |
+
)
|
| 117 |
+
self.conv = nn.utils.weight_norm(self.conv, name="weight", dim=2)
|
| 118 |
+
|
| 119 |
+
def forward(self, x: torch.Tensor) -> torch.Tensor:
|
| 120 |
+
x = self.conv(x.transpose(1, 2))
|
| 121 |
+
x = t_func.gelu(x[:, :, :-1])
|
| 122 |
+
return x.transpose(1, 2)
|
| 123 |
+
|
| 124 |
+
|
| 125 |
+
class TransformerEncoder(nn.Module):
|
| 126 |
+
def __init__(
|
| 127 |
+
self, encoder_layer: nn.TransformerEncoderLayer, num_layers: int
|
| 128 |
+
) -> None:
|
| 129 |
+
super(TransformerEncoder, self).__init__()
|
| 130 |
+
self.layers = nn.ModuleList(
|
| 131 |
+
[copy.deepcopy(encoder_layer) for _ in range(num_layers)]
|
| 132 |
+
)
|
| 133 |
+
self.num_layers = num_layers
|
| 134 |
+
|
| 135 |
+
def forward(
|
| 136 |
+
self,
|
| 137 |
+
src: torch.Tensor,
|
| 138 |
+
mask: torch.Tensor = None,
|
| 139 |
+
src_key_padding_mask: torch.Tensor = None,
|
| 140 |
+
output_layer: Optional[int] = None,
|
| 141 |
+
) -> torch.Tensor:
|
| 142 |
+
output = src
|
| 143 |
+
for layer in self.layers[:output_layer]:
|
| 144 |
+
output = layer(
|
| 145 |
+
output, src_mask=mask, src_key_padding_mask=src_key_padding_mask
|
| 146 |
+
)
|
| 147 |
+
return output
|
| 148 |
+
|
| 149 |
+
|
| 150 |
+
def _compute_mask(
|
| 151 |
+
shape: Tuple[int, int],
|
| 152 |
+
mask_prob: float,
|
| 153 |
+
mask_length: int,
|
| 154 |
+
device: torch.device,
|
| 155 |
+
min_masks: int = 0,
|
| 156 |
+
) -> torch.Tensor:
|
| 157 |
+
batch_size, sequence_length = shape
|
| 158 |
+
|
| 159 |
+
if mask_length < 1:
|
| 160 |
+
raise ValueError("`mask_length` has to be bigger than 0.")
|
| 161 |
+
|
| 162 |
+
if mask_length > sequence_length:
|
| 163 |
+
raise ValueError(
|
| 164 |
+
f"`mask_length` has to be smaller than `sequence_length`, but got `mask_length`: {mask_length} and `sequence_length`: {sequence_length}`"
|
| 165 |
+
)
|
| 166 |
+
|
| 167 |
+
# compute number of masked spans in batch
|
| 168 |
+
num_masked_spans = int(mask_prob * sequence_length / mask_length + random.random())
|
| 169 |
+
num_masked_spans = max(num_masked_spans, min_masks)
|
| 170 |
+
|
| 171 |
+
# make sure num masked indices <= sequence_length
|
| 172 |
+
if num_masked_spans * mask_length > sequence_length:
|
| 173 |
+
num_masked_spans = sequence_length // mask_length
|
| 174 |
+
|
| 175 |
+
# SpecAugment mask to fill
|
| 176 |
+
mask = torch.zeros((batch_size, sequence_length), device=device, dtype=torch.bool)
|
| 177 |
+
|
| 178 |
+
# uniform distribution to sample from, make sure that offset samples are < sequence_length
|
| 179 |
+
uniform_dist = torch.ones(
|
| 180 |
+
(batch_size, sequence_length - (mask_length - 1)), device=device
|
| 181 |
+
)
|
| 182 |
+
|
| 183 |
+
# get random indices to mask
|
| 184 |
+
mask_indices = torch.multinomial(uniform_dist, num_masked_spans)
|
| 185 |
+
|
| 186 |
+
# expand masked indices to masked spans
|
| 187 |
+
mask_indices = (
|
| 188 |
+
mask_indices.unsqueeze(dim=-1)
|
| 189 |
+
.expand((batch_size, num_masked_spans, mask_length))
|
| 190 |
+
.reshape(batch_size, num_masked_spans * mask_length)
|
| 191 |
+
)
|
| 192 |
+
offsets = (
|
| 193 |
+
torch.arange(mask_length, device=device)[None, None, :]
|
| 194 |
+
.expand((batch_size, num_masked_spans, mask_length))
|
| 195 |
+
.reshape(batch_size, num_masked_spans * mask_length)
|
| 196 |
+
)
|
| 197 |
+
mask_idxs = mask_indices + offsets
|
| 198 |
+
|
| 199 |
+
# scatter indices to mask
|
| 200 |
+
mask = mask.scatter(1, mask_idxs, True)
|
| 201 |
+
|
| 202 |
+
return mask
|
| 203 |
+
|
| 204 |
+
|
| 205 |
+
def hubert_soft(
|
| 206 |
+
path: str,
|
| 207 |
+
) -> HubertSoft:
|
| 208 |
+
r"""HuBERT-Soft from `"A Comparison of Discrete and Soft Speech Units for Improved Voice Conversion"`.
|
| 209 |
+
Args:
|
| 210 |
+
path (str): path of a pretrained model
|
| 211 |
+
"""
|
| 212 |
+
hubert = HubertSoft()
|
| 213 |
+
checkpoint = torch.load(path)
|
| 214 |
+
consume_prefix_in_state_dict_if_present(checkpoint, "module.")
|
| 215 |
+
hubert.load_state_dict(checkpoint)
|
| 216 |
+
hubert.eval()
|
| 217 |
+
return hubert
|
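
This variant keeps forward() traceable (no inference-mode decorators), which is what makes ONNX export possible. A minimal export sketch under that assumption; the checkpoint path, output names, dynamic axes, and opset are assumptions, and the repo's actual export logic lives in onnx_export.py:

import torch

model = hubert_soft("path/to/hubert-soft.pt")  # hypothetical checkpoint path
dummy = torch.randn(1, 1, 16000)               # (batch, 1, samples) dummy input
torch.onnx.export(
    model, dummy, "hubert.onnx",
    input_names=["audio"], output_names=["units"],                 # assumed names
    dynamic_axes={"audio": {2: "samples"}, "units": {1: "frames"}},
    opset_version=16,
)
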
hubert/put_hubert_ckpt_here
ADDED
File without changes

inference/__init__.py
ADDED
File without changes

inference/infer_tool.py
ADDED
@@ -0,0 +1,251 @@
import hashlib
import io
import json
import logging
import os
import time
from pathlib import Path
from inference import slicer

import librosa
import numpy as np
# import onnxruntime
import parselmouth
import soundfile
import torch
import torchaudio

import cluster
from hubert import hubert_model
import utils
from models import SynthesizerTrn

logging.getLogger('matplotlib').setLevel(logging.WARNING)


def read_temp(file_name):
    if not os.path.exists(file_name):
        with open(file_name, "w") as f:
            f.write(json.dumps({"info": "temp_dict"}))
        return {}
    else:
        try:
            with open(file_name, "r") as f:
                data = f.read()
            data_dict = json.loads(data)
            if os.path.getsize(file_name) > 50 * 1024 * 1024:
                f_name = file_name.replace("\\", "/").split("/")[-1]
                print(f"clean {f_name}")
                for wav_hash in list(data_dict.keys()):
                    if int(time.time()) - int(data_dict[wav_hash]["time"]) > 14 * 24 * 3600:
                        del data_dict[wav_hash]
        except Exception as e:
            print(e)
            print(f"{file_name} error, auto rebuild file")
            data_dict = {"info": "temp_dict"}
        return data_dict


def write_temp(file_name, data):
    with open(file_name, "w") as f:
        f.write(json.dumps(data))


def timeit(func):
    def run(*args, **kwargs):
        t = time.time()
        res = func(*args, **kwargs)
        print('executing \'%s\' cost %.3fs' % (func.__name__, time.time() - t))
        return res

    return run


def format_wav(audio_path):
    if Path(audio_path).suffix == '.wav':
        return
    raw_audio, raw_sample_rate = librosa.load(audio_path, mono=True, sr=None)
    soundfile.write(Path(audio_path).with_suffix(".wav"), raw_audio, raw_sample_rate)


def get_end_file(dir_path, end):
    file_lists = []
    for root, dirs, files in os.walk(dir_path):
        files = [f for f in files if f[0] != '.']
        dirs[:] = [d for d in dirs if d[0] != '.']
        for f_file in files:
            if f_file.endswith(end):
                file_lists.append(os.path.join(root, f_file).replace("\\", "/"))
    return file_lists


def get_md5(content):
    return hashlib.new("md5", content).hexdigest()

def fill_a_to_b(a, b):
    if len(a) < len(b):
        for _ in range(0, len(b) - len(a)):
            a.append(a[0])

def mkdir(paths: list):
    for path in paths:
        if not os.path.exists(path):
            os.mkdir(path)

def pad_array(arr, target_length):
    current_length = arr.shape[0]
    if current_length >= target_length:
        return arr
    else:
        pad_width = target_length - current_length
        pad_left = pad_width // 2
        pad_right = pad_width - pad_left
        padded_arr = np.pad(arr, (pad_left, pad_right), 'constant', constant_values=(0, 0))
        return padded_arr


class Svc(object):
    def __init__(self, net_g_path, config_path,
                 device=None,
                 cluster_model_path="logs/44k/kmeans_10000.pt"):
        self.net_g_path = net_g_path
        if device is None:
            self.dev = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        else:
            self.dev = torch.device(device)
        self.net_g_ms = None
        self.hps_ms = utils.get_hparams_from_file(config_path)
        self.target_sample = self.hps_ms.data.sampling_rate
        self.hop_size = self.hps_ms.data.hop_length
        self.spk2id = self.hps_ms.spk
        # load hubert
        self.hubert_model = utils.get_hubert_model().to(self.dev)
        self.load_model()
        if os.path.exists(cluster_model_path):
            self.cluster_model = cluster.get_cluster_model(cluster_model_path)

    def load_model(self):
        # get model configuration
        self.net_g_ms = SynthesizerTrn(
            self.hps_ms.data.filter_length // 2 + 1,
            self.hps_ms.train.segment_size // self.hps_ms.data.hop_length,
            **self.hps_ms.model)
        _ = utils.load_checkpoint(self.net_g_path, self.net_g_ms, None)
        if "half" in self.net_g_path and torch.cuda.is_available():
            _ = self.net_g_ms.half().eval().to(self.dev)
        else:
            _ = self.net_g_ms.eval().to(self.dev)



    def get_unit_f0(self, in_path, tran, cluster_infer_ratio, speaker):

        wav, sr = librosa.load(in_path, sr=self.target_sample)

        f0 = utils.compute_f0_parselmouth(wav, sampling_rate=self.target_sample, hop_length=self.hop_size)
        f0, uv = utils.interpolate_f0(f0)
        f0 = torch.FloatTensor(f0)
        uv = torch.FloatTensor(uv)
        f0 = f0 * 2 ** (tran / 12)
        f0 = f0.unsqueeze(0).to(self.dev)
        uv = uv.unsqueeze(0).to(self.dev)

        wav16k = librosa.resample(wav, orig_sr=self.target_sample, target_sr=16000)
        wav16k = torch.from_numpy(wav16k).to(self.dev)
        c = utils.get_hubert_content(self.hubert_model, wav_16k_tensor=wav16k)
        c = utils.repeat_expand_2d(c.squeeze(0), f0.shape[1])

        if cluster_infer_ratio != 0:
            cluster_c = cluster.get_cluster_center_result(self.cluster_model, c.cpu().numpy().T, speaker).T
            cluster_c = torch.FloatTensor(cluster_c).to(self.dev)
            c = cluster_infer_ratio * cluster_c + (1 - cluster_infer_ratio) * c

        c = c.unsqueeze(0)
        return c, f0, uv

    def infer(self, speaker, tran, raw_path,
              cluster_infer_ratio=0,
              auto_predict_f0=False,
              noice_scale=0.4):
        speaker_id = self.spk2id.__dict__.get(speaker)
        if not speaker_id and type(speaker) is int:
            if len(self.spk2id.__dict__) >= speaker:
                speaker_id = speaker
        sid = torch.LongTensor([int(speaker_id)]).to(self.dev).unsqueeze(0)
        c, f0, uv = self.get_unit_f0(raw_path, tran, cluster_infer_ratio, speaker)
        if "half" in self.net_g_path and torch.cuda.is_available():
            c = c.half()
        with torch.no_grad():
            start = time.time()
            audio = self.net_g_ms.infer(c, f0=f0, g=sid, uv=uv, predict_f0=auto_predict_f0, noice_scale=noice_scale)[0, 0].data.float()
            use_time = time.time() - start
            print("vits use time:{}".format(use_time))
        return audio, audio.shape[-1]

    def clear_empty(self):
        # free GPU memory
        torch.cuda.empty_cache()

    def slice_inference(self, raw_audio_path, spk, tran, slice_db, cluster_infer_ratio, auto_predict_f0, noice_scale, pad_seconds=0.5):
        wav_path = raw_audio_path
        chunks = slicer.cut(wav_path, db_thresh=slice_db)
        audio_data, audio_sr = slicer.chunks2audio(wav_path, chunks)

        audio = []
        for (slice_tag, data) in audio_data:
            print(f'#=====segment start, {round(len(data) / audio_sr, 3)}s======')
            # pad
            pad_len = int(audio_sr * pad_seconds)
            data = np.concatenate([np.zeros([pad_len]), data, np.zeros([pad_len])])
            length = int(np.ceil(len(data) / audio_sr * self.target_sample))
            raw_path = io.BytesIO()
            soundfile.write(raw_path, data, audio_sr, format="wav")
            raw_path.seek(0)
            if slice_tag:
                print('jump empty segment')
                _audio = np.zeros(length)
            else:
                out_audio, out_sr = self.infer(spk, tran, raw_path,
                                               cluster_infer_ratio=cluster_infer_ratio,
                                               auto_predict_f0=auto_predict_f0,
                                               noice_scale=noice_scale
                                               )
                _audio = out_audio.cpu().numpy()

            pad_len = int(self.target_sample * pad_seconds)
            _audio = _audio[pad_len:-pad_len]
            audio.extend(list(_audio))
        return np.array(audio)


class RealTimeVC:
    def __init__(self):
        self.last_chunk = None
        self.last_o = None
        self.chunk_len = 16000  # chunk length
        self.pre_len = 3840  # crossfade length, a multiple of 640

    """Both input and output are 1-D numpy audio waveform arrays"""

    def process(self, svc_model, speaker_id, f_pitch_change, input_wav_path):
        import maad
        audio, sr = torchaudio.load(input_wav_path)
        audio = audio.cpu().numpy()[0]
        temp_wav = io.BytesIO()
        if self.last_chunk is None:
            input_wav_path.seek(0)
            audio, sr = svc_model.infer(speaker_id, f_pitch_change, input_wav_path)
            audio = audio.cpu().numpy()
            self.last_chunk = audio[-self.pre_len:]
            self.last_o = audio
            return audio[-self.chunk_len:]
        else:
            audio = np.concatenate([self.last_chunk, audio])
            soundfile.write(temp_wav, audio, sr, format="wav")
            temp_wav.seek(0)
            audio, sr = svc_model.infer(speaker_id, f_pitch_change, temp_wav)
            audio = audio.cpu().numpy()
            ret = maad.util.crossfade(self.last_o, audio, self.pre_len)
            self.last_chunk = audio[-self.pre_len:]
            self.last_o = audio
            return ret[self.chunk_len:2 * self.chunk_len]
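
A minimal end-to-end sketch using the Svc wrapper above; the input file name and the speaker key "momoi" are assumptions (the checkpoint and config paths are the ones shipped in this Space):

import soundfile

svc = Svc("logs/44k/momoi_E13_G40.pth", "configs/momoi.json")
out = svc.slice_inference("raw/input.wav", spk="momoi", tran=0, slice_db=-40,
                          cluster_infer_ratio=0, auto_predict_f0=False, noice_scale=0.4)
soundfile.write("results/output.wav", out, svc.target_sample)
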
inference/infer_tool_grad.py
ADDED
@@ -0,0 +1,160 @@
import hashlib
import json
import logging
import os
import time
from pathlib import Path
import io
import librosa
import maad
import numpy as np
from inference import slicer
import parselmouth
import soundfile
import torch
import torchaudio

from hubert import hubert_model
import utils
from models import SynthesizerTrn
logging.getLogger('numba').setLevel(logging.WARNING)
logging.getLogger('matplotlib').setLevel(logging.WARNING)

def resize2d_f0(x, target_len):
    source = np.array(x)
    source[source < 0.001] = np.nan
    target = np.interp(np.arange(0, len(source) * target_len, len(source)) / target_len, np.arange(0, len(source)),
                       source)
    res = np.nan_to_num(target)
    return res

def get_f0(x, p_len, f0_up_key=0):

    time_step = 160 / 16000 * 1000
    f0_min = 50
    f0_max = 1100
    f0_mel_min = 1127 * np.log(1 + f0_min / 700)
    f0_mel_max = 1127 * np.log(1 + f0_max / 700)

    f0 = parselmouth.Sound(x, 16000).to_pitch_ac(
        time_step=time_step / 1000, voicing_threshold=0.6,
        pitch_floor=f0_min, pitch_ceiling=f0_max).selected_array['frequency']

    pad_size = (p_len - len(f0) + 1) // 2
    if (pad_size > 0 or p_len - len(f0) - pad_size > 0):
        f0 = np.pad(f0, [[pad_size, p_len - len(f0) - pad_size]], mode='constant')

    f0 *= pow(2, f0_up_key / 12)
    f0_mel = 1127 * np.log(1 + f0 / 700)
    f0_mel[f0_mel > 0] = (f0_mel[f0_mel > 0] - f0_mel_min) * 254 / (f0_mel_max - f0_mel_min) + 1
    f0_mel[f0_mel <= 1] = 1
    f0_mel[f0_mel > 255] = 255
    f0_coarse = np.rint(f0_mel).astype(int)  # np.int is removed in recent numpy; plain int is equivalent here
    return f0_coarse, f0

def clean_pitch(input_pitch):
    num_nan = np.sum(input_pitch == 1)
    if num_nan / len(input_pitch) > 0.9:
        input_pitch[input_pitch != 1] = 1
    return input_pitch


def plt_pitch(input_pitch):
    input_pitch = input_pitch.astype(float)
    input_pitch[input_pitch == 1] = np.nan
    return input_pitch


def f0_to_pitch(ff):
    f0_pitch = 69 + 12 * np.log2(ff / 440)
    return f0_pitch


def fill_a_to_b(a, b):
    if len(a) < len(b):
        for _ in range(0, len(b) - len(a)):
            a.append(a[0])


def mkdir(paths: list):
    for path in paths:
        if not os.path.exists(path):
            os.mkdir(path)


class VitsSvc(object):
    def __init__(self):
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        self.SVCVITS = None
        self.hps = None
        self.speakers = None
        self.hubert_soft = utils.get_hubert_model()

    def set_device(self, device):
        self.device = torch.device(device)
        self.hubert_soft.to(self.device)
        if self.SVCVITS is not None:
            self.SVCVITS.to(self.device)

    def loadCheckpoint(self, path):
        self.hps = utils.get_hparams_from_file(f"checkpoints/{path}/config.json")
        self.SVCVITS = SynthesizerTrn(
            self.hps.data.filter_length // 2 + 1,
            self.hps.train.segment_size // self.hps.data.hop_length,
            **self.hps.model)
        _ = utils.load_checkpoint(f"checkpoints/{path}/model.pth", self.SVCVITS, None)
        _ = self.SVCVITS.eval().to(self.device)
        self.speakers = self.hps.spk

    def get_units(self, source, sr):
        source = source.unsqueeze(0).to(self.device)
        with torch.inference_mode():
            units = self.hubert_soft.units(source)
            return units


    def get_unit_pitch(self, in_path, tran):
        source, sr = torchaudio.load(in_path)
        source = torchaudio.functional.resample(source, sr, 16000)
        if len(source.shape) == 2 and source.shape[1] >= 2:
            source = torch.mean(source, dim=0).unsqueeze(0)
        soft = self.get_units(source, sr).squeeze(0).cpu().numpy()
        f0_coarse, f0 = get_f0(source.cpu().numpy()[0], soft.shape[0]*2, tran)
        return soft, f0

    def infer(self, speaker_id, tran, raw_path):
        speaker_id = self.speakers[speaker_id]
        sid = torch.LongTensor([int(speaker_id)]).to(self.device).unsqueeze(0)
        soft, pitch = self.get_unit_pitch(raw_path, tran)
        f0 = torch.FloatTensor(clean_pitch(pitch)).unsqueeze(0).to(self.device)
        stn_tst = torch.FloatTensor(soft)
        with torch.no_grad():
            x_tst = stn_tst.unsqueeze(0).to(self.device)
            x_tst = torch.repeat_interleave(x_tst, repeats=2, dim=1).transpose(1, 2)
            audio = self.SVCVITS.infer(x_tst, f0=f0, g=sid)[0, 0].data.float()
        return audio, audio.shape[-1]

    def inference(self, srcaudio, chara, tran, slice_db):
        sampling_rate, audio = srcaudio
        audio = (audio / np.iinfo(audio.dtype).max).astype(np.float32)
        if len(audio.shape) > 1:
            audio = librosa.to_mono(audio.transpose(1, 0))
        if sampling_rate != 16000:
            audio = librosa.resample(audio, orig_sr=sampling_rate, target_sr=16000)
        soundfile.write("tmpwav.wav", audio, 16000, format="wav")
        chunks = slicer.cut("tmpwav.wav", db_thresh=slice_db)
        audio_data, audio_sr = slicer.chunks2audio("tmpwav.wav", chunks)
        audio = []
        for (slice_tag, data) in audio_data:
            length = int(np.ceil(len(data) / audio_sr * self.hps.data.sampling_rate))
            raw_path = io.BytesIO()
            soundfile.write(raw_path, data, audio_sr, format="wav")
            raw_path.seek(0)
            if slice_tag:
                _audio = np.zeros(length)
            else:
                out_audio, out_sr = self.infer(chara, tran, raw_path)
                _audio = out_audio.cpu().numpy()
            audio.extend(list(_audio))
        audio = (np.array(audio) * 32768.0).astype('int16')
        return (self.hps.data.sampling_rate, audio)
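
A minimal sketch of driving VitsSvc with a Gradio-style (sample_rate, int16 array) input; the checkpoint folder name "my_model" and the speaker key are assumptions:

import soundfile

vc = VitsSvc()
vc.loadCheckpoint("my_model")  # expects checkpoints/my_model/{config.json, model.pth}
audio, sr = soundfile.read("raw/input.wav", dtype="int16")
out_sr, out = vc.inference((sr, audio), chara="momoi", tran=0, slice_db=-40)
soundfile.write("results/output.wav", out, out_sr)
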
inference/slicer.py
ADDED
@@ -0,0 +1,142 @@
import librosa
import torch
import torchaudio


class Slicer:
    def __init__(self,
                 sr: int,
                 threshold: float = -40.,
                 min_length: int = 5000,
                 min_interval: int = 300,
                 hop_size: int = 20,
                 max_sil_kept: int = 5000):
        if not min_length >= min_interval >= hop_size:
            raise ValueError('The following condition must be satisfied: min_length >= min_interval >= hop_size')
        if not max_sil_kept >= hop_size:
            raise ValueError('The following condition must be satisfied: max_sil_kept >= hop_size')
        min_interval = sr * min_interval / 1000
        self.threshold = 10 ** (threshold / 20.)
        self.hop_size = round(sr * hop_size / 1000)
        self.win_size = min(round(min_interval), 4 * self.hop_size)
        self.min_length = round(sr * min_length / 1000 / self.hop_size)
        self.min_interval = round(min_interval / self.hop_size)
        self.max_sil_kept = round(sr * max_sil_kept / 1000 / self.hop_size)

    def _apply_slice(self, waveform, begin, end):
        if len(waveform.shape) > 1:
            return waveform[:, begin * self.hop_size: min(waveform.shape[1], end * self.hop_size)]
        else:
            return waveform[begin * self.hop_size: min(waveform.shape[0], end * self.hop_size)]

    # @timeit
    def slice(self, waveform):
        if len(waveform.shape) > 1:
            samples = librosa.to_mono(waveform)
        else:
            samples = waveform
        if samples.shape[0] <= self.min_length:
            return {"0": {"slice": False, "split_time": f"0,{len(waveform)}"}}
        rms_list = librosa.feature.rms(y=samples, frame_length=self.win_size, hop_length=self.hop_size).squeeze(0)
        sil_tags = []
        silence_start = None
        clip_start = 0
        for i, rms in enumerate(rms_list):
            # Keep looping while frame is silent.
            if rms < self.threshold:
                # Record start of silent frames.
                if silence_start is None:
                    silence_start = i
                continue
            # Keep looping while frame is not silent and silence start has not been recorded.
            if silence_start is None:
                continue
            # Clear recorded silence start if interval is not enough or clip is too short
            is_leading_silence = silence_start == 0 and i > self.max_sil_kept
            need_slice_middle = i - silence_start >= self.min_interval and i - clip_start >= self.min_length
            if not is_leading_silence and not need_slice_middle:
                silence_start = None
                continue
            # Need slicing. Record the range of silent frames to be removed.
            if i - silence_start <= self.max_sil_kept:
                pos = rms_list[silence_start: i + 1].argmin() + silence_start
                if silence_start == 0:
                    sil_tags.append((0, pos))
                else:
                    sil_tags.append((pos, pos))
                clip_start = pos
            elif i - silence_start <= self.max_sil_kept * 2:
                pos = rms_list[i - self.max_sil_kept: silence_start + self.max_sil_kept + 1].argmin()
                pos += i - self.max_sil_kept
                pos_l = rms_list[silence_start: silence_start + self.max_sil_kept + 1].argmin() + silence_start
                pos_r = rms_list[i - self.max_sil_kept: i + 1].argmin() + i - self.max_sil_kept
                if silence_start == 0:
                    sil_tags.append((0, pos_r))
                    clip_start = pos_r
                else:
                    sil_tags.append((min(pos_l, pos), max(pos_r, pos)))
                    clip_start = max(pos_r, pos)
            else:
                pos_l = rms_list[silence_start: silence_start + self.max_sil_kept + 1].argmin() + silence_start
                pos_r = rms_list[i - self.max_sil_kept: i + 1].argmin() + i - self.max_sil_kept
                if silence_start == 0:
                    sil_tags.append((0, pos_r))
                else:
                    sil_tags.append((pos_l, pos_r))
                clip_start = pos_r
            silence_start = None
        # Deal with trailing silence.
        total_frames = rms_list.shape[0]
        if silence_start is not None and total_frames - silence_start >= self.min_interval:
            silence_end = min(total_frames, silence_start + self.max_sil_kept)
            pos = rms_list[silence_start: silence_end + 1].argmin() + silence_start
            sil_tags.append((pos, total_frames + 1))
        # Apply and return slices.
        if len(sil_tags) == 0:
            return {"0": {"slice": False, "split_time": f"0,{len(waveform)}"}}
        else:
            chunks = []
            # the first silence does not start at the very beginning; prepend the leading voiced chunk
            if sil_tags[0][0]:
                chunks.append(
                    {"slice": False, "split_time": f"0,{min(waveform.shape[0], sil_tags[0][0] * self.hop_size)}"})
            for i in range(0, len(sil_tags)):
                # mark voiced chunks (skipping the first one)
                if i:
                    chunks.append({"slice": False,
                                   "split_time": f"{sil_tags[i - 1][1] * self.hop_size},{min(waveform.shape[0], sil_tags[i][0] * self.hop_size)}"})
                # mark all silent chunks
                chunks.append({"slice": True,
                               "split_time": f"{sil_tags[i][0] * self.hop_size},{min(waveform.shape[0], sil_tags[i][1] * self.hop_size)}"})
            # the last silence does not reach the end; append the trailing voiced chunk
            if sil_tags[-1][1] * self.hop_size < len(waveform):
                chunks.append({"slice": False, "split_time": f"{sil_tags[-1][1] * self.hop_size},{len(waveform)}"})
            chunk_dict = {}
            for i in range(len(chunks)):
                chunk_dict[str(i)] = chunks[i]
            return chunk_dict


def cut(audio_path, db_thresh=-30, min_len=5000):
    audio, sr = librosa.load(audio_path, sr=None)
    slicer = Slicer(
        sr=sr,
        threshold=db_thresh,
        min_length=min_len
    )
    chunks = slicer.slice(audio)
    return chunks


def chunks2audio(audio_path, chunks):
    chunks = dict(chunks)
    audio, sr = torchaudio.load(audio_path)
    if len(audio.shape) == 2 and audio.shape[1] >= 2:
        audio = torch.mean(audio, dim=0).unsqueeze(0)
    audio = audio.cpu().numpy()[0]
    result = []
    for k, v in chunks.items():
        tag = v["split_time"].split(",")
        if tag[0] != tag[1]:
            result.append((v["slice"], audio[int(tag[0]):int(tag[1])]))
    return result, sr
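
A quick sketch of the two-step API above: cut returns a dict of {index: {"slice": is_silence, "split_time": "start,end"}} with positions in samples, and chunks2audio materialises those ranges as (is_silence, waveform) pairs:

chunks = cut("raw/input.wav", db_thresh=-40)
segments, sr = chunks2audio("raw/input.wav", chunks)
for is_silence, data in segments:
    print("silent" if is_silence else "voiced", round(len(data) / sr, 3), "s")
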
inference_main.py
ADDED
@@ -0,0 +1,101 @@
import io
import logging
import time
from pathlib import Path

import librosa
import matplotlib.pyplot as plt
import numpy as np
import soundfile

from inference import infer_tool
from inference import slicer
from inference.infer_tool import Svc

logging.getLogger('numba').setLevel(logging.WARNING)
chunks_dict = infer_tool.read_temp("inference/chunks_temp.json")



def main():
    import argparse

    parser = argparse.ArgumentParser(description='sovits4 inference')

    # required settings
    parser.add_argument('-m', '--model_path', type=str, default="logs/44k/G_0.pth", help='model path')
    parser.add_argument('-c', '--config_path', type=str, default="configs/config.json", help='config file path')
    parser.add_argument('-n', '--clean_names', type=str, nargs='+', default=["君の知らない物語-src.wav"], help='list of wav file names, placed under the raw folder')
    parser.add_argument('-t', '--trans', type=int, nargs='+', default=[0], help='pitch shift in semitones, positive or negative')
    parser.add_argument('-s', '--spk_list', type=str, nargs='+', default=['nen'], help='target speaker name(s) for synthesis')

    # optional settings
    parser.add_argument('-a', '--auto_predict_f0', action='store_true', default=False,
                        help='automatically predict pitch for speech conversion; do not enable this for singing, it will drift badly off-key')
    parser.add_argument('-cm', '--cluster_model_path', type=str, default="logs/44k/kmeans_10000.pt", help='cluster model path; any value is fine if no cluster model was trained')
    parser.add_argument('-cr', '--cluster_infer_ratio', type=float, default=0, help='cluster scheme ratio, range 0-1; use 0 if no cluster model was trained')

    # settings you normally leave alone
    parser.add_argument('-sd', '--slice_db', type=int, default=-40, help='default -40; -30 works for noisy audio, -50 keeps breaths in dry vocals')
    parser.add_argument('-d', '--device', type=str, default=None, help='inference device; None auto-selects cpu or gpu')
    parser.add_argument('-ns', '--noice_scale', type=float, default=0.4, help='noise level; affects articulation and audio quality, somewhat arcane')
    parser.add_argument('-p', '--pad_seconds', type=float, default=0.5, help='seconds of padding for inference audio; for unknown reasons artifacts appear at the start and end, and padding a short stretch of silence removes them')
    parser.add_argument('-wf', '--wav_format', type=str, default='flac', help='audio output format')

    args = parser.parse_args()

    svc_model = Svc(args.model_path, args.config_path, args.device, args.cluster_model_path)
    infer_tool.mkdir(["raw", "results"])
    clean_names = args.clean_names
    trans = args.trans
    spk_list = args.spk_list
    slice_db = args.slice_db
    wav_format = args.wav_format
    auto_predict_f0 = args.auto_predict_f0
    cluster_infer_ratio = args.cluster_infer_ratio
    noice_scale = args.noice_scale
    pad_seconds = args.pad_seconds

    infer_tool.fill_a_to_b(trans, clean_names)
    for clean_name, tran in zip(clean_names, trans):
        raw_audio_path = f"raw/{clean_name}"
        if "." not in raw_audio_path:
            raw_audio_path += ".wav"
        infer_tool.format_wav(raw_audio_path)
        wav_path = Path(raw_audio_path).with_suffix('.wav')
        chunks = slicer.cut(wav_path, db_thresh=slice_db)
        audio_data, audio_sr = slicer.chunks2audio(wav_path, chunks)

        for spk in spk_list:
            audio = []
            for (slice_tag, data) in audio_data:
                print(f'#=====segment start, {round(len(data) / audio_sr, 3)}s======')

                length = int(np.ceil(len(data) / audio_sr * svc_model.target_sample))
                if slice_tag:
                    print('jump empty segment')
                    _audio = np.zeros(length)
                else:
                    # pad
                    pad_len = int(audio_sr * pad_seconds)
                    data = np.concatenate([np.zeros([pad_len]), data, np.zeros([pad_len])])
                    raw_path = io.BytesIO()
                    soundfile.write(raw_path, data, audio_sr, format="wav")
                    raw_path.seek(0)
                    out_audio, out_sr = svc_model.infer(spk, tran, raw_path,
                                                        cluster_infer_ratio=cluster_infer_ratio,
                                                        auto_predict_f0=auto_predict_f0,
                                                        noice_scale=noice_scale
                                                        )
                    _audio = out_audio.cpu().numpy()
                    pad_len = int(svc_model.target_sample * pad_seconds)
                    _audio = _audio[pad_len:-pad_len]

                audio.extend(list(infer_tool.pad_array(_audio, length)))
            key = "auto" if auto_predict_f0 else f"{tran}key"
            cluster_name = "" if cluster_infer_ratio == 0 else f"_{cluster_infer_ratio}"
            res_path = f'./results/{clean_name}_{key}_{spk}{cluster_name}.{wav_format}'
            soundfile.write(res_path, audio, svc_model.target_sample, format=wav_format)

if __name__ == '__main__':
    main()
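
A typical invocation of the script above, using the checkpoint and config shipped in this Space (the input file name and the speaker key "momoi" are assumptions; the result lands under results/):

python inference_main.py -m "logs/44k/momoi_E13_G40.pth" -c "configs/momoi.json" -n "input.wav" -t 0 -s "momoi"
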
logs/44k/momoi_E13_G40.pth
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:745c66b0d7e4dec65cb77625a610d66a14dc4a5579b0a951cb21698b49e34671
size 542789405
logs/44k/put_pretrained_model_here
ADDED
File without changes
models.py
ADDED
@@ -0,0 +1,420 @@
import copy
import math
import torch
from torch import nn
from torch.nn import functional as F

import modules.attentions as attentions
import modules.commons as commons
import modules.modules as modules

from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d
from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm

import utils
from modules.commons import init_weights, get_padding
from vdecoder.hifigan.models import Generator
from utils import f0_to_coarse

class ResidualCouplingBlock(nn.Module):
    def __init__(self,
                 channels,
                 hidden_channels,
                 kernel_size,
                 dilation_rate,
                 n_layers,
                 n_flows=4,
                 gin_channels=0):
        super().__init__()
        self.channels = channels
        self.hidden_channels = hidden_channels
        self.kernel_size = kernel_size
        self.dilation_rate = dilation_rate
        self.n_layers = n_layers
        self.n_flows = n_flows
        self.gin_channels = gin_channels

        self.flows = nn.ModuleList()
        for i in range(n_flows):
            self.flows.append(modules.ResidualCouplingLayer(channels, hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=gin_channels, mean_only=True))
            self.flows.append(modules.Flip())

    def forward(self, x, x_mask, g=None, reverse=False):
        if not reverse:
            for flow in self.flows:
                x, _ = flow(x, x_mask, g=g, reverse=reverse)
        else:
            for flow in reversed(self.flows):
                x = flow(x, x_mask, g=g, reverse=reverse)
        return x


class Encoder(nn.Module):
    def __init__(self,
                 in_channels,
                 out_channels,
                 hidden_channels,
                 kernel_size,
                 dilation_rate,
                 n_layers,
                 gin_channels=0):
        super().__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.hidden_channels = hidden_channels
        self.kernel_size = kernel_size
        self.dilation_rate = dilation_rate
        self.n_layers = n_layers
        self.gin_channels = gin_channels

        self.pre = nn.Conv1d(in_channels, hidden_channels, 1)
        self.enc = modules.WN(hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=gin_channels)
        self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)

    def forward(self, x, x_lengths, g=None):
        # print(x.shape,x_lengths.shape)
        x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype)
        x = self.pre(x) * x_mask
        x = self.enc(x, x_mask, g=g)
        stats = self.proj(x) * x_mask
        m, logs = torch.split(stats, self.out_channels, dim=1)
        z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask
        return z, m, logs, x_mask


class TextEncoder(nn.Module):
    def __init__(self,
                 out_channels,
                 hidden_channels,
                 kernel_size,
                 n_layers,
                 gin_channels=0,
                 filter_channels=None,
                 n_heads=None,
                 p_dropout=None):
        super().__init__()
        self.out_channels = out_channels
        self.hidden_channels = hidden_channels
        self.kernel_size = kernel_size
        self.n_layers = n_layers
        self.gin_channels = gin_channels
        self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)
        self.f0_emb = nn.Embedding(256, hidden_channels)

        self.enc_ = attentions.Encoder(
            hidden_channels,
            filter_channels,
            n_heads,
            n_layers,
            kernel_size,
            p_dropout)

    def forward(self, x, x_mask, f0=None, noice_scale=1):
        x = x + self.f0_emb(f0).transpose(1, 2)
        x = self.enc_(x * x_mask, x_mask)
        stats = self.proj(x) * x_mask
        m, logs = torch.split(stats, self.out_channels, dim=1)
        z = (m + torch.randn_like(m) * torch.exp(logs) * noice_scale) * x_mask

        return z, m, logs, x_mask



class DiscriminatorP(torch.nn.Module):
    def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False):
        super(DiscriminatorP, self).__init__()
        self.period = period
        self.use_spectral_norm = use_spectral_norm
        norm_f = weight_norm if use_spectral_norm == False else spectral_norm
        self.convs = nn.ModuleList([
            norm_f(Conv2d(1, 32, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))),
            norm_f(Conv2d(32, 128, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))),
            norm_f(Conv2d(128, 512, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))),
            norm_f(Conv2d(512, 1024, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))),
            norm_f(Conv2d(1024, 1024, (kernel_size, 1), 1, padding=(get_padding(kernel_size, 1), 0))),
        ])
        self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0)))

    def forward(self, x):
        fmap = []

        # 1d to 2d
        b, c, t = x.shape
        if t % self.period != 0:  # pad first
            n_pad = self.period - (t % self.period)
            x = F.pad(x, (0, n_pad), "reflect")
            t = t + n_pad
        x = x.view(b, c, t // self.period, self.period)

        for l in self.convs:
            x = l(x)
            x = F.leaky_relu(x, modules.LRELU_SLOPE)
            fmap.append(x)
        x = self.conv_post(x)
        fmap.append(x)
        x = torch.flatten(x, 1, -1)

        return x, fmap


class DiscriminatorS(torch.nn.Module):
    def __init__(self, use_spectral_norm=False):
        super(DiscriminatorS, self).__init__()
        norm_f = weight_norm if use_spectral_norm == False else spectral_norm
        self.convs = nn.ModuleList([
            norm_f(Conv1d(1, 16, 15, 1, padding=7)),
            norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)),
            norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)),
            norm_f(Conv1d(256, 1024, 41, 4, groups=64, padding=20)),
            norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)),
            norm_f(Conv1d(1024, 1024, 5, 1, padding=2)),
        ])
        self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1))

    def forward(self, x):
        fmap = []

        for l in self.convs:
            x = l(x)
            x = F.leaky_relu(x, modules.LRELU_SLOPE)
            fmap.append(x)
        x = self.conv_post(x)
        fmap.append(x)
        x = torch.flatten(x, 1, -1)

        return x, fmap


class MultiPeriodDiscriminator(torch.nn.Module):
    def __init__(self, use_spectral_norm=False):
        super(MultiPeriodDiscriminator, self).__init__()
        periods = [2, 3, 5, 7, 11]

        discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)]
        discs = discs + [DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods]
        self.discriminators = nn.ModuleList(discs)

    def forward(self, y, y_hat):
        y_d_rs = []
        y_d_gs = []
        fmap_rs = []
        fmap_gs = []
        for i, d in enumerate(self.discriminators):
            y_d_r, fmap_r = d(y)
            y_d_g, fmap_g = d(y_hat)
            y_d_rs.append(y_d_r)
            y_d_gs.append(y_d_g)
            fmap_rs.append(fmap_r)
            fmap_gs.append(fmap_g)

        return y_d_rs, y_d_gs, fmap_rs, fmap_gs


class SpeakerEncoder(torch.nn.Module):
    def __init__(self, mel_n_channels=80, model_num_layers=3, model_hidden_size=256, model_embedding_size=256):
        super(SpeakerEncoder, self).__init__()
        self.lstm = nn.LSTM(mel_n_channels, model_hidden_size, model_num_layers, batch_first=True)
        self.linear = nn.Linear(model_hidden_size, model_embedding_size)
        self.relu = nn.ReLU()

    def forward(self, mels):
        self.lstm.flatten_parameters()
        _, (hidden, _) = self.lstm(mels)
        embeds_raw = self.relu(self.linear(hidden[-1]))
        return embeds_raw / torch.norm(embeds_raw, dim=1, keepdim=True)

    def compute_partial_slices(self, total_frames, partial_frames, partial_hop):
        mel_slices = []
        for i in range(0, total_frames - partial_frames, partial_hop):
            mel_range = torch.arange(i, i + partial_frames)
            mel_slices.append(mel_range)

        return mel_slices

    def embed_utterance(self, mel, partial_frames=128, partial_hop=64):
        mel_len = mel.size(1)
        last_mel = mel[:, -partial_frames:]

        if mel_len > partial_frames:
            mel_slices = self.compute_partial_slices(mel_len, partial_frames, partial_hop)
            mels = list(mel[:, s] for s in mel_slices)
            mels.append(last_mel)
            mels = torch.stack(tuple(mels), 0).squeeze(1)

            with torch.no_grad():
                partial_embeds = self(mels)
            embed = torch.mean(partial_embeds, axis=0).unsqueeze(0)
            # embed = embed / torch.linalg.norm(embed, 2)
        else:
            with torch.no_grad():
                embed = self(last_mel)

        return embed

class F0Decoder(nn.Module):
    def __init__(self,
                 out_channels,
                 hidden_channels,
                 filter_channels,
                 n_heads,
                 n_layers,
                 kernel_size,
                 p_dropout,
                 spk_channels=0):
        super().__init__()
        self.out_channels = out_channels
        self.hidden_channels = hidden_channels
        self.filter_channels = filter_channels
        self.n_heads = n_heads
        self.n_layers = n_layers
        self.kernel_size = kernel_size
        self.p_dropout = p_dropout
        self.spk_channels = spk_channels

        self.prenet = nn.Conv1d(hidden_channels, hidden_channels, 3, padding=1)
        self.decoder = attentions.FFT(
            hidden_channels,
            filter_channels,
            n_heads,
            n_layers,
            kernel_size,
            p_dropout)
        self.proj = nn.Conv1d(hidden_channels, out_channels, 1)
        self.f0_prenet = nn.Conv1d(1, hidden_channels, 3, padding=1)
        self.cond = nn.Conv1d(spk_channels, hidden_channels, 1)

    def forward(self, x, norm_f0, x_mask, spk_emb=None):
        x = torch.detach(x)
        if (spk_emb is not None):
            x = x + self.cond(spk_emb)
        x += self.f0_prenet(norm_f0)
        x = self.prenet(x) * x_mask
        x = self.decoder(x * x_mask, x_mask)
        x = self.proj(x) * x_mask
        return x
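
For reference, the normalized log-F0 that SynthesizerTrn.forward below feeds into this decoder is a mel-style compression of f0. A small sketch of the forward transform and its inverse; the inverse is an assumption obtained by simply inverting the expression with the same constants:

import torch

def f0_to_lf0(f0: torch.Tensor) -> torch.Tensor:
    # same constants as SynthesizerTrn.forward: mel-scale compression, scaled by 1/500
    return 2595. * torch.log10(1. + f0 / 700.) / 500

def lf0_to_f0(lf0: torch.Tensor) -> torch.Tensor:
    # assumed inverse of the mapping above
    return 700. * (10 ** (lf0 * 500 / 2595.) - 1.)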
| 295 |
+
|
| 296 |
+
|
| 297 |
+
class SynthesizerTrn(nn.Module):
    """
    Synthesizer for Training
    """

    def __init__(self,
                 spec_channels,
                 segment_size,
                 inter_channels,
                 hidden_channels,
                 filter_channels,
                 n_heads,
                 n_layers,
                 kernel_size,
                 p_dropout,
                 resblock,
                 resblock_kernel_sizes,
                 resblock_dilation_sizes,
                 upsample_rates,
                 upsample_initial_channel,
                 upsample_kernel_sizes,
                 gin_channels,
                 ssl_dim,
                 n_speakers,
                 sampling_rate=44100,
                 **kwargs):

        super().__init__()
        self.spec_channels = spec_channels
        self.inter_channels = inter_channels
        self.hidden_channels = hidden_channels
        self.filter_channels = filter_channels
        self.n_heads = n_heads
        self.n_layers = n_layers
        self.kernel_size = kernel_size
        self.p_dropout = p_dropout
        self.resblock = resblock
        self.resblock_kernel_sizes = resblock_kernel_sizes
        self.resblock_dilation_sizes = resblock_dilation_sizes
        self.upsample_rates = upsample_rates
        self.upsample_initial_channel = upsample_initial_channel
        self.upsample_kernel_sizes = upsample_kernel_sizes
        self.segment_size = segment_size
        self.gin_channels = gin_channels
        self.ssl_dim = ssl_dim
        self.emb_g = nn.Embedding(n_speakers, gin_channels)

        self.pre = nn.Conv1d(ssl_dim, hidden_channels, kernel_size=5, padding=2)

        self.enc_p = TextEncoder(
            inter_channels,
            hidden_channels,
            filter_channels=filter_channels,
            n_heads=n_heads,
            n_layers=n_layers,
            kernel_size=kernel_size,
            p_dropout=p_dropout
        )
        hps = {
            "sampling_rate": sampling_rate,
            "inter_channels": inter_channels,
            "resblock": resblock,
            "resblock_kernel_sizes": resblock_kernel_sizes,
            "resblock_dilation_sizes": resblock_dilation_sizes,
            "upsample_rates": upsample_rates,
            "upsample_initial_channel": upsample_initial_channel,
            "upsample_kernel_sizes": upsample_kernel_sizes,
            "gin_channels": gin_channels,
        }
        self.dec = Generator(h=hps)
        self.enc_q = Encoder(spec_channels, inter_channels, hidden_channels, 5, 1, 16, gin_channels=gin_channels)
        self.flow = ResidualCouplingBlock(inter_channels, hidden_channels, 5, 1, 4, gin_channels=gin_channels)
        self.f0_decoder = F0Decoder(
            1,
            hidden_channels,
            filter_channels,
            n_heads,
            n_layers,
            kernel_size,
            p_dropout,
            spk_channels=gin_channels
        )
        self.emb_uv = nn.Embedding(2, hidden_channels)

    def forward(self, c, f0, uv, spec, g=None, c_lengths=None, spec_lengths=None):
        g = self.emb_g(g).transpose(1, 2)
        # ssl prenet
        x_mask = torch.unsqueeze(commons.sequence_mask(c_lengths, c.size(2)), 1).to(c.dtype)
        x = self.pre(c) * x_mask + self.emb_uv(uv.long()).transpose(1, 2)

        # f0 predict
        lf0 = 2595. * torch.log10(1. + f0.unsqueeze(1) / 700.) / 500
        norm_lf0 = utils.normalize_f0(lf0, x_mask, uv)
        pred_lf0 = self.f0_decoder(x, norm_lf0, x_mask, spk_emb=g)

        # encoder
        z_ptemp, m_p, logs_p, _ = self.enc_p(x, x_mask, f0=f0_to_coarse(f0))
        z, m_q, logs_q, spec_mask = self.enc_q(spec, spec_lengths, g=g)

        # flow
        z_p = self.flow(z, spec_mask, g=g)
        z_slice, pitch_slice, ids_slice = commons.rand_slice_segments_with_pitch(z, f0, spec_lengths, self.segment_size)

        # nsf decoder
        o = self.dec(z_slice, g=g, f0=pitch_slice)

        return o, ids_slice, spec_mask, (z, z_p, m_p, logs_p, m_q, logs_q), pred_lf0, norm_lf0, lf0

    def infer(self, c, f0, uv, g=None, noice_scale=0.35, predict_f0=False):
        c_lengths = (torch.ones(c.size(0)) * c.size(-1)).to(c.device)
        g = self.emb_g(g).transpose(1, 2)
        x_mask = torch.unsqueeze(commons.sequence_mask(c_lengths, c.size(2)), 1).to(c.dtype)
        x = self.pre(c) * x_mask + self.emb_uv(uv.long()).transpose(1, 2)

        if predict_f0:
            lf0 = 2595. * torch.log10(1. + f0.unsqueeze(1) / 700.) / 500
            norm_lf0 = utils.normalize_f0(lf0, x_mask, uv, random_scale=False)
            pred_lf0 = self.f0_decoder(x, norm_lf0, x_mask, spk_emb=g)
            f0 = (700 * (torch.pow(10, pred_lf0 * 500 / 2595) - 1)).squeeze(1)

        z_p, m_p, logs_p, c_mask = self.enc_p(x, x_mask, f0=f0_to_coarse(f0), noice_scale=noice_scale)
        z = self.flow(z_p, c_mask, g=g, reverse=True)
        o = self.dec(z * c_mask, g=g, f0=f0)
        return o
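
# --- Sanity sketch, not part of the original file: the f0 branch works on a
# --- mel-scale log frequency, lf0 = 2595*log10(1 + f0/700)/500, and infer()
# --- inverts it with f0 = 700*(10**(lf0*500/2595) - 1). The two formulas
# --- round-trip exactly:
# _f0 = torch.tensor([110.0, 220.0, 440.0])                # Hz
# _lf0 = 2595. * torch.log10(1. + _f0 / 700.) / 500        # forward()'s mapping
# _back = 700 * (torch.pow(10, _lf0 * 500 / 2595) - 1)     # infer()'s inverse
# print(torch.allclose(_f0, _back, atol=1e-3))             # True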
modules/__init__.py
ADDED
File without changes
modules/attentions.py
ADDED
@@ -0,0 +1,349 @@
import copy
import math
import numpy as np
import torch
from torch import nn
from torch.nn import functional as F

import modules.commons as commons
import modules.modules as modules
from modules.modules import LayerNorm


class FFT(nn.Module):
    def __init__(self, hidden_channels, filter_channels, n_heads, n_layers=1, kernel_size=1, p_dropout=0.,
                 proximal_bias=False, proximal_init=True, **kwargs):
        super().__init__()
        self.hidden_channels = hidden_channels
        self.filter_channels = filter_channels
        self.n_heads = n_heads
        self.n_layers = n_layers
        self.kernel_size = kernel_size
        self.p_dropout = p_dropout
        self.proximal_bias = proximal_bias
        self.proximal_init = proximal_init

        self.drop = nn.Dropout(p_dropout)
        self.self_attn_layers = nn.ModuleList()
        self.norm_layers_0 = nn.ModuleList()
        self.ffn_layers = nn.ModuleList()
        self.norm_layers_1 = nn.ModuleList()
        for i in range(self.n_layers):
            self.self_attn_layers.append(
                MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout,
                                   proximal_bias=proximal_bias, proximal_init=proximal_init))
            self.norm_layers_0.append(LayerNorm(hidden_channels))
            self.ffn_layers.append(
                FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout, causal=True))
            self.norm_layers_1.append(LayerNorm(hidden_channels))

    def forward(self, x, x_mask):
        """
        x: decoder input (processed with causal self-attention only; no encoder context)
        """
        self_attn_mask = commons.subsequent_mask(x_mask.size(2)).to(device=x.device, dtype=x.dtype)
        x = x * x_mask
        for i in range(self.n_layers):
            y = self.self_attn_layers[i](x, x, self_attn_mask)
            y = self.drop(y)
            x = self.norm_layers_0[i](x + y)

            y = self.ffn_layers[i](x, x_mask)
            y = self.drop(y)
            x = self.norm_layers_1[i](x + y)
        x = x * x_mask
        return x


class Encoder(nn.Module):
    def __init__(self, hidden_channels, filter_channels, n_heads, n_layers, kernel_size=1, p_dropout=0., window_size=4, **kwargs):
        super().__init__()
        self.hidden_channels = hidden_channels
        self.filter_channels = filter_channels
        self.n_heads = n_heads
        self.n_layers = n_layers
        self.kernel_size = kernel_size
        self.p_dropout = p_dropout
        self.window_size = window_size

        self.drop = nn.Dropout(p_dropout)
        self.attn_layers = nn.ModuleList()
        self.norm_layers_1 = nn.ModuleList()
        self.ffn_layers = nn.ModuleList()
        self.norm_layers_2 = nn.ModuleList()
        for i in range(self.n_layers):
            self.attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout, window_size=window_size))
            self.norm_layers_1.append(LayerNorm(hidden_channels))
            self.ffn_layers.append(FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout))
            self.norm_layers_2.append(LayerNorm(hidden_channels))

    def forward(self, x, x_mask):
        attn_mask = x_mask.unsqueeze(2) * x_mask.unsqueeze(-1)
        x = x * x_mask
        for i in range(self.n_layers):
            y = self.attn_layers[i](x, x, attn_mask)
            y = self.drop(y)
            x = self.norm_layers_1[i](x + y)

            y = self.ffn_layers[i](x, x_mask)
            y = self.drop(y)
            x = self.norm_layers_2[i](x + y)
        x = x * x_mask
        return x


class Decoder(nn.Module):
    def __init__(self, hidden_channels, filter_channels, n_heads, n_layers, kernel_size=1, p_dropout=0., proximal_bias=False, proximal_init=True, **kwargs):
        super().__init__()
        self.hidden_channels = hidden_channels
        self.filter_channels = filter_channels
        self.n_heads = n_heads
        self.n_layers = n_layers
        self.kernel_size = kernel_size
        self.p_dropout = p_dropout
        self.proximal_bias = proximal_bias
        self.proximal_init = proximal_init

        self.drop = nn.Dropout(p_dropout)
        self.self_attn_layers = nn.ModuleList()
        self.norm_layers_0 = nn.ModuleList()
        self.encdec_attn_layers = nn.ModuleList()
        self.norm_layers_1 = nn.ModuleList()
        self.ffn_layers = nn.ModuleList()
        self.norm_layers_2 = nn.ModuleList()
        for i in range(self.n_layers):
            self.self_attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout, proximal_bias=proximal_bias, proximal_init=proximal_init))
            self.norm_layers_0.append(LayerNorm(hidden_channels))
            self.encdec_attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout))
            self.norm_layers_1.append(LayerNorm(hidden_channels))
            self.ffn_layers.append(FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout, causal=True))
            self.norm_layers_2.append(LayerNorm(hidden_channels))

    def forward(self, x, x_mask, h, h_mask):
        """
        x: decoder input
        h: encoder output
        """
        self_attn_mask = commons.subsequent_mask(x_mask.size(2)).to(device=x.device, dtype=x.dtype)
        encdec_attn_mask = h_mask.unsqueeze(2) * x_mask.unsqueeze(-1)
        x = x * x_mask
        for i in range(self.n_layers):
            y = self.self_attn_layers[i](x, x, self_attn_mask)
            y = self.drop(y)
            x = self.norm_layers_0[i](x + y)

            y = self.encdec_attn_layers[i](x, h, encdec_attn_mask)
            y = self.drop(y)
            x = self.norm_layers_1[i](x + y)

            y = self.ffn_layers[i](x, x_mask)
            y = self.drop(y)
            x = self.norm_layers_2[i](x + y)
        x = x * x_mask
        return x


class MultiHeadAttention(nn.Module):
    def __init__(self, channels, out_channels, n_heads, p_dropout=0., window_size=None, heads_share=True, block_length=None, proximal_bias=False, proximal_init=False):
        super().__init__()
        assert channels % n_heads == 0

        self.channels = channels
        self.out_channels = out_channels
        self.n_heads = n_heads
        self.p_dropout = p_dropout
        self.window_size = window_size
        self.heads_share = heads_share
        self.block_length = block_length
        self.proximal_bias = proximal_bias
        self.proximal_init = proximal_init
        self.attn = None

        self.k_channels = channels // n_heads
        self.conv_q = nn.Conv1d(channels, channels, 1)
        self.conv_k = nn.Conv1d(channels, channels, 1)
        self.conv_v = nn.Conv1d(channels, channels, 1)
        self.conv_o = nn.Conv1d(channels, out_channels, 1)
        self.drop = nn.Dropout(p_dropout)

        if window_size is not None:
            n_heads_rel = 1 if heads_share else n_heads
            rel_stddev = self.k_channels**-0.5
            self.emb_rel_k = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev)
            self.emb_rel_v = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev)

        nn.init.xavier_uniform_(self.conv_q.weight)
        nn.init.xavier_uniform_(self.conv_k.weight)
        nn.init.xavier_uniform_(self.conv_v.weight)
        if proximal_init:
            with torch.no_grad():
                self.conv_k.weight.copy_(self.conv_q.weight)
                self.conv_k.bias.copy_(self.conv_q.bias)

    def forward(self, x, c, attn_mask=None):
        q = self.conv_q(x)
        k = self.conv_k(c)
        v = self.conv_v(c)

        x, self.attn = self.attention(q, k, v, mask=attn_mask)

        x = self.conv_o(x)
        return x

    def attention(self, query, key, value, mask=None):
        # reshape [b, d, t] -> [b, n_h, t, d_k]
        b, d, t_s, t_t = (*key.size(), query.size(2))
        query = query.view(b, self.n_heads, self.k_channels, t_t).transpose(2, 3)
        key = key.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3)
        value = value.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3)

        scores = torch.matmul(query / math.sqrt(self.k_channels), key.transpose(-2, -1))
        if self.window_size is not None:
            assert t_s == t_t, "Relative attention is only available for self-attention."
            key_relative_embeddings = self._get_relative_embeddings(self.emb_rel_k, t_s)
            rel_logits = self._matmul_with_relative_keys(query / math.sqrt(self.k_channels), key_relative_embeddings)
            scores_local = self._relative_position_to_absolute_position(rel_logits)
            scores = scores + scores_local
        if self.proximal_bias:
            assert t_s == t_t, "Proximal bias is only available for self-attention."
            scores = scores + self._attention_bias_proximal(t_s).to(device=scores.device, dtype=scores.dtype)
        if mask is not None:
            scores = scores.masked_fill(mask == 0, -1e4)
            if self.block_length is not None:
                assert t_s == t_t, "Local attention is only available for self-attention."
                block_mask = torch.ones_like(scores).triu(-self.block_length).tril(self.block_length)
                scores = scores.masked_fill(block_mask == 0, -1e4)
        p_attn = F.softmax(scores, dim=-1)  # [b, n_h, t_t, t_s]
        p_attn = self.drop(p_attn)
        output = torch.matmul(p_attn, value)
        if self.window_size is not None:
            relative_weights = self._absolute_position_to_relative_position(p_attn)
            value_relative_embeddings = self._get_relative_embeddings(self.emb_rel_v, t_s)
            output = output + self._matmul_with_relative_values(relative_weights, value_relative_embeddings)
        output = output.transpose(2, 3).contiguous().view(b, d, t_t)  # [b, n_h, t_t, d_k] -> [b, d, t_t]
        return output, p_attn

    def _matmul_with_relative_values(self, x, y):
        """
        x: [b, h, l, m]
        y: [h or 1, m, d]
        ret: [b, h, l, d]
        """
        ret = torch.matmul(x, y.unsqueeze(0))
        return ret

    def _matmul_with_relative_keys(self, x, y):
        """
        x: [b, h, l, d]
        y: [h or 1, m, d]
        ret: [b, h, l, m]
        """
        ret = torch.matmul(x, y.unsqueeze(0).transpose(-2, -1))
        return ret

    def _get_relative_embeddings(self, relative_embeddings, length):
        max_relative_position = 2 * self.window_size + 1
        # Pad first before slice to avoid using cond ops.
        pad_length = max(length - (self.window_size + 1), 0)
        slice_start_position = max((self.window_size + 1) - length, 0)
        slice_end_position = slice_start_position + 2 * length - 1
        if pad_length > 0:
            padded_relative_embeddings = F.pad(
                relative_embeddings,
                commons.convert_pad_shape([[0, 0], [pad_length, pad_length], [0, 0]]))
        else:
            padded_relative_embeddings = relative_embeddings
        used_relative_embeddings = padded_relative_embeddings[:, slice_start_position:slice_end_position]
        return used_relative_embeddings

    def _relative_position_to_absolute_position(self, x):
        """
        x: [b, h, l, 2*l-1]
        ret: [b, h, l, l]
        """
        batch, heads, length, _ = x.size()
        # Concat columns of pad to shift from relative to absolute indexing.
        x = F.pad(x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, 1]]))

        # Concat extra elements so as to add up to shape (len+1, 2*len-1).
        x_flat = x.view([batch, heads, length * 2 * length])
        x_flat = F.pad(x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [0, length - 1]]))

        # Reshape and slice out the padded elements.
        x_final = x_flat.view([batch, heads, length + 1, 2 * length - 1])[:, :, :length, length - 1:]
        return x_final

    def _absolute_position_to_relative_position(self, x):
        """
        x: [b, h, l, l]
        ret: [b, h, l, 2*l-1]
        """
        batch, heads, length, _ = x.size()
        # pad along column
        x = F.pad(x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, length - 1]]))
        x_flat = x.view([batch, heads, length**2 + length * (length - 1)])
        # add 0's in the beginning that will skew the elements after reshape
        x_flat = F.pad(x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [length, 0]]))
        x_final = x_flat.view([batch, heads, length, 2 * length])[:, :, :, 1:]
        return x_final

    def _attention_bias_proximal(self, length):
        """Bias for self-attention to encourage attention to close positions.
        Args:
          length: an integer scalar.
        Returns:
          a Tensor with shape [1, 1, length, length]
        """
        r = torch.arange(length, dtype=torch.float32)
        diff = torch.unsqueeze(r, 0) - torch.unsqueeze(r, 1)
        return torch.unsqueeze(torch.unsqueeze(-torch.log1p(torch.abs(diff)), 0), 0)


class FFN(nn.Module):
    def __init__(self, in_channels, out_channels, filter_channels, kernel_size, p_dropout=0., activation=None, causal=False):
        super().__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.filter_channels = filter_channels
        self.kernel_size = kernel_size
        self.p_dropout = p_dropout
        self.activation = activation
        self.causal = causal

        if causal:
            self.padding = self._causal_padding
        else:
            self.padding = self._same_padding

        self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size)
        self.conv_2 = nn.Conv1d(filter_channels, out_channels, kernel_size)
        self.drop = nn.Dropout(p_dropout)

    def forward(self, x, x_mask):
        x = self.conv_1(self.padding(x * x_mask))
        if self.activation == "gelu":
            x = x * torch.sigmoid(1.702 * x)
        else:
            x = torch.relu(x)
        x = self.drop(x)
        x = self.conv_2(self.padding(x * x_mask))
        return x * x_mask

    def _causal_padding(self, x):
        if self.kernel_size == 1:
            return x
        pad_l = self.kernel_size - 1
        pad_r = 0
        padding = [[0, 0], [0, 0], [pad_l, pad_r]]
        x = F.pad(x, commons.convert_pad_shape(padding))
        return x

    def _same_padding(self, x):
        if self.kernel_size == 1:
            return x
        pad_l = (self.kernel_size - 1) // 2
        pad_r = self.kernel_size // 2
        padding = [[0, 0], [0, 0], [pad_l, pad_r]]
        x = F.pad(x, commons.convert_pad_shape(padding))
        return x
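
# --- Illustrative sketch, not part of attentions.py: running the
# --- relative-position Encoder above over a padded batch. All sizes are
# --- assumptions; sequence_mask comes from modules/commons.py below.
# import torch
# from modules.commons import sequence_mask
# enc = Encoder(hidden_channels=192, filter_channels=768, n_heads=2,
#               n_layers=6, kernel_size=3, p_dropout=0.1, window_size=4)
# x = torch.randn(2, 192, 50)                                # [B, C, T]
# x_mask = sequence_mask(torch.tensor([50, 30]), 50).unsqueeze(1).to(x.dtype)
# print(enc(x, x_mask).shape)                                # torch.Size([2, 192, 50])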
modules/commons.py
ADDED
@@ -0,0 +1,188 @@
import math
import numpy as np
import torch
from torch import nn
from torch.nn import functional as F


def slice_pitch_segments(x, ids_str, segment_size=4):
    ret = torch.zeros_like(x[:, :segment_size])
    for i in range(x.size(0)):
        idx_str = ids_str[i]
        idx_end = idx_str + segment_size
        ret[i] = x[i, idx_str:idx_end]
    return ret


def rand_slice_segments_with_pitch(x, pitch, x_lengths=None, segment_size=4):
    b, d, t = x.size()
    if x_lengths is None:
        x_lengths = t
    ids_str_max = x_lengths - segment_size + 1
    ids_str = (torch.rand([b]).to(device=x.device) * ids_str_max).to(dtype=torch.long)
    ret = slice_segments(x, ids_str, segment_size)
    ret_pitch = slice_pitch_segments(pitch, ids_str, segment_size)
    return ret, ret_pitch, ids_str


def init_weights(m, mean=0.0, std=0.01):
    classname = m.__class__.__name__
    if classname.find("Conv") != -1:
        m.weight.data.normal_(mean, std)


def get_padding(kernel_size, dilation=1):
    return int((kernel_size * dilation - dilation) / 2)


def convert_pad_shape(pad_shape):
    l = pad_shape[::-1]
    pad_shape = [item for sublist in l for item in sublist]
    return pad_shape


def intersperse(lst, item):
    result = [item] * (len(lst) * 2 + 1)
    result[1::2] = lst
    return result


def kl_divergence(m_p, logs_p, m_q, logs_q):
    """KL(P||Q)"""
    kl = (logs_q - logs_p) - 0.5
    kl += 0.5 * (torch.exp(2. * logs_p) + ((m_p - m_q)**2)) * torch.exp(-2. * logs_q)
    return kl


def rand_gumbel(shape):
    """Sample from the Gumbel distribution, protect from overflows."""
    uniform_samples = torch.rand(shape) * 0.99998 + 0.00001
    return -torch.log(-torch.log(uniform_samples))


def rand_gumbel_like(x):
    g = rand_gumbel(x.size()).to(dtype=x.dtype, device=x.device)
    return g


def slice_segments(x, ids_str, segment_size=4):
    ret = torch.zeros_like(x[:, :, :segment_size])
    for i in range(x.size(0)):
        idx_str = ids_str[i]
        idx_end = idx_str + segment_size
        ret[i] = x[i, :, idx_str:idx_end]
    return ret


def rand_slice_segments(x, x_lengths=None, segment_size=4):
    b, d, t = x.size()
    if x_lengths is None:
        x_lengths = t
    ids_str_max = x_lengths - segment_size + 1
    ids_str = (torch.rand([b]).to(device=x.device) * ids_str_max).to(dtype=torch.long)
    ret = slice_segments(x, ids_str, segment_size)
    return ret, ids_str


def rand_spec_segments(x, x_lengths=None, segment_size=4):
    b, d, t = x.size()
    if x_lengths is None:
        x_lengths = t
    ids_str_max = x_lengths - segment_size
    ids_str = (torch.rand([b]).to(device=x.device) * ids_str_max).to(dtype=torch.long)
    ret = slice_segments(x, ids_str, segment_size)
    return ret, ids_str


def get_timing_signal_1d(
        length, channels, min_timescale=1.0, max_timescale=1.0e4):
    position = torch.arange(length, dtype=torch.float)
    num_timescales = channels // 2
    log_timescale_increment = (
        math.log(float(max_timescale) / float(min_timescale)) /
        (num_timescales - 1))
    inv_timescales = min_timescale * torch.exp(
        torch.arange(num_timescales, dtype=torch.float) * -log_timescale_increment)
    scaled_time = position.unsqueeze(0) * inv_timescales.unsqueeze(1)
    signal = torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)], 0)
    signal = F.pad(signal, [0, 0, 0, channels % 2])
    signal = signal.view(1, channels, length)
    return signal


def add_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4):
    b, channels, length = x.size()
    signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale)
    return x + signal.to(dtype=x.dtype, device=x.device)


def cat_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4, axis=1):
    b, channels, length = x.size()
    signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale)
    return torch.cat([x, signal.to(dtype=x.dtype, device=x.device)], axis)


def subsequent_mask(length):
    mask = torch.tril(torch.ones(length, length)).unsqueeze(0).unsqueeze(0)
    return mask


@torch.jit.script
def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels):
    n_channels_int = n_channels[0]
    in_act = input_a + input_b
    t_act = torch.tanh(in_act[:, :n_channels_int, :])
    s_act = torch.sigmoid(in_act[:, n_channels_int:, :])
    acts = t_act * s_act
    return acts


def convert_pad_shape(pad_shape):
    # Note: duplicate of convert_pad_shape above; this later definition takes effect.
    l = pad_shape[::-1]
    pad_shape = [item for sublist in l for item in sublist]
    return pad_shape


def shift_1d(x):
    x = F.pad(x, convert_pad_shape([[0, 0], [0, 0], [1, 0]]))[:, :, :-1]
    return x


def sequence_mask(length, max_length=None):
    if max_length is None:
        max_length = length.max()
    x = torch.arange(max_length, dtype=length.dtype, device=length.device)
    return x.unsqueeze(0) < length.unsqueeze(1)


def generate_path(duration, mask):
    """
    duration: [b, 1, t_x]
    mask: [b, 1, t_y, t_x]
    """
    device = duration.device

    b, _, t_y, t_x = mask.shape
    cum_duration = torch.cumsum(duration, -1)

    cum_duration_flat = cum_duration.view(b * t_x)
    path = sequence_mask(cum_duration_flat, t_y).to(mask.dtype)
    path = path.view(b, t_x, t_y)
    path = path - F.pad(path, convert_pad_shape([[0, 0], [1, 0], [0, 0]]))[:, :-1]
    path = path.unsqueeze(1).transpose(2, 3) * mask
    return path


def clip_grad_value_(parameters, clip_value, norm_type=2):
    if isinstance(parameters, torch.Tensor):
        parameters = [parameters]
    parameters = list(filter(lambda p: p.grad is not None, parameters))
    norm_type = float(norm_type)
    if clip_value is not None:
        clip_value = float(clip_value)

    total_norm = 0
    for p in parameters:
        param_norm = p.grad.data.norm(norm_type)
        total_norm += param_norm.item() ** norm_type
        if clip_value is not None:
            p.grad.data.clamp_(min=-clip_value, max=clip_value)
    total_norm = total_norm ** (1. / norm_type)
    return total_norm
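
# --- Illustrative sketch, not part of commons.py: the two helpers the
# --- synthesizer leans on most. sequence_mask turns lengths into a boolean
# --- padding mask; rand_slice_segments crops random fixed-size windows.
# import torch
# print(sequence_mask(torch.tensor([5, 3]), 5).int())
# # tensor([[1, 1, 1, 1, 1],
# #         [1, 1, 1, 0, 0]], dtype=torch.int32)
# x = torch.randn(2, 4, 10)                                  # [B, C, T]
# segments, ids_str = rand_slice_segments(x, torch.tensor([10, 8]), segment_size=4)
# print(segments.shape)                                      # torch.Size([2, 4, 4])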
modules/losses.py
ADDED
@@ -0,0 +1,61 @@
import torch
from torch.nn import functional as F

import modules.commons as commons


def feature_loss(fmap_r, fmap_g):
    loss = 0
    for dr, dg in zip(fmap_r, fmap_g):
        for rl, gl in zip(dr, dg):
            rl = rl.float().detach()
            gl = gl.float()
            loss += torch.mean(torch.abs(rl - gl))

    return loss * 2


def discriminator_loss(disc_real_outputs, disc_generated_outputs):
    loss = 0
    r_losses = []
    g_losses = []
    for dr, dg in zip(disc_real_outputs, disc_generated_outputs):
        dr = dr.float()
        dg = dg.float()
        r_loss = torch.mean((1 - dr)**2)
        g_loss = torch.mean(dg**2)
        loss += (r_loss + g_loss)
        r_losses.append(r_loss.item())
        g_losses.append(g_loss.item())

    return loss, r_losses, g_losses


def generator_loss(disc_outputs):
    loss = 0
    gen_losses = []
    for dg in disc_outputs:
        dg = dg.float()
        l = torch.mean((1 - dg)**2)
        gen_losses.append(l)
        loss += l

    return loss, gen_losses


def kl_loss(z_p, logs_q, m_p, logs_p, z_mask):
    """
    z_p, logs_q: [b, h, t_t]
    m_p, logs_p: [b, h, t_t]
    """
    z_p = z_p.float()
    logs_q = logs_q.float()
    m_p = m_p.float()
    logs_p = logs_p.float()
    z_mask = z_mask.float()
    # print(logs_p)
    kl = logs_p - logs_q - 0.5
    kl += 0.5 * ((z_p - m_p)**2) * torch.exp(-2. * logs_p)
    kl = torch.sum(kl * z_mask)
    l = kl / torch.sum(z_mask)
    return l
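
# --- Illustrative sketch, not part of losses.py: these are least-squares GAN
# --- objectives -- the discriminator pushes real scores toward 1 and fake
# --- toward 0, the generator pushes fake scores toward 1. Dummy scores only:
# import torch
# d_real = [torch.tensor([0.9, 1.1])]      # one sub-discriminator, near-real
# d_fake = [torch.tensor([0.1, -0.2])]     # near-fake
# d_loss, r_losses, g_losses = discriminator_loss(d_real, d_fake)
# g_loss, gen_losses = generator_loss(d_fake)
# print(round(d_loss.item(), 3), round(g_loss.item(), 3))    # 0.035 1.125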
modules/mel_processing.py
ADDED
@@ -0,0 +1,112 @@
import math
import os
import random
import torch
from torch import nn
import torch.nn.functional as F
import torch.utils.data
import numpy as np
import librosa
import librosa.util as librosa_util
from librosa.util import normalize, pad_center, tiny
from scipy.signal import get_window
from scipy.io.wavfile import read
from librosa.filters import mel as librosa_mel_fn

MAX_WAV_VALUE = 32768.0


def dynamic_range_compression_torch(x, C=1, clip_val=1e-5):
    """
    PARAMS
    ------
    C: compression factor
    """
    return torch.log(torch.clamp(x, min=clip_val) * C)


def dynamic_range_decompression_torch(x, C=1):
    """
    PARAMS
    ------
    C: compression factor used to compress
    """
    return torch.exp(x) / C


def spectral_normalize_torch(magnitudes):
    output = dynamic_range_compression_torch(magnitudes)
    return output


def spectral_de_normalize_torch(magnitudes):
    output = dynamic_range_decompression_torch(magnitudes)
    return output


mel_basis = {}
hann_window = {}


def spectrogram_torch(y, n_fft, sampling_rate, hop_size, win_size, center=False):
    if torch.min(y) < -1.:
        print('min value is ', torch.min(y))
    if torch.max(y) > 1.:
        print('max value is ', torch.max(y))

    global hann_window
    dtype_device = str(y.dtype) + '_' + str(y.device)
    wnsize_dtype_device = str(win_size) + '_' + dtype_device
    if wnsize_dtype_device not in hann_window:
        hann_window[wnsize_dtype_device] = torch.hann_window(win_size).to(dtype=y.dtype, device=y.device)

    y = torch.nn.functional.pad(y.unsqueeze(1), (int((n_fft - hop_size) / 2), int((n_fft - hop_size) / 2)), mode='reflect')
    y = y.squeeze(1)

    spec = torch.stft(y, n_fft, hop_length=hop_size, win_length=win_size, window=hann_window[wnsize_dtype_device],
                      center=center, pad_mode='reflect', normalized=False, onesided=True, return_complex=False)

    spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-6)
    return spec


def spec_to_mel_torch(spec, n_fft, num_mels, sampling_rate, fmin, fmax):
    global mel_basis
    dtype_device = str(spec.dtype) + '_' + str(spec.device)
    fmax_dtype_device = str(fmax) + '_' + dtype_device
    if fmax_dtype_device not in mel_basis:
        mel = librosa_mel_fn(sr=sampling_rate, n_fft=n_fft, n_mels=num_mels, fmin=fmin, fmax=fmax)
        mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(dtype=spec.dtype, device=spec.device)
    spec = torch.matmul(mel_basis[fmax_dtype_device], spec)
    spec = spectral_normalize_torch(spec)
    return spec


def mel_spectrogram_torch(y, n_fft, num_mels, sampling_rate, hop_size, win_size, fmin, fmax, center=False):
    if torch.min(y) < -1.:
        print('min value is ', torch.min(y))
    if torch.max(y) > 1.:
        print('max value is ', torch.max(y))

    global mel_basis, hann_window
    dtype_device = str(y.dtype) + '_' + str(y.device)
    fmax_dtype_device = str(fmax) + '_' + dtype_device
    wnsize_dtype_device = str(win_size) + '_' + dtype_device
    if fmax_dtype_device not in mel_basis:
        mel = librosa_mel_fn(sr=sampling_rate, n_fft=n_fft, n_mels=num_mels, fmin=fmin, fmax=fmax)
        mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(dtype=y.dtype, device=y.device)
    if wnsize_dtype_device not in hann_window:
        hann_window[wnsize_dtype_device] = torch.hann_window(win_size).to(dtype=y.dtype, device=y.device)

    y = torch.nn.functional.pad(y.unsqueeze(1), (int((n_fft - hop_size) / 2), int((n_fft - hop_size) / 2)), mode='reflect')
    y = y.squeeze(1)

    spec = torch.stft(y, n_fft, hop_length=hop_size, win_length=win_size, window=hann_window[wnsize_dtype_device],
                      center=center, pad_mode='reflect', normalized=False, onesided=True, return_complex=False)

    spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-6)

    spec = torch.matmul(mel_basis[fmax_dtype_device], spec)
    spec = spectral_normalize_torch(spec)

    return spec
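
# --- Illustrative sketch, not part of mel_processing.py: one second of audio
# --- -> log-mel spectrogram. The 44.1 kHz STFT settings below mirror this
# --- repo's 44k configs but are assumptions here, not requirements.
# import torch
# y = 0.1 * torch.randn(1, 44100)          # fake 1 s waveform, roughly in [-1, 1]
# mel = mel_spectrogram_torch(y, n_fft=2048, num_mels=80, sampling_rate=44100,
#                             hop_size=512, win_size=2048, fmin=0, fmax=22050)
# print(mel.shape)                         # torch.Size([1, 80, 86])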
modules/modules.py
ADDED
@@ -0,0 +1,342 @@
import copy
import math
import numpy as np
import scipy
import torch
from torch import nn
from torch.nn import functional as F

from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d
from torch.nn.utils import weight_norm, remove_weight_norm

import modules.commons as commons
from modules.commons import init_weights, get_padding


LRELU_SLOPE = 0.1


class LayerNorm(nn.Module):
    def __init__(self, channels, eps=1e-5):
        super().__init__()
        self.channels = channels
        self.eps = eps

        self.gamma = nn.Parameter(torch.ones(channels))
        self.beta = nn.Parameter(torch.zeros(channels))

    def forward(self, x):
        x = x.transpose(1, -1)
        x = F.layer_norm(x, (self.channels,), self.gamma, self.beta, self.eps)
        return x.transpose(1, -1)


class ConvReluNorm(nn.Module):
    def __init__(self, in_channels, hidden_channels, out_channels, kernel_size, n_layers, p_dropout):
        super().__init__()
        self.in_channels = in_channels
        self.hidden_channels = hidden_channels
        self.out_channels = out_channels
        self.kernel_size = kernel_size
        self.n_layers = n_layers
        self.p_dropout = p_dropout
        assert n_layers > 1, "Number of layers should be larger than 1."

        self.conv_layers = nn.ModuleList()
        self.norm_layers = nn.ModuleList()
        self.conv_layers.append(nn.Conv1d(in_channels, hidden_channels, kernel_size, padding=kernel_size//2))
        self.norm_layers.append(LayerNorm(hidden_channels))
        self.relu_drop = nn.Sequential(
            nn.ReLU(),
            nn.Dropout(p_dropout))
        for _ in range(n_layers - 1):
            self.conv_layers.append(nn.Conv1d(hidden_channels, hidden_channels, kernel_size, padding=kernel_size//2))
            self.norm_layers.append(LayerNorm(hidden_channels))
        self.proj = nn.Conv1d(hidden_channels, out_channels, 1)
        self.proj.weight.data.zero_()
        self.proj.bias.data.zero_()

    def forward(self, x, x_mask):
        x_org = x
        for i in range(self.n_layers):
            x = self.conv_layers[i](x * x_mask)
            x = self.norm_layers[i](x)
            x = self.relu_drop(x)
        x = x_org + self.proj(x)
        return x * x_mask


class DDSConv(nn.Module):
    """
    Dilated and Depth-Separable Convolution
    """
    def __init__(self, channels, kernel_size, n_layers, p_dropout=0.):
        super().__init__()
        self.channels = channels
        self.kernel_size = kernel_size
        self.n_layers = n_layers
        self.p_dropout = p_dropout

        self.drop = nn.Dropout(p_dropout)
        self.convs_sep = nn.ModuleList()
        self.convs_1x1 = nn.ModuleList()
        self.norms_1 = nn.ModuleList()
        self.norms_2 = nn.ModuleList()
        for i in range(n_layers):
            dilation = kernel_size ** i
            padding = (kernel_size * dilation - dilation) // 2
            self.convs_sep.append(nn.Conv1d(channels, channels, kernel_size,
                                            groups=channels, dilation=dilation, padding=padding
                                            ))
            self.convs_1x1.append(nn.Conv1d(channels, channels, 1))
            self.norms_1.append(LayerNorm(channels))
            self.norms_2.append(LayerNorm(channels))

    def forward(self, x, x_mask, g=None):
        if g is not None:
            x = x + g
        for i in range(self.n_layers):
            y = self.convs_sep[i](x * x_mask)
            y = self.norms_1[i](y)
            y = F.gelu(y)
            y = self.convs_1x1[i](y)
            y = self.norms_2[i](y)
            y = F.gelu(y)
            y = self.drop(y)
            x = x + y
        return x * x_mask


class WN(torch.nn.Module):
    def __init__(self, hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=0, p_dropout=0):
        super(WN, self).__init__()
        assert (kernel_size % 2 == 1)
        self.hidden_channels = hidden_channels
        self.kernel_size = kernel_size
        self.dilation_rate = dilation_rate
        self.n_layers = n_layers
        self.gin_channels = gin_channels
        self.p_dropout = p_dropout

        self.in_layers = torch.nn.ModuleList()
        self.res_skip_layers = torch.nn.ModuleList()
        self.drop = nn.Dropout(p_dropout)

        if gin_channels != 0:
            cond_layer = torch.nn.Conv1d(gin_channels, 2 * hidden_channels * n_layers, 1)
            self.cond_layer = torch.nn.utils.weight_norm(cond_layer, name='weight')

        for i in range(n_layers):
            dilation = dilation_rate ** i
            padding = int((kernel_size * dilation - dilation) / 2)
            in_layer = torch.nn.Conv1d(hidden_channels, 2 * hidden_channels, kernel_size,
                                       dilation=dilation, padding=padding)
            in_layer = torch.nn.utils.weight_norm(in_layer, name='weight')
            self.in_layers.append(in_layer)

            # last one is not necessary
            if i < n_layers - 1:
                res_skip_channels = 2 * hidden_channels
            else:
                res_skip_channels = hidden_channels

            res_skip_layer = torch.nn.Conv1d(hidden_channels, res_skip_channels, 1)
            res_skip_layer = torch.nn.utils.weight_norm(res_skip_layer, name='weight')
            self.res_skip_layers.append(res_skip_layer)

    def forward(self, x, x_mask, g=None, **kwargs):
        output = torch.zeros_like(x)
        n_channels_tensor = torch.IntTensor([self.hidden_channels])

        if g is not None:
            g = self.cond_layer(g)

        for i in range(self.n_layers):
            x_in = self.in_layers[i](x)
            if g is not None:
                cond_offset = i * 2 * self.hidden_channels
                g_l = g[:, cond_offset:cond_offset + 2 * self.hidden_channels, :]
            else:
                g_l = torch.zeros_like(x_in)

            acts = commons.fused_add_tanh_sigmoid_multiply(
                x_in,
                g_l,
                n_channels_tensor)
            acts = self.drop(acts)

            res_skip_acts = self.res_skip_layers[i](acts)
            if i < self.n_layers - 1:
                res_acts = res_skip_acts[:, :self.hidden_channels, :]
                x = (x + res_acts) * x_mask
                output = output + res_skip_acts[:, self.hidden_channels:, :]
            else:
                output = output + res_skip_acts
        return output * x_mask

    def remove_weight_norm(self):
        if self.gin_channels != 0:
            torch.nn.utils.remove_weight_norm(self.cond_layer)
        for l in self.in_layers:
            torch.nn.utils.remove_weight_norm(l)
        for l in self.res_skip_layers:
            torch.nn.utils.remove_weight_norm(l)


class ResBlock1(torch.nn.Module):
    def __init__(self, channels, kernel_size=3, dilation=(1, 3, 5)):
        super(ResBlock1, self).__init__()
        self.convs1 = nn.ModuleList([
            weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0],
                               padding=get_padding(kernel_size, dilation[0]))),
            weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1],
                               padding=get_padding(kernel_size, dilation[1]))),
            weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[2],
                               padding=get_padding(kernel_size, dilation[2])))
        ])
        self.convs1.apply(init_weights)

        self.convs2 = nn.ModuleList([
            weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1,
                               padding=get_padding(kernel_size, 1))),
            weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1,
                               padding=get_padding(kernel_size, 1))),
            weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1,
                               padding=get_padding(kernel_size, 1)))
        ])
        self.convs2.apply(init_weights)

    def forward(self, x, x_mask=None):
        for c1, c2 in zip(self.convs1, self.convs2):
            xt = F.leaky_relu(x, LRELU_SLOPE)
            if x_mask is not None:
                xt = xt * x_mask
            xt = c1(xt)
            xt = F.leaky_relu(xt, LRELU_SLOPE)
            if x_mask is not None:
                xt = xt * x_mask
            xt = c2(xt)
            x = xt + x
        if x_mask is not None:
            x = x * x_mask
        return x

    def remove_weight_norm(self):
        for l in self.convs1:
            remove_weight_norm(l)
        for l in self.convs2:
            remove_weight_norm(l)


class ResBlock2(torch.nn.Module):
    def __init__(self, channels, kernel_size=3, dilation=(1, 3)):
        super(ResBlock2, self).__init__()
        self.convs = nn.ModuleList([
            weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0],
                               padding=get_padding(kernel_size, dilation[0]))),
            weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1],
                               padding=get_padding(kernel_size, dilation[1])))
        ])
        self.convs.apply(init_weights)

    def forward(self, x, x_mask=None):
        for c in self.convs:
            xt = F.leaky_relu(x, LRELU_SLOPE)
            if x_mask is not None:
                xt = xt * x_mask
            xt = c(xt)
            x = xt + x
        if x_mask is not None:
            x = x * x_mask
        return x

    def remove_weight_norm(self):
        for l in self.convs:
            remove_weight_norm(l)


class Log(nn.Module):
    def forward(self, x, x_mask, reverse=False, **kwargs):
        if not reverse:
            y = torch.log(torch.clamp_min(x, 1e-5)) * x_mask
            logdet = torch.sum(-y, [1, 2])
            return y, logdet
        else:
            x = torch.exp(x) * x_mask
            return x


class Flip(nn.Module):
    def forward(self, x, *args, reverse=False, **kwargs):
        x = torch.flip(x, [1])
        if not reverse:
            logdet = torch.zeros(x.size(0)).to(dtype=x.dtype, device=x.device)
            return x, logdet
        else:
            return x


class ElementwiseAffine(nn.Module):
    def __init__(self, channels):
        super().__init__()
        self.channels = channels
        self.m = nn.Parameter(torch.zeros(channels, 1))
        self.logs = nn.Parameter(torch.zeros(channels, 1))

    def forward(self, x, x_mask, reverse=False, **kwargs):
        if not reverse:
            y = self.m + torch.exp(self.logs) * x
            y = y * x_mask
            logdet = torch.sum(self.logs * x_mask, [1, 2])
            return y, logdet
        else:
            x = (x - self.m) * torch.exp(-self.logs) * x_mask
            return x


class ResidualCouplingLayer(nn.Module):
    def __init__(self,
                 channels,
                 hidden_channels,
                 kernel_size,
                 dilation_rate,
                 n_layers,
                 p_dropout=0,
                 gin_channels=0,
                 mean_only=False):
        assert channels % 2 == 0, "channels should be divisible by 2"
        super().__init__()
        self.channels = channels
        self.hidden_channels = hidden_channels
        self.kernel_size = kernel_size
        self.dilation_rate = dilation_rate
        self.n_layers = n_layers
        self.half_channels = channels // 2
        self.mean_only = mean_only

        self.pre = nn.Conv1d(self.half_channels, hidden_channels, 1)
        self.enc = WN(hidden_channels, kernel_size, dilation_rate, n_layers, p_dropout=p_dropout, gin_channels=gin_channels)
        self.post = nn.Conv1d(hidden_channels, self.half_channels * (2 - mean_only), 1)
        self.post.weight.data.zero_()
        self.post.bias.data.zero_()

    def forward(self, x, x_mask, g=None, reverse=False):
        x0, x1 = torch.split(x, [self.half_channels] * 2, 1)
        h = self.pre(x0) * x_mask
        h = self.enc(h, x_mask, g=g)
        stats = self.post(h) * x_mask
        if not self.mean_only:
            m, logs = torch.split(stats, [self.half_channels] * 2, 1)
        else:
            m = stats
            logs = torch.zeros_like(m)

        if not reverse:
            x1 = m + x1 * torch.exp(logs) * x_mask
            x = torch.cat([x0, x1], 1)
            logdet = torch.sum(logs, [1, 2])
            return x, logdet
        else:
            x1 = (x1 - m) * torch.exp(-logs) * x_mask
            x = torch.cat([x0, x1], 1)
            return x
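
# --- Invertibility sketch, not part of modules.py: a ResidualCouplingLayer
# --- run forward and then with reverse=True recovers its input exactly,
# --- since the reverse pass undoes the affine transform. Sizes are assumptions.
# import torch
# layer = ResidualCouplingLayer(channels=4, hidden_channels=8, kernel_size=5,
#                               dilation_rate=1, n_layers=2, mean_only=True)
# x = torch.randn(1, 4, 16)
# x_mask = torch.ones(1, 1, 16)
# with torch.no_grad():
#     y, logdet = layer(x, x_mask)
#     x_back = layer(y, x_mask, reverse=True)
# print(torch.allclose(x, x_back, atol=1e-6))                # True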
onnx_export.py
ADDED
@@ -0,0 +1,53 @@
| 1 |
+
import torch
|
| 2 |
+
from onnxexport.model_onnx import SynthesizerTrn
|
| 3 |
+
import utils
|
| 4 |
+
|
| 5 |
+
def main(NetExport):
|
| 6 |
+
path = "SoVits4.0"
|
| 7 |
+
if NetExport:
|
| 8 |
+
device = torch.device("cpu")
|
| 9 |
+
hps = utils.get_hparams_from_file(f"checkpoints/{path}/config.json")
|
| 10 |
+
SVCVITS = SynthesizerTrn(
|
| 11 |
+
hps.data.filter_length // 2 + 1,
|
| 12 |
+
hps.train.segment_size // hps.data.hop_length,
|
| 13 |
+
**hps.model)
|
| 14 |
+
_ = utils.load_checkpoint(f"checkpoints/{path}/model.pth", SVCVITS, None)
|
| 15 |
+
_ = SVCVITS.eval().to(device)
|
| 16 |
+
for i in SVCVITS.parameters():
|
| 17 |
+
i.requires_grad = False
|
| 18 |
+
|
| 19 |
+
test_hidden_unit = torch.rand(1, 10, 256)
|
| 20 |
+
test_pitch = torch.rand(1, 10)
|
| 21 |
+
test_mel2ph = torch.LongTensor([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]).unsqueeze(0)
|
| 22 |
+
test_uv = torch.ones(1, 10, dtype=torch.float32)
|
| 23 |
+
test_noise = torch.randn(1, 192, 10)
|
| 24 |
+
test_sid = torch.LongTensor([0])
|
| 25 |
+
input_names = ["c", "f0", "mel2ph", "uv", "noise", "sid"]
|
| 26 |
+
output_names = ["audio", ]
|
| 27 |
+
|
| 28 |
+
torch.onnx.export(SVCVITS,
|
| 29 |
+
(
|
| 30 |
+
test_hidden_unit.to(device),
|
| 31 |
+
test_pitch.to(device),
|
| 32 |
+
test_mel2ph.to(device),
|
| 33 |
+
test_uv.to(device),
|
| 34 |
+
test_noise.to(device),
|
| 35 |
+
test_sid.to(device)
|
| 36 |
+
),
|
| 37 |
+
f"checkpoints/{path}/model.onnx",
|
| 38 |
+
dynamic_axes={
|
| 39 |
+
"c": [0, 1],
|
| 40 |
+
"f0": [1],
|
| 41 |
+
"mel2ph": [1],
|
| 42 |
+
"uv": [1],
|
| 43 |
+
"noise": [2],
|
| 44 |
+
},
|
| 45 |
+
do_constant_folding=False,
|
| 46 |
+
opset_version=16,
|
| 47 |
+
verbose=False,
|
| 48 |
+
input_names=input_names,
|
| 49 |
+
output_names=output_names)
|
| 50 |
+
|
| 51 |
+
|
| 52 |
+
if __name__ == '__main__':
|
| 53 |
+
main(True)
|
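The dynamic_axes above keep the frame axis variable, so the exported graph accepts any utterance length at run time. A sketch of loading and running it (onnxruntime is an assumption here, it is not listed in either requirements file; shapes and dtypes mirror the dummy tensors used for tracing):

# Run the exported graph with random inputs of 10 frames.
import numpy as np
import onnxruntime as ort

sess = ort.InferenceSession("checkpoints/SoVits4.0/model.onnx",
                            providers=["CPUExecutionProvider"])
n_frames = 10
feed = {
    "c": np.random.rand(1, n_frames, 256).astype(np.float32),       # HuBERT units
    "f0": np.random.rand(1, n_frames).astype(np.float32),           # pitch in Hz
    "mel2ph": np.arange(n_frames, dtype=np.int64)[None, :],         # frame alignment
    "uv": np.ones((1, n_frames), dtype=np.float32),                 # voiced flags
    "noise": np.random.randn(1, 192, n_frames).astype(np.float32),  # prior noise z
    "sid": np.array([0], dtype=np.int64),                           # speaker id
}
audio, = sess.run(["audio"], feed)
print(audio.shape)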
onnxexport/model_onnx.py
ADDED
@@ -0,0 +1,335 @@
import torch
from torch import nn
from torch.nn import functional as F

import modules.attentions as attentions
import modules.commons as commons
import modules.modules as modules

from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d
from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm

import utils
from modules.commons import init_weights, get_padding
from vdecoder.hifigan.models import Generator
from utils import f0_to_coarse


class ResidualCouplingBlock(nn.Module):
    def __init__(self,
                 channels,
                 hidden_channels,
                 kernel_size,
                 dilation_rate,
                 n_layers,
                 n_flows=4,
                 gin_channels=0):
        super().__init__()
        self.channels = channels
        self.hidden_channels = hidden_channels
        self.kernel_size = kernel_size
        self.dilation_rate = dilation_rate
        self.n_layers = n_layers
        self.n_flows = n_flows
        self.gin_channels = gin_channels

        self.flows = nn.ModuleList()
        for i in range(n_flows):
            self.flows.append(
                modules.ResidualCouplingLayer(channels, hidden_channels, kernel_size, dilation_rate, n_layers,
                                              gin_channels=gin_channels, mean_only=True))
            self.flows.append(modules.Flip())

    def forward(self, x, x_mask, g=None, reverse=False):
        if not reverse:
            for flow in self.flows:
                x, _ = flow(x, x_mask, g=g, reverse=reverse)
        else:
            for flow in reversed(self.flows):
                x = flow(x, x_mask, g=g, reverse=reverse)
        return x


class Encoder(nn.Module):
    def __init__(self,
                 in_channels,
                 out_channels,
                 hidden_channels,
                 kernel_size,
                 dilation_rate,
                 n_layers,
                 gin_channels=0):
        super().__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.hidden_channels = hidden_channels
        self.kernel_size = kernel_size
        self.dilation_rate = dilation_rate
        self.n_layers = n_layers
        self.gin_channels = gin_channels

        self.pre = nn.Conv1d(in_channels, hidden_channels, 1)
        self.enc = modules.WN(hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=gin_channels)
        self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)

    def forward(self, x, x_lengths, g=None):
        # print(x.shape, x_lengths.shape)
        x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype)
        x = self.pre(x) * x_mask
        x = self.enc(x, x_mask, g=g)
        stats = self.proj(x) * x_mask
        m, logs = torch.split(stats, self.out_channels, dim=1)
        z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask
        return z, m, logs, x_mask


class TextEncoder(nn.Module):
    def __init__(self,
                 out_channels,
                 hidden_channels,
                 kernel_size,
                 n_layers,
                 gin_channels=0,
                 filter_channels=None,
                 n_heads=None,
                 p_dropout=None):
        super().__init__()
        self.out_channels = out_channels
        self.hidden_channels = hidden_channels
        self.kernel_size = kernel_size
        self.n_layers = n_layers
        self.gin_channels = gin_channels
        self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)
        self.f0_emb = nn.Embedding(256, hidden_channels)

        self.enc_ = attentions.Encoder(
            hidden_channels,
            filter_channels,
            n_heads,
            n_layers,
            kernel_size,
            p_dropout)

    def forward(self, x, x_mask, f0=None, z=None):
        x = x + self.f0_emb(f0).transpose(1, 2)
        x = self.enc_(x * x_mask, x_mask)
        stats = self.proj(x) * x_mask
        m, logs = torch.split(stats, self.out_channels, dim=1)
        z = (m + z * torch.exp(logs)) * x_mask
        return z, m, logs, x_mask


class DiscriminatorP(torch.nn.Module):
    def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False):
        super(DiscriminatorP, self).__init__()
        self.period = period
        self.use_spectral_norm = use_spectral_norm
        norm_f = weight_norm if use_spectral_norm == False else spectral_norm
        self.convs = nn.ModuleList([
            norm_f(Conv2d(1, 32, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))),
            norm_f(Conv2d(32, 128, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))),
            norm_f(Conv2d(128, 512, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))),
            norm_f(Conv2d(512, 1024, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))),
            norm_f(Conv2d(1024, 1024, (kernel_size, 1), 1, padding=(get_padding(kernel_size, 1), 0))),
        ])
        self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0)))

    def forward(self, x):
        fmap = []

        # 1d to 2d
        b, c, t = x.shape
        if t % self.period != 0:  # pad first
            n_pad = self.period - (t % self.period)
            x = F.pad(x, (0, n_pad), "reflect")
            t = t + n_pad
        x = x.view(b, c, t // self.period, self.period)

        for l in self.convs:
            x = l(x)
            x = F.leaky_relu(x, modules.LRELU_SLOPE)
            fmap.append(x)
        x = self.conv_post(x)
        fmap.append(x)
        x = torch.flatten(x, 1, -1)

        return x, fmap


class DiscriminatorS(torch.nn.Module):
    def __init__(self, use_spectral_norm=False):
        super(DiscriminatorS, self).__init__()
        norm_f = weight_norm if use_spectral_norm == False else spectral_norm
        self.convs = nn.ModuleList([
            norm_f(Conv1d(1, 16, 15, 1, padding=7)),
            norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)),
            norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)),
            norm_f(Conv1d(256, 1024, 41, 4, groups=64, padding=20)),
            norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)),
            norm_f(Conv1d(1024, 1024, 5, 1, padding=2)),
        ])
        self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1))

    def forward(self, x):
        fmap = []

        for l in self.convs:
            x = l(x)
            x = F.leaky_relu(x, modules.LRELU_SLOPE)
            fmap.append(x)
        x = self.conv_post(x)
        fmap.append(x)
        x = torch.flatten(x, 1, -1)

        return x, fmap


class F0Decoder(nn.Module):
    def __init__(self,
                 out_channels,
                 hidden_channels,
                 filter_channels,
                 n_heads,
                 n_layers,
                 kernel_size,
                 p_dropout,
                 spk_channels=0):
        super().__init__()
        self.out_channels = out_channels
        self.hidden_channels = hidden_channels
        self.filter_channels = filter_channels
        self.n_heads = n_heads
        self.n_layers = n_layers
        self.kernel_size = kernel_size
        self.p_dropout = p_dropout
        self.spk_channels = spk_channels

        self.prenet = nn.Conv1d(hidden_channels, hidden_channels, 3, padding=1)
        self.decoder = attentions.FFT(
            hidden_channels,
            filter_channels,
            n_heads,
            n_layers,
            kernel_size,
            p_dropout)
        self.proj = nn.Conv1d(hidden_channels, out_channels, 1)
        self.f0_prenet = nn.Conv1d(1, hidden_channels, 3, padding=1)
        self.cond = nn.Conv1d(spk_channels, hidden_channels, 1)

    def forward(self, x, norm_f0, x_mask, spk_emb=None):
        x = torch.detach(x)
        if spk_emb is not None:
            x = x + self.cond(spk_emb)
        x += self.f0_prenet(norm_f0)
        x = self.prenet(x) * x_mask
        x = self.decoder(x * x_mask, x_mask)
        x = self.proj(x) * x_mask
        return x


class SynthesizerTrn(nn.Module):
    """
    Synthesizer for Training
    """

    def __init__(self,
                 spec_channels,
                 segment_size,
                 inter_channels,
                 hidden_channels,
                 filter_channels,
                 n_heads,
                 n_layers,
                 kernel_size,
                 p_dropout,
                 resblock,
                 resblock_kernel_sizes,
                 resblock_dilation_sizes,
                 upsample_rates,
                 upsample_initial_channel,
                 upsample_kernel_sizes,
                 gin_channels,
                 ssl_dim,
                 n_speakers,
                 sampling_rate=44100,
                 **kwargs):
        super().__init__()
        self.spec_channels = spec_channels
        self.inter_channels = inter_channels
        self.hidden_channels = hidden_channels
        self.filter_channels = filter_channels
        self.n_heads = n_heads
        self.n_layers = n_layers
        self.kernel_size = kernel_size
        self.p_dropout = p_dropout
        self.resblock = resblock
        self.resblock_kernel_sizes = resblock_kernel_sizes
        self.resblock_dilation_sizes = resblock_dilation_sizes
        self.upsample_rates = upsample_rates
        self.upsample_initial_channel = upsample_initial_channel
        self.upsample_kernel_sizes = upsample_kernel_sizes
        self.segment_size = segment_size
        self.gin_channels = gin_channels
        self.ssl_dim = ssl_dim
        self.emb_g = nn.Embedding(n_speakers, gin_channels)

        self.pre = nn.Conv1d(ssl_dim, hidden_channels, kernel_size=5, padding=2)

        self.enc_p = TextEncoder(
            inter_channels,
            hidden_channels,
            filter_channels=filter_channels,
            n_heads=n_heads,
            n_layers=n_layers,
            kernel_size=kernel_size,
            p_dropout=p_dropout
        )
        hps = {
            "sampling_rate": sampling_rate,
            "inter_channels": inter_channels,
            "resblock": resblock,
            "resblock_kernel_sizes": resblock_kernel_sizes,
            "resblock_dilation_sizes": resblock_dilation_sizes,
            "upsample_rates": upsample_rates,
            "upsample_initial_channel": upsample_initial_channel,
            "upsample_kernel_sizes": upsample_kernel_sizes,
            "gin_channels": gin_channels,
        }
        self.dec = Generator(h=hps)
        self.enc_q = Encoder(spec_channels, inter_channels, hidden_channels, 5, 1, 16, gin_channels=gin_channels)
        self.flow = ResidualCouplingBlock(inter_channels, hidden_channels, 5, 1, 4, gin_channels=gin_channels)
        self.f0_decoder = F0Decoder(
            1,
            hidden_channels,
            filter_channels,
            n_heads,
            n_layers,
            kernel_size,
            p_dropout,
            spk_channels=gin_channels
        )
        self.emb_uv = nn.Embedding(2, hidden_channels)
        self.predict_f0 = False

    def forward(self, c, f0, mel2ph, uv, noise=None, g=None):

        decoder_inp = F.pad(c, [0, 0, 1, 0])
        mel2ph_ = mel2ph.unsqueeze(2).repeat([1, 1, c.shape[-1]])
        c = torch.gather(decoder_inp, 1, mel2ph_).transpose(1, 2)  # [B, T, H]

        c_lengths = (torch.ones(c.size(0)) * c.size(-1)).to(c.device)
        g = g.unsqueeze(0)
        g = self.emb_g(g).transpose(1, 2)
        x_mask = torch.unsqueeze(commons.sequence_mask(c_lengths, c.size(2)), 1).to(c.dtype)
        x = self.pre(c) * x_mask + self.emb_uv(uv.long()).transpose(1, 2)

        if self.predict_f0:
            lf0 = 2595. * torch.log10(1. + f0.unsqueeze(1) / 700.) / 500
            norm_lf0 = utils.normalize_f0(lf0, x_mask, uv, random_scale=False)
            pred_lf0 = self.f0_decoder(x, norm_lf0, x_mask, spk_emb=g)
            f0 = (700 * (torch.pow(10, pred_lf0 * 500 / 2595) - 1)).squeeze(1)

        z_p, m_p, logs_p, c_mask = self.enc_p(x, x_mask, f0=f0_to_coarse(f0), z=noise)
        z = self.flow(z_p, c_mask, g=g, reverse=True)
        o = self.dec(z * c_mask, g=g, f0=f0)
        return o
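One detail worth isolating is the mel2ph gather at the top of SynthesizerTrn.forward: a zero frame is prepended to the content sequence, so index 0 selects padding while indices 1..T select real content frames, repeated to match the mel timeline. A standalone sketch of that indexing with tiny tensors:

# mel2ph gather: map per-phone content onto mel frames (index 0 = zero pad).
import torch
import torch.nn.functional as F

c = torch.arange(1, 7, dtype=torch.float32).view(1, 3, 2)  # [B=1, T=3, H=2]
mel2ph = torch.LongTensor([[0, 1, 1, 2, 3]])               # [B, T_mel]
decoder_inp = F.pad(c, [0, 0, 1, 0])                       # prepend a zero frame
idx = mel2ph.unsqueeze(2).repeat(1, 1, c.shape[-1])        # broadcast over H
out = torch.gather(decoder_inp, 1, idx)                    # [B, T_mel, H]
print(out)
# tensor([[[0., 0.], [1., 2.], [1., 2.], [3., 4.], [5., 6.]]])

Note also that this export variant takes the prior noise z as an explicit input instead of sampling it inside the graph, which keeps the exported model deterministic for a given feed.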
preprocess_flist_config.py
ADDED
@@ -0,0 +1,83 @@
import os
import argparse
import re

from tqdm import tqdm
from random import shuffle
import json
import wave

config_template = json.load(open("configs_template/config_template.json"))

pattern = re.compile(r'^[\.a-zA-Z0-9_\/]+$')

def get_wav_duration(file_path):
    with wave.open(file_path, 'rb') as wav_file:
        # number of audio frames
        n_frames = wav_file.getnframes()
        # sampling rate
        framerate = wav_file.getframerate()
        # duration in seconds
        duration = n_frames / float(framerate)
        return duration

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--train_list", type=str, default="./filelists/train.txt", help="path to train list")
    parser.add_argument("--val_list", type=str, default="./filelists/val.txt", help="path to val list")
    parser.add_argument("--test_list", type=str, default="./filelists/test.txt", help="path to test list")
    parser.add_argument("--source_dir", type=str, default="./dataset/44k", help="path to source dir")
    args = parser.parse_args()

    train = []
    val = []
    test = []
    idx = 0
    spk_dict = {}
    spk_id = 0
    for speaker in tqdm(os.listdir(args.source_dir)):
        spk_dict[speaker] = spk_id
        spk_id += 1
        wavs = ["/".join([args.source_dir, speaker, i]) for i in os.listdir(os.path.join(args.source_dir, speaker))]
        new_wavs = []
        for file in wavs:
            if not file.endswith("wav"):
                continue
            if not pattern.match(file):
                print(f"warning: filename {file} contains characters other than letters, digits, underscores and slashes, which may cause errors. (or may not)")
            if get_wav_duration(file) < 0.3:
                print("skip too short audio:", file)
                continue
            new_wavs.append(file)
        wavs = new_wavs
        shuffle(wavs)
        train += wavs[2:-2]
        val += wavs[:2]
        test += wavs[-2:]

    shuffle(train)
    shuffle(val)
    shuffle(test)

    print("Writing", args.train_list)
    with open(args.train_list, "w") as f:
        for fname in tqdm(train):
            wavpath = fname
            f.write(wavpath + "\n")

    print("Writing", args.val_list)
    with open(args.val_list, "w") as f:
        for fname in tqdm(val):
            wavpath = fname
            f.write(wavpath + "\n")

    print("Writing", args.test_list)
    with open(args.test_list, "w") as f:
        for fname in tqdm(test):
            wavpath = fname
            f.write(wavpath + "\n")

    config_template["spk"] = spk_dict
    print("Writing configs/config.json")
    with open("configs/config.json", "w") as f:
        json.dump(config_template, f, indent=2)
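Per speaker, the script sends the first two shuffled clips to the validation list, the last two to the test list, and the rest to training, and it bakes the speaker-name to id mapping into the generated config. A quick sketch of inspecting that mapping after running the script (speaker names are whatever directories exist under dataset/44k):

# Check the speaker map written into configs/config.json.
import json

with open("configs/config.json") as f:
    cfg = json.load(f)
print(cfg["spk"])  # e.g. {"speaker0": 0, "speaker1": 1}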
preprocess_hubert_f0.py
ADDED
@@ -0,0 +1,62 @@
import math
import multiprocessing
import os
import argparse
from random import shuffle

import torch
from glob import glob
from tqdm import tqdm

import utils
import logging
logging.getLogger('numba').setLevel(logging.WARNING)
import librosa
import numpy as np

hps = utils.get_hparams_from_file("configs/config.json")
sampling_rate = hps.data.sampling_rate
hop_length = hps.data.hop_length


def process_one(filename, hmodel):
    # print(filename)
    wav, sr = librosa.load(filename, sr=sampling_rate)
    soft_path = filename + ".soft.pt"
    if not os.path.exists(soft_path):
        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        wav16k = librosa.resample(wav, orig_sr=sampling_rate, target_sr=16000)
        wav16k = torch.from_numpy(wav16k).to(device)
        c = utils.get_hubert_content(hmodel, wav_16k_tensor=wav16k)
        torch.save(c.cpu(), soft_path)
    f0_path = filename + ".f0.npy"
    if not os.path.exists(f0_path):
        f0 = utils.compute_f0_dio(wav, sampling_rate=sampling_rate, hop_length=hop_length)
        np.save(f0_path, f0)


def process_batch(filenames):
    print("Loading hubert for content...")
    device = "cuda" if torch.cuda.is_available() else "cpu"
    hmodel = utils.get_hubert_model().to(device)
    print("Loaded hubert.")
    for filename in tqdm(filenames):
        process_one(filename, hmodel)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--in_dir", type=str, default="dataset/44k", help="path to input dir")

    args = parser.parse_args()
    filenames = glob(f'{args.in_dir}/*/*.wav', recursive=True)  # [:10]
    shuffle(filenames)
    multiprocessing.set_start_method('spawn', force=True)

    num_processes = 1
    chunk_size = int(math.ceil(len(filenames) / num_processes))
    chunks = [filenames[i:i + chunk_size] for i in range(0, len(filenames), chunk_size)]
    print([len(c) for c in chunks])
    processes = [multiprocessing.Process(target=process_batch, args=(chunk,)) for chunk in chunks]
    for p in processes:
        p.start()
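The script caches two files next to every wav: a ".soft.pt" tensor of HuBERT content features and a ".f0.npy" array of frame-level pitch. A sketch of inspecting them after preprocessing (the wav path below is hypothetical; any file under the default dataset/44k layout works):

# Inspect the cached features process_one() writes next to each wav.
import numpy as np
import torch

wav_path = "dataset/44k/speaker0/clip0.wav"  # hypothetical example file
c = torch.load(wav_path + ".soft.pt")        # HuBERT content, shape [1, 256, T']
f0 = np.load(wav_path + ".f0.npy")           # F0 in Hz per hop, shape [T]
print(c.shape, f0.shape)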
raw/put_raw_wav_here
ADDED
File without changes
requirements.txt
ADDED
@@ -0,0 +1,18 @@
Flask
Flask_Cors
gradio
numpy
pyworld==0.2.5
scipy==1.7.3
SoundFile==0.12.1
torch==1.13.1
torchaudio==0.13.1
tqdm
scikit-maad
praat-parselmouth
onnx
onnxsim
onnxoptimizer
fairseq==0.12.2
librosa==0.8.1
tensorboard
requirements_win.txt
ADDED
@@ -0,0 +1,21 @@
librosa==0.9.2
fairseq==0.12.2
Flask==2.1.2
Flask_Cors==3.0.10
gradio==3.4.1
numpy==1.20.0
playsound==1.3.0
PyAudio==0.2.12
pydub==0.25.1
pyworld==0.3.0
requests==2.28.1
scipy==1.7.3
sounddevice==0.4.5
SoundFile==0.10.3.post1
starlette==0.19.1
tqdm==4.63.0
scikit-maad
praat-parselmouth
onnx
onnxsim
onnxoptimizer
resample.py
ADDED
@@ -0,0 +1,48 @@
import os
import argparse
import librosa
import numpy as np
from multiprocessing import Pool, cpu_count
from scipy.io import wavfile
from tqdm import tqdm


def process(item):
    spkdir, wav_name, args = item
    # speakers 's5', 'p280', 'p315' are excluded,
    speaker = spkdir.replace("\\", "/").split("/")[-1]
    wav_path = os.path.join(args.in_dir, speaker, wav_name)
    if os.path.exists(wav_path) and '.wav' in wav_path:
        os.makedirs(os.path.join(args.out_dir2, speaker), exist_ok=True)
        wav, sr = librosa.load(wav_path, sr=None)
        wav, _ = librosa.effects.trim(wav, top_db=20)
        peak = np.abs(wav).max()
        if peak > 1.0:
            wav = 0.98 * wav / peak
        wav2 = librosa.resample(wav, orig_sr=sr, target_sr=args.sr2)
        wav2 /= max(wav2.max(), -wav2.min())
        save_name = wav_name
        save_path2 = os.path.join(args.out_dir2, speaker, save_name)
        wavfile.write(
            save_path2,
            args.sr2,
            (wav2 * np.iinfo(np.int16).max).astype(np.int16)
        )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--sr2", type=int, default=44100, help="sampling rate")
    parser.add_argument("--in_dir", type=str, default="./dataset_raw", help="path to source dir")
    parser.add_argument("--out_dir2", type=str, default="./dataset/44k", help="path to target dir")
    args = parser.parse_args()
    num_processes = cpu_count() - 2 if cpu_count() > 4 else 1
    pool = Pool(processes=num_processes)

    for speaker in os.listdir(args.in_dir):
        spk_dir = os.path.join(args.in_dir, speaker)
        if os.path.isdir(spk_dir):
            print(spk_dir)
            for _ in tqdm(pool.imap_unordered(process, [(spk_dir, i, args) for i in os.listdir(spk_dir) if i.endswith("wav")])):
                pass
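After trimming and resampling, process() normalizes the signal so its peak sits at full scale and then converts to 16-bit PCM. A tiny sketch of that last conversion in isolation:

# Peak-normalize a float signal and quantize to int16, as resample.py does.
import numpy as np

wav2 = np.array([0.5, -0.25, 0.1], dtype=np.float32)
wav2 = wav2 / max(wav2.max(), -wav2.min())            # peak-normalize to [-1, 1]
pcm = (wav2 * np.iinfo(np.int16).max).astype(np.int16)
print(pcm)  # [ 32767 -16383   6553]

Note the division assumes a nonsilent clip; an all-zero input would divide by zero.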
spec_gen.py
ADDED
@@ -0,0 +1,22 @@
from data_utils import TextAudioSpeakerLoader
import json
from tqdm import tqdm

from utils import HParams

config_path = 'configs/config.json'
with open(config_path, "r") as f:
    data = f.read()
config = json.loads(data)
hps = HParams(**config)

train_dataset = TextAudioSpeakerLoader("filelists/train.txt", hps)
test_dataset = TextAudioSpeakerLoader("filelists/test.txt", hps)
eval_dataset = TextAudioSpeakerLoader("filelists/val.txt", hps)

for _ in tqdm(train_dataset):
    pass
for _ in tqdm(eval_dataset):
    pass
for _ in tqdm(test_dataset):
    pass
train.py
ADDED
@@ -0,0 +1,310 @@
import logging
import multiprocessing
import time

logging.getLogger('matplotlib').setLevel(logging.WARNING)
import os
import json
import argparse
import itertools
import math
import torch
from torch import nn, optim
from torch.nn import functional as F
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
import torch.multiprocessing as mp
import torch.distributed as dist
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.cuda.amp import autocast, GradScaler

import modules.commons as commons
import utils
from data_utils import TextAudioSpeakerLoader, TextAudioCollate
from models import (
    SynthesizerTrn,
    MultiPeriodDiscriminator,
)
from modules.losses import (
    kl_loss,
    generator_loss, discriminator_loss, feature_loss
)

from modules.mel_processing import mel_spectrogram_torch, spec_to_mel_torch

torch.backends.cudnn.benchmark = True
global_step = 0
start_time = time.time()

# os.environ['TORCH_DISTRIBUTED_DEBUG'] = 'INFO'


def main():
    """Assume Single Node Multi GPUs Training Only"""
    assert torch.cuda.is_available(), "CPU training is not allowed."
    hps = utils.get_hparams()

    n_gpus = torch.cuda.device_count()
    os.environ['MASTER_ADDR'] = 'localhost'
    os.environ['MASTER_PORT'] = hps.train.port

    mp.spawn(run, nprocs=n_gpus, args=(n_gpus, hps,))


def run(rank, n_gpus, hps):
    global global_step
    if rank == 0:
        logger = utils.get_logger(hps.model_dir)
        logger.info(hps)
        utils.check_git_hash(hps.model_dir)
        writer = SummaryWriter(log_dir=hps.model_dir)
        writer_eval = SummaryWriter(log_dir=os.path.join(hps.model_dir, "eval"))

    # for pytorch on win, backend use gloo
    dist.init_process_group(backend='gloo' if os.name == 'nt' else 'nccl', init_method='env://', world_size=n_gpus, rank=rank)
    torch.manual_seed(hps.train.seed)
    torch.cuda.set_device(rank)
    collate_fn = TextAudioCollate()
    train_dataset = TextAudioSpeakerLoader(hps.data.training_files, hps)
    num_workers = 5 if multiprocessing.cpu_count() > 4 else multiprocessing.cpu_count()
    train_loader = DataLoader(train_dataset, num_workers=num_workers, shuffle=False, pin_memory=True,
                              batch_size=hps.train.batch_size, collate_fn=collate_fn)
    if rank == 0:
        eval_dataset = TextAudioSpeakerLoader(hps.data.validation_files, hps)
        eval_loader = DataLoader(eval_dataset, num_workers=1, shuffle=False,
                                 batch_size=1, pin_memory=False,
                                 drop_last=False, collate_fn=collate_fn)

    net_g = SynthesizerTrn(
        hps.data.filter_length // 2 + 1,
        hps.train.segment_size // hps.data.hop_length,
        **hps.model).cuda(rank)
    net_d = MultiPeriodDiscriminator(hps.model.use_spectral_norm).cuda(rank)
    optim_g = torch.optim.AdamW(
        net_g.parameters(),
        hps.train.learning_rate,
        betas=hps.train.betas,
        eps=hps.train.eps)
    optim_d = torch.optim.AdamW(
        net_d.parameters(),
        hps.train.learning_rate,
        betas=hps.train.betas,
        eps=hps.train.eps)
    net_g = DDP(net_g, device_ids=[rank])  # , find_unused_parameters=True)
    net_d = DDP(net_d, device_ids=[rank])

    skip_optimizer = False
    try:
        _, _, _, epoch_str = utils.load_checkpoint(utils.latest_checkpoint_path(hps.model_dir, "G_*.pth"), net_g,
                                                   optim_g, skip_optimizer)
        _, _, _, epoch_str = utils.load_checkpoint(utils.latest_checkpoint_path(hps.model_dir, "D_*.pth"), net_d,
                                                   optim_d, skip_optimizer)
        epoch_str = max(epoch_str, 1)
        global_step = (epoch_str - 1) * len(train_loader)
    except:
        print("load old checkpoint failed...")
        epoch_str = 1
        global_step = 0
    if skip_optimizer:
        epoch_str = 1
        global_step = 0

    scheduler_g = torch.optim.lr_scheduler.ExponentialLR(optim_g, gamma=hps.train.lr_decay, last_epoch=epoch_str - 2)
    scheduler_d = torch.optim.lr_scheduler.ExponentialLR(optim_d, gamma=hps.train.lr_decay, last_epoch=epoch_str - 2)

    scaler = GradScaler(enabled=hps.train.fp16_run)

    for epoch in range(epoch_str, hps.train.epochs + 1):
        if rank == 0:
            train_and_evaluate(rank, epoch, hps, [net_g, net_d], [optim_g, optim_d], [scheduler_g, scheduler_d], scaler,
                               [train_loader, eval_loader], logger, [writer, writer_eval])
        else:
            train_and_evaluate(rank, epoch, hps, [net_g, net_d], [optim_g, optim_d], [scheduler_g, scheduler_d], scaler,
                               [train_loader, None], None, None)
        scheduler_g.step()
        scheduler_d.step()


def train_and_evaluate(rank, epoch, hps, nets, optims, schedulers, scaler, loaders, logger, writers):
    net_g, net_d = nets
    optim_g, optim_d = optims
    scheduler_g, scheduler_d = schedulers
    train_loader, eval_loader = loaders
    if writers is not None:
        writer, writer_eval = writers

    # train_loader.batch_sampler.set_epoch(epoch)
    global global_step

    net_g.train()
    net_d.train()
    for batch_idx, items in enumerate(train_loader):
        c, f0, spec, y, spk, lengths, uv = items
        g = spk.cuda(rank, non_blocking=True)
        spec, y = spec.cuda(rank, non_blocking=True), y.cuda(rank, non_blocking=True)
        c = c.cuda(rank, non_blocking=True)
        f0 = f0.cuda(rank, non_blocking=True)
        uv = uv.cuda(rank, non_blocking=True)
        lengths = lengths.cuda(rank, non_blocking=True)
        mel = spec_to_mel_torch(
            spec,
            hps.data.filter_length,
            hps.data.n_mel_channels,
            hps.data.sampling_rate,
            hps.data.mel_fmin,
            hps.data.mel_fmax)

        with autocast(enabled=hps.train.fp16_run):
            y_hat, ids_slice, z_mask, \
            (z, z_p, m_p, logs_p, m_q, logs_q), pred_lf0, norm_lf0, lf0 = net_g(c, f0, uv, spec, g=g, c_lengths=lengths,
                                                                                spec_lengths=lengths)

            y_mel = commons.slice_segments(mel, ids_slice, hps.train.segment_size // hps.data.hop_length)
            y_hat_mel = mel_spectrogram_torch(
                y_hat.squeeze(1),
                hps.data.filter_length,
                hps.data.n_mel_channels,
                hps.data.sampling_rate,
                hps.data.hop_length,
                hps.data.win_length,
                hps.data.mel_fmin,
                hps.data.mel_fmax
            )
            y = commons.slice_segments(y, ids_slice * hps.data.hop_length, hps.train.segment_size)  # slice

            # Discriminator
            y_d_hat_r, y_d_hat_g, _, _ = net_d(y, y_hat.detach())

            with autocast(enabled=False):
                loss_disc, losses_disc_r, losses_disc_g = discriminator_loss(y_d_hat_r, y_d_hat_g)
                loss_disc_all = loss_disc

        optim_d.zero_grad()
        scaler.scale(loss_disc_all).backward()
        scaler.unscale_(optim_d)
        grad_norm_d = commons.clip_grad_value_(net_d.parameters(), None)
        scaler.step(optim_d)

        with autocast(enabled=hps.train.fp16_run):
            # Generator
            y_d_hat_r, y_d_hat_g, fmap_r, fmap_g = net_d(y, y_hat)
            with autocast(enabled=False):
                loss_mel = F.l1_loss(y_mel, y_hat_mel) * hps.train.c_mel
                loss_kl = kl_loss(z_p, logs_q, m_p, logs_p, z_mask) * hps.train.c_kl
                loss_fm = feature_loss(fmap_r, fmap_g)
                loss_gen, losses_gen = generator_loss(y_d_hat_g)
                loss_lf0 = F.mse_loss(pred_lf0, lf0)
                loss_gen_all = loss_gen + loss_fm + loss_mel + loss_kl + loss_lf0
        optim_g.zero_grad()
        scaler.scale(loss_gen_all).backward()
        scaler.unscale_(optim_g)
        grad_norm_g = commons.clip_grad_value_(net_g.parameters(), None)
        scaler.step(optim_g)
        scaler.update()

        if rank == 0:
            if global_step % hps.train.log_interval == 0:
                lr = optim_g.param_groups[0]['lr']
                losses = [loss_disc, loss_gen, loss_fm, loss_mel, loss_kl]
                logger.info('Train Epoch: {} [{:.0f}%]'.format(
                    epoch,
                    100. * batch_idx / len(train_loader)))
                logger.info(f"Losses: {[x.item() for x in losses]}, step: {global_step}, lr: {lr}")

                scalar_dict = {"loss/g/total": loss_gen_all, "loss/d/total": loss_disc_all, "learning_rate": lr,
                               "grad_norm_d": grad_norm_d, "grad_norm_g": grad_norm_g}
                scalar_dict.update({"loss/g/fm": loss_fm, "loss/g/mel": loss_mel, "loss/g/kl": loss_kl,
                                    "loss/g/lf0": loss_lf0})

                # scalar_dict.update({"loss/g/{}".format(i): v for i, v in enumerate(losses_gen)})
                # scalar_dict.update({"loss/d_r/{}".format(i): v for i, v in enumerate(losses_disc_r)})
                # scalar_dict.update({"loss/d_g/{}".format(i): v for i, v in enumerate(losses_disc_g)})
                image_dict = {
                    "slice/mel_org": utils.plot_spectrogram_to_numpy(y_mel[0].data.cpu().numpy()),
                    "slice/mel_gen": utils.plot_spectrogram_to_numpy(y_hat_mel[0].data.cpu().numpy()),
                    "all/mel": utils.plot_spectrogram_to_numpy(mel[0].data.cpu().numpy()),
                    "all/lf0": utils.plot_data_to_numpy(lf0[0, 0, :].cpu().numpy(),
                                                        pred_lf0[0, 0, :].detach().cpu().numpy()),
                    "all/norm_lf0": utils.plot_data_to_numpy(lf0[0, 0, :].cpu().numpy(),
                                                             norm_lf0[0, 0, :].detach().cpu().numpy())
                }

                utils.summarize(
                    writer=writer,
                    global_step=global_step,
                    images=image_dict,
                    scalars=scalar_dict
                )

            if global_step % hps.train.eval_interval == 0:
                evaluate(hps, net_g, eval_loader, writer_eval)
                utils.save_checkpoint(net_g, optim_g, hps.train.learning_rate, epoch,
                                      os.path.join(hps.model_dir, "G_{}.pth".format(global_step)))
                utils.save_checkpoint(net_d, optim_d, hps.train.learning_rate, epoch,
                                      os.path.join(hps.model_dir, "D_{}.pth".format(global_step)))
                keep_ckpts = getattr(hps.train, 'keep_ckpts', 0)
                if keep_ckpts > 0:
                    utils.clean_checkpoints(path_to_models=hps.model_dir, n_ckpts_to_keep=keep_ckpts, sort_by_time=True)

        global_step += 1

    if rank == 0:
        global start_time
        now = time.time()
        duration = format(now - start_time, '.2f')
        logger.info(f'====> Epoch: {epoch}, cost {duration} s')
        start_time = now


def evaluate(hps, generator, eval_loader, writer_eval):
    generator.eval()
    image_dict = {}
    audio_dict = {}
    with torch.no_grad():
        for batch_idx, items in enumerate(eval_loader):
            c, f0, spec, y, spk, _, uv = items
            g = spk[:1].cuda(0)
            spec, y = spec[:1].cuda(0), y[:1].cuda(0)
            c = c[:1].cuda(0)
            f0 = f0[:1].cuda(0)
            uv = uv[:1].cuda(0)
            mel = spec_to_mel_torch(
                spec,
                hps.data.filter_length,
                hps.data.n_mel_channels,
                hps.data.sampling_rate,
                hps.data.mel_fmin,
                hps.data.mel_fmax)
            y_hat = generator.module.infer(c, f0, uv, g=g)

            y_hat_mel = mel_spectrogram_torch(
                y_hat.squeeze(1).float(),
                hps.data.filter_length,
                hps.data.n_mel_channels,
                hps.data.sampling_rate,
                hps.data.hop_length,
                hps.data.win_length,
                hps.data.mel_fmin,
                hps.data.mel_fmax
            )

            audio_dict.update({
                f"gen/audio_{batch_idx}": y_hat[0],
                f"gt/audio_{batch_idx}": y[0]
            })
            image_dict.update({
                f"gen/mel": utils.plot_spectrogram_to_numpy(y_hat_mel[0].cpu().numpy()),
                "gt/mel": utils.plot_spectrogram_to_numpy(mel[0].cpu().numpy())
            })
    utils.summarize(
        writer=writer_eval,
        global_step=global_step,
        images=image_dict,
        audios=audio_dict,
        audio_sampling_rate=hps.data.sampling_rate
    )
    generator.train()


if __name__ == "__main__":
    main()
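loss_lf0 above regresses the F0 decoder's prediction onto a scaled mel-frequency transform of the ground-truth pitch; the same transform pair appears in onnxexport/model_onnx.py when predict_f0 is enabled. A quick round-trip check of that mapping (a sketch; pitch values are arbitrary):

# Hz -> scaled mel and back, the pair used for lf0 in this repo.
import torch

f0 = torch.tensor([110.0, 220.0, 440.0])
lf0 = 2595. * torch.log10(1. + f0 / 700.) / 500           # Hz -> scaled mel
f0_rec = 700 * (torch.pow(10, lf0 * 500 / 2595) - 1)      # exact inverse
print(torch.allclose(f0, f0_rec, atol=1e-3))              # True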
utils.py
ADDED
|
@@ -0,0 +1,502 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import glob
|
| 3 |
+
import re
|
| 4 |
+
import sys
|
| 5 |
+
import argparse
|
| 6 |
+
import logging
|
| 7 |
+
import json
|
| 8 |
+
import subprocess
|
| 9 |
+
import random
|
| 10 |
+
|
| 11 |
+
import librosa
|
| 12 |
+
import numpy as np
|
| 13 |
+
from scipy.io.wavfile import read
|
| 14 |
+
import torch
|
| 15 |
+
from torch.nn import functional as F
|
| 16 |
+
from modules.commons import sequence_mask
|
| 17 |
+
from hubert import hubert_model
|
| 18 |
+
MATPLOTLIB_FLAG = False
|
| 19 |
+
|
| 20 |
+
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
|
| 21 |
+
logger = logging
|
| 22 |
+
|
| 23 |
+
f0_bin = 256
|
| 24 |
+
f0_max = 1100.0
|
| 25 |
+
f0_min = 50.0
|
| 26 |
+
f0_mel_min = 1127 * np.log(1 + f0_min / 700)
|
| 27 |
+
f0_mel_max = 1127 * np.log(1 + f0_max / 700)
|
| 28 |
+
|
| 29 |
+
|
| 30 |
+
# def normalize_f0(f0, random_scale=True):
|
| 31 |
+
# f0_norm = f0.clone() # create a copy of the input Tensor
|
| 32 |
+
# batch_size, _, frame_length = f0_norm.shape
|
| 33 |
+
# for i in range(batch_size):
|
| 34 |
+
# means = torch.mean(f0_norm[i, 0, :])
|
| 35 |
+
# if random_scale:
|
| 36 |
+
# factor = random.uniform(0.8, 1.2)
|
| 37 |
+
# else:
|
| 38 |
+
# factor = 1
|
| 39 |
+
# f0_norm[i, 0, :] = (f0_norm[i, 0, :] - means) * factor
|
| 40 |
+
# return f0_norm
|
| 41 |
+
# def normalize_f0(f0, random_scale=True):
|
| 42 |
+
# means = torch.mean(f0[:, 0, :], dim=1, keepdim=True)
|
| 43 |
+
# if random_scale:
|
| 44 |
+
# factor = torch.Tensor(f0.shape[0],1).uniform_(0.8, 1.2).to(f0.device)
|
| 45 |
+
# else:
|
| 46 |
+
# factor = torch.ones(f0.shape[0], 1, 1).to(f0.device)
|
| 47 |
+
# f0_norm = (f0 - means.unsqueeze(-1)) * factor.unsqueeze(-1)
|
| 48 |
+
# return f0_norm
|
| 49 |
+
def normalize_f0(f0, x_mask, uv, random_scale=True):
|
| 50 |
+
# calculate means based on x_mask
|
| 51 |
+
uv_sum = torch.sum(uv, dim=1, keepdim=True)
|
| 52 |
+
uv_sum[uv_sum == 0] = 9999
|
| 53 |
+
means = torch.sum(f0[:, 0, :] * uv, dim=1, keepdim=True) / uv_sum
|
| 54 |
+
|
| 55 |
+
if random_scale:
|
| 56 |
+
factor = torch.Tensor(f0.shape[0], 1).uniform_(0.8, 1.2).to(f0.device)
|
| 57 |
+
else:
|
| 58 |
+
factor = torch.ones(f0.shape[0], 1).to(f0.device)
|
| 59 |
+
# normalize f0 based on means and factor
|
| 60 |
+
f0_norm = (f0 - means.unsqueeze(-1)) * factor.unsqueeze(-1)
|
| 61 |
+
if torch.isnan(f0_norm).any():
|
| 62 |
+
exit(0)
|
| 63 |
+
return f0_norm * x_mask
|
| 64 |
+
|
| 65 |
+
|
| 66 |
+
def plot_data_to_numpy(x, y):
|
| 67 |
+
global MATPLOTLIB_FLAG
|
| 68 |
+
if not MATPLOTLIB_FLAG:
|
| 69 |
+
import matplotlib
|
| 70 |
+
matplotlib.use("Agg")
|
| 71 |
+
MATPLOTLIB_FLAG = True
|
| 72 |
+
mpl_logger = logging.getLogger('matplotlib')
|
| 73 |
+
mpl_logger.setLevel(logging.WARNING)
|
| 74 |
+
import matplotlib.pylab as plt
|
| 75 |
+
import numpy as np
|
| 76 |
+
|
| 77 |
+
fig, ax = plt.subplots(figsize=(10, 2))
|
| 78 |
+
plt.plot(x)
|
| 79 |
+
plt.plot(y)
|
| 80 |
+
plt.tight_layout()
|
| 81 |
+
|
| 82 |
+
fig.canvas.draw()
|
| 83 |
+
data = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep='')
|
| 84 |
+
data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,))
|
| 85 |
+
plt.close()
|
| 86 |
+
return data
|
| 87 |
+
|
| 88 |
+
|
| 89 |
+
|
| 90 |
+
def interpolate_f0(f0):
|
| 91 |
+
'''
|
| 92 |
+
对F0进行插值处理
|
| 93 |
+
'''
|
| 94 |
+
|
| 95 |
+
data = np.reshape(f0, (f0.size, 1))
|
| 96 |
+
|
| 97 |
+
vuv_vector = np.zeros((data.size, 1), dtype=np.float32)
|
| 98 |
+
vuv_vector[data > 0.0] = 1.0
|
| 99 |
+
vuv_vector[data <= 0.0] = 0.0
|
| 100 |
+
|
| 101 |
+
ip_data = data
|
| 102 |
+
|
| 103 |
+
frame_number = data.size
|
| 104 |
+
last_value = 0.0
|
| 105 |
+
for i in range(frame_number):
|
| 106 |
+
if data[i] <= 0.0:
|
| 107 |
+
j = i + 1
|
| 108 |
+
for j in range(i + 1, frame_number):
|
| 109 |
+
if data[j] > 0.0:
|
| 110 |
+
break
|
| 111 |
+
if j < frame_number - 1:
|
| 112 |
+
if last_value > 0.0:
|
| 113 |
+
step = (data[j] - data[i - 1]) / float(j - i)
|
| 114 |
+
for k in range(i, j):
|
| 115 |
+
ip_data[k] = data[i - 1] + step * (k - i + 1)
|
| 116 |
+
else:
|
| 117 |
+
for k in range(i, j):
|
| 118 |
+
ip_data[k] = data[j]
|
| 119 |
+
else:
|
| 120 |
+
for k in range(i, frame_number):
|
| 121 |
+
ip_data[k] = last_value
|
| 122 |
+
else:
|
| 123 |
+
ip_data[i] = data[i]
|
| 124 |
+
last_value = data[i]
|
| 125 |
+
|
| 126 |
+
return ip_data[:,0], vuv_vector[:,0]
|
| 127 |
+
|
| 128 |
+
|
| 129 |
+
def compute_f0_parselmouth(wav_numpy, p_len=None, sampling_rate=44100, hop_length=512):
|
| 130 |
+
import parselmouth
|
| 131 |
+
x = wav_numpy
|
| 132 |
+
if p_len is None:
|
| 133 |
+
p_len = x.shape[0]//hop_length
|
| 134 |
+
else:
|
| 135 |
+
assert abs(p_len-x.shape[0]//hop_length) < 4, "pad length error"
|
| 136 |
+
time_step = hop_length / sampling_rate * 1000
|
| 137 |
+
f0_min = 50
|
| 138 |
+
f0_max = 1100
|
| 139 |
+
f0 = parselmouth.Sound(x, sampling_rate).to_pitch_ac(
|
| 140 |
+
time_step=time_step / 1000, voicing_threshold=0.6,
|
| 141 |
+
pitch_floor=f0_min, pitch_ceiling=f0_max).selected_array['frequency']
|
| 142 |
+
|
| 143 |
+
pad_size=(p_len - len(f0) + 1) // 2
|
| 144 |
+
if(pad_size>0 or p_len - len(f0) - pad_size>0):
|
| 145 |
+
f0 = np.pad(f0,[[pad_size,p_len - len(f0) - pad_size]], mode='constant')
|
| 146 |
+
return f0
|
| 147 |
+
|
| 148 |
+
def resize_f0(x, target_len):
|
| 149 |
+
source = np.array(x)
|
| 150 |
+
source[source<0.001] = np.nan
|
| 151 |
+
target = np.interp(np.arange(0, len(source)*target_len, len(source))/ target_len, np.arange(0, len(source)), source)
|
| 152 |
+
res = np.nan_to_num(target)
|
| 153 |
+
return res
|
| 154 |
+
|
| 155 |
+
def compute_f0_dio(wav_numpy, p_len=None, sampling_rate=44100, hop_length=512):
|
| 156 |
+
import pyworld
|
| 157 |
+
if p_len is None:
|
| 158 |
+
p_len = wav_numpy.shape[0]//hop_length
|
| 159 |
+
f0, t = pyworld.dio(
|
| 160 |
+
wav_numpy.astype(np.double),
|
| 161 |
+
fs=sampling_rate,
|
| 162 |
+
f0_ceil=800,
|
| 163 |
+
frame_period=1000 * hop_length / sampling_rate,
|
| 164 |
+
)
|
| 165 |
+
f0 = pyworld.stonemask(wav_numpy.astype(np.double), f0, t, sampling_rate)
|
| 166 |
+
for index, pitch in enumerate(f0):
|
| 167 |
+
f0[index] = round(pitch, 1)
|
| 168 |
+
return resize_f0(f0, p_len)
|
| 169 |
+
|
| 170 |
+
def f0_to_coarse(f0):
|
| 171 |
+
is_torch = isinstance(f0, torch.Tensor)
|
| 172 |
+
f0_mel = 1127 * (1 + f0 / 700).log() if is_torch else 1127 * np.log(1 + f0 / 700)
|
| 173 |
+
f0_mel[f0_mel > 0] = (f0_mel[f0_mel > 0] - f0_mel_min) * (f0_bin - 2) / (f0_mel_max - f0_mel_min) + 1
|
| 174 |
+
|
| 175 |
+
f0_mel[f0_mel <= 1] = 1
|
| 176 |
+
f0_mel[f0_mel > f0_bin - 1] = f0_bin - 1
|
| 177 |
+
f0_coarse = (f0_mel + 0.5).long() if is_torch else np.rint(f0_mel).astype(np.int)
|
| 178 |
+
assert f0_coarse.max() <= 255 and f0_coarse.min() >= 1, (f0_coarse.max(), f0_coarse.min())
|
| 179 |
+
return f0_coarse
|
| 180 |
+
|
| 181 |
+
|
| 182 |
+
def get_hubert_model():
|
| 183 |
+
vec_path = "hubert/checkpoint_best_legacy_500.pt"
|
| 184 |
+
print("load model(s) from {}".format(vec_path))
|
| 185 |
+
from fairseq import checkpoint_utils
|
| 186 |
+
models, saved_cfg, task = checkpoint_utils.load_model_ensemble_and_task(
|
| 187 |
+
[vec_path],
|
| 188 |
+
suffix="",
|
| 189 |
+
)
|
| 190 |
+
model = models[0]
|
| 191 |
+
model.eval()
|
| 192 |
+
return model
|
| 193 |
+
|
| 194 |
+
def get_hubert_content(hmodel, wav_16k_tensor):
|
| 195 |
+
feats = wav_16k_tensor
|
| 196 |
+
if feats.dim() == 2: # double channels
|
| 197 |
+
feats = feats.mean(-1)
|
| 198 |
+
assert feats.dim() == 1, feats.dim()
|
| 199 |
+
feats = feats.view(1, -1)
|
| 200 |
+
padding_mask = torch.BoolTensor(feats.shape).fill_(False)
|
| 201 |
+
inputs = {
|
| 202 |
+
"source": feats.to(wav_16k_tensor.device),
|
| 203 |
+
"padding_mask": padding_mask.to(wav_16k_tensor.device),
|
| 204 |
+
"output_layer": 9, # layer 9
|
| 205 |
+
}
|
| 206 |
+
with torch.no_grad():
|
| 207 |
+
logits = hmodel.extract_features(**inputs)
|
| 208 |
+
feats = hmodel.final_proj(logits[0])
|
| 209 |
+
return feats.transpose(1, 2)
|
| 210 |
+
|
| 211 |
+
|
| 212 |
+
def get_content(cmodel, y):
|
| 213 |
+
with torch.no_grad():
|
| 214 |
+
c = cmodel.extract_features(y.squeeze(1))[0]
|
| 215 |
+
c = c.transpose(1, 2)
|
| 216 |
+
return c
|
| 217 |
+
|
| 218 |
+
|
| 219 |
+
|
| 220 |
+
def load_checkpoint(checkpoint_path, model, optimizer=None, skip_optimizer=False):
|
| 221 |
+
assert os.path.isfile(checkpoint_path)
|
| 222 |
+
checkpoint_dict = torch.load(checkpoint_path, map_location='cpu')
|
| 223 |
+
iteration = checkpoint_dict['iteration']
|
| 224 |
+
learning_rate = checkpoint_dict['learning_rate']
|
| 225 |
+
if optimizer is not None and not skip_optimizer and checkpoint_dict['optimizer'] is not None:
|
| 226 |
+
optimizer.load_state_dict(checkpoint_dict['optimizer'])
|
| 227 |
+
saved_state_dict = checkpoint_dict['model']
|
| 228 |
+
if hasattr(model, 'module'):
|
| 229 |
+
state_dict = model.module.state_dict()
|
| 230 |
+
else:
|
| 231 |
+
state_dict = model.state_dict()
|
| 232 |
+
new_state_dict = {}
|
| 233 |
+
for k, v in state_dict.items():
|
| 234 |
+
try:
|
| 235 |
+
# assert "dec" in k or "disc" in k
|
| 236 |
+
# print("load", k)
|
| 237 |
+
new_state_dict[k] = saved_state_dict[k]
|
| 238 |
+
assert saved_state_dict[k].shape == v.shape, (saved_state_dict[k].shape, v.shape)
|
| 239 |
+
except:
|
| 240 |
+
print("error, %s is not in the checkpoint" % k)
|
| 241 |
+
logger.info("%s is not in the checkpoint" % k)
|
| 242 |
+
new_state_dict[k] = v
|
| 243 |
+
if hasattr(model, 'module'):
|
| 244 |
+
model.module.load_state_dict(new_state_dict)
|
| 245 |
+
else:
|
| 246 |
+
model.load_state_dict(new_state_dict)
|
| 247 |
+
print("load ")
|
| 248 |
+
logger.info("Loaded checkpoint '{}' (iteration {})".format(
|
| 249 |
+
checkpoint_path, iteration))
|
| 250 |
+
return model, optimizer, learning_rate, iteration


def save_checkpoint(model, optimizer, learning_rate, iteration, checkpoint_path):
    logger.info("Saving model and optimizer state at iteration {} to {}".format(
        iteration, checkpoint_path))
    if hasattr(model, 'module'):
        state_dict = model.module.state_dict()
    else:
        state_dict = model.state_dict()
    torch.save({'model': state_dict,
                'iteration': iteration,
                'optimizer': optimizer.state_dict(),
                'learning_rate': learning_rate}, checkpoint_path)


def clean_checkpoints(path_to_models='logs/44k/', n_ckpts_to_keep=2, sort_by_time=True):
    """Free up space by deleting old checkpoints.

    Arguments:
    path_to_models  -- path to the model directory
    n_ckpts_to_keep -- number of checkpoints to keep, excluding G_0.pth and D_0.pth
    sort_by_time    -- True  -> delete the chronologically oldest checkpoints
                       False -> delete checkpoints in lexicographic order
    """
    ckpts_files = [f for f in os.listdir(path_to_models)
                   if os.path.isfile(os.path.join(path_to_models, f))]
    name_key = lambda _f: int(re.compile(r'._(\d+)\.pth').match(_f).group(1))
    time_key = lambda _f: os.path.getmtime(os.path.join(path_to_models, _f))
    sort_key = time_key if sort_by_time else name_key
    x_sorted = lambda _x: sorted([f for f in ckpts_files
                                  if f.startswith(_x) and not f.endswith('_0.pth')],
                                 key=sort_key)
    to_del = [os.path.join(path_to_models, fn) for fn in
              (x_sorted('G')[:-n_ckpts_to_keep] + x_sorted('D')[:-n_ckpts_to_keep])]
    del_info = lambda fn: logger.info(f".. Free up space by deleting ckpt {fn}")
    del_routine = lambda x: [os.remove(x), del_info(x)]
    rs = [del_routine(fn) for fn in to_del]
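# Illustrative usage sketch (not part of the original file): with the defaults
# this keeps only the two newest G_*.pth and D_*.pth under logs/44k/ and never
# deletes the pretrained G_0.pth / D_0.pth:
#
#   clean_checkpoints(path_to_models='logs/44k/', n_ckpts_to_keep=2)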


def summarize(writer, global_step, scalars={}, histograms={}, images={}, audios={}, audio_sampling_rate=22050):
    for k, v in scalars.items():
        writer.add_scalar(k, v, global_step)
    for k, v in histograms.items():
        writer.add_histogram(k, v, global_step)
    for k, v in images.items():
        writer.add_image(k, v, global_step, dataformats='HWC')
    for k, v in audios.items():
        writer.add_audio(k, v, global_step, audio_sampling_rate)
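# Illustrative usage sketch (not part of the original file), with hypothetical
# tag names and values:
#
#   from torch.utils.tensorboard import SummaryWriter
#   writer = SummaryWriter(log_dir="logs/44k")
#   summarize(writer, global_step=1000,
#             scalars={"loss/g/total": 2.3},
#             audios={"gen/audio": wav_tensor}, audio_sampling_rate=44100)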


def latest_checkpoint_path(dir_path, regex="G_*.pth"):
    f_list = glob.glob(os.path.join(dir_path, regex))
    f_list.sort(key=lambda f: int("".join(filter(str.isdigit, f))))
    x = f_list[-1]
    print(x)
    return x


def plot_spectrogram_to_numpy(spectrogram):
    global MATPLOTLIB_FLAG
    if not MATPLOTLIB_FLAG:
        import matplotlib
        matplotlib.use("Agg")
        MATPLOTLIB_FLAG = True
        mpl_logger = logging.getLogger('matplotlib')
        mpl_logger.setLevel(logging.WARNING)
    import matplotlib.pylab as plt
    import numpy as np

    fig, ax = plt.subplots(figsize=(10, 2))
    im = ax.imshow(spectrogram, aspect="auto", origin="lower",
                   interpolation='none')
    plt.colorbar(im, ax=ax)
    plt.xlabel("Frames")
    plt.ylabel("Channels")
    plt.tight_layout()

    fig.canvas.draw()
    # np.fromstring is deprecated; frombuffer reads the rendered RGB canvas
    data = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8)
    data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,))
    plt.close()
    return data


def plot_alignment_to_numpy(alignment, info=None):
    global MATPLOTLIB_FLAG
    if not MATPLOTLIB_FLAG:
        import matplotlib
        matplotlib.use("Agg")
        MATPLOTLIB_FLAG = True
        mpl_logger = logging.getLogger('matplotlib')
        mpl_logger.setLevel(logging.WARNING)
    import matplotlib.pylab as plt
    import numpy as np

    fig, ax = plt.subplots(figsize=(6, 4))
    im = ax.imshow(alignment.transpose(), aspect='auto', origin='lower',
                   interpolation='none')
    fig.colorbar(im, ax=ax)
    xlabel = 'Decoder timestep'
    if info is not None:
        xlabel += '\n\n' + info
    plt.xlabel(xlabel)
    plt.ylabel('Encoder timestep')
    plt.tight_layout()

    fig.canvas.draw()
    data = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8)
    data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,))
    plt.close()
    return data


def load_wav_to_torch(full_path):
    sampling_rate, data = read(full_path)
    return torch.FloatTensor(data.astype(np.float32)), sampling_rate


def load_filepaths_and_text(filename, split="|"):
    with open(filename, encoding='utf-8') as f:
        filepaths_and_text = [line.strip().split(split) for line in f]
    return filepaths_and_text


def get_hparams(init=True):
    parser = argparse.ArgumentParser()
    parser.add_argument('-c', '--config', type=str, default="./configs/base.json",
                        help='JSON file for configuration')
    parser.add_argument('-m', '--model', type=str, required=True,
                        help='Model name')

    args = parser.parse_args()
    model_dir = os.path.join("./logs", args.model)

    if not os.path.exists(model_dir):
        os.makedirs(model_dir)

    config_path = args.config
    config_save_path = os.path.join(model_dir, "config.json")
    if init:
        # copy the config into the model dir so the run is reproducible
        with open(config_path, "r") as f:
            data = f.read()
        with open(config_save_path, "w") as f:
            f.write(data)
    else:
        with open(config_save_path, "r") as f:
            data = f.read()
    config = json.loads(data)

    hparams = HParams(**config)
    hparams.model_dir = model_dir
    return hparams
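# Illustrative note (not part of the original file): train.py reads its
# configuration through get_hparams(), so a typical invocation is
#
#   python train.py -c configs/config.json -m 44k
#
# which copies configs/config.json to logs/44k/config.json and returns the
# parsed HParams with model_dir set to "./logs/44k".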


def get_hparams_from_dir(model_dir):
    config_save_path = os.path.join(model_dir, "config.json")
    with open(config_save_path, "r") as f:
        data = f.read()
    config = json.loads(data)

    hparams = HParams(**config)
    hparams.model_dir = model_dir
    return hparams


def get_hparams_from_file(config_path):
    with open(config_path, "r") as f:
        data = f.read()
    config = json.loads(data)

    hparams = HParams(**config)
    return hparams


def check_git_hash(model_dir):
    source_dir = os.path.dirname(os.path.realpath(__file__))
    if not os.path.exists(os.path.join(source_dir, ".git")):
        logger.warning("{} is not a git repository, therefore hash value comparison will be ignored.".format(
            source_dir))
        return

    cur_hash = subprocess.getoutput("git rev-parse HEAD")

    path = os.path.join(model_dir, "githash")
    if os.path.exists(path):
        saved_hash = open(path).read()
        if saved_hash != cur_hash:
            logger.warning("git hash values are different. {}(saved) != {}(current)".format(
                saved_hash[:8], cur_hash[:8]))
    else:
        open(path, "w").write(cur_hash)


def get_logger(model_dir, filename="train.log"):
    global logger
    logger = logging.getLogger(os.path.basename(model_dir))
    logger.setLevel(logging.DEBUG)

    formatter = logging.Formatter("%(asctime)s\t%(name)s\t%(levelname)s\t%(message)s")
    if not os.path.exists(model_dir):
        os.makedirs(model_dir)
    h = logging.FileHandler(os.path.join(model_dir, filename))
    h.setLevel(logging.DEBUG)
    h.setFormatter(formatter)
    logger.addHandler(h)
    return logger


def repeat_expand_2d(content, target_len):
    # content: [h, t] -> [h, target_len], repeating each source frame
    # (nearest-neighbour interpolation along the time axis)
    src_len = content.shape[-1]
    target = torch.zeros([content.shape[0], target_len], dtype=torch.float).to(content.device)
    temp = torch.arange(src_len + 1) * target_len / src_len
    current_pos = 0
    for i in range(target_len):
        if i < temp[current_pos + 1]:
            target[:, i] = content[:, current_pos]
        else:
            current_pos += 1
            target[:, i] = content[:, current_pos]

    return target
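# Illustrative worked example (not part of the original file): stretching a
# 2-frame feature map to 5 frames repeats each column in proportion, i.e.
# nearest-neighbour interpolation along the time axis:
#
#   x = torch.tensor([[1., 2.]])   # shape [1, 2]
#   repeat_expand_2d(x, 5)         # tensor([[1., 1., 1., 2., 2.]])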


class HParams():
    def __init__(self, **kwargs):
        for k, v in kwargs.items():
            if isinstance(v, dict):
                v = HParams(**v)
            self[k] = v

    def keys(self):
        return self.__dict__.keys()

    def items(self):
        return self.__dict__.items()

    def values(self):
        return self.__dict__.values()

    def __len__(self):
        return len(self.__dict__)

    def __getitem__(self, key):
        return getattr(self, key)

    def __setitem__(self, key, value):
        return setattr(self, key, value)

    def __contains__(self, key):
        return key in self.__dict__

    def __repr__(self):
        return self.__dict__.__repr__()
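# Illustrative usage sketch (not part of the original file): nested dicts from
# config.json become nested HParams, readable via attribute or item access:
#
#   hps = HParams(**{"train": {"batch_size": 6}, "model": {"gin_channels": 256}})
#   hps.train.batch_size          # 6
#   hps["model"]["gin_channels"]  # 256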
vdecoder/__init__.py
ADDED
File without changes

vdecoder/hifigan/env.py
ADDED
@@ -0,0 +1,15 @@
import os
import shutil


class AttrDict(dict):
    """Dict whose keys are also readable as attributes."""

    def __init__(self, *args, **kwargs):
        super(AttrDict, self).__init__(*args, **kwargs)
        self.__dict__ = self


def build_env(config, config_name, path):
    # copy the config file into the run directory unless it is already there
    t_path = os.path.join(path, config_name)
    if config != t_path:
        os.makedirs(path, exist_ok=True)
        shutil.copyfile(config, os.path.join(path, config_name))
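# Illustrative usage sketch (not part of the original file): the HiFi-GAN
# vocoder loads its JSON config into an AttrDict so keys double as attributes,
# and build_env keeps a copy of the config next to the checkpoints:
#
#   import json
#   with open("configs/config.json") as f:   # hypothetical config path
#       h = AttrDict(json.load(f))
#   h.sampling_rate                          # same as h["sampling_rate"]
#   build_env("configs/config.json", "config.json", "logs/44k")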