repo stringlengths 1 99 | file stringlengths 13 215 | code stringlengths 12 59.2M | file_length int64 12 59.2M | avg_line_length float64 3.82 1.48M | max_line_length int64 12 2.51M | extension_type stringclasses 1
value |
|---|---|---|---|---|---|---|
nnsvs | nnsvs-master/tests/test_model.py | import torch
from nnsvs.base import PredictionType
from nnsvs.model import (
FFN,
LSTMRNN,
LSTMRNNSAR,
MDN,
RMDN,
Conv1dResnet,
Conv1dResnetMDN,
Conv1dResnetSAR,
FFConvLSTM,
MDNv2,
VariancePredictor,
)
from nnsvs.util import init_seed
def test_deprecated_imports():
    """Deprecated model aliases must remain importable from nnsvs.model."""
    from nnsvs.model import ResF0Conv1dResnet  # noqa: F401
    from nnsvs.model import ResF0Conv1dResnetMDN  # noqa: F401
    from nnsvs.model import ResF0VariancePredictor  # noqa: F401
    from nnsvs.model import ResSkipF0FFConvLSTM  # noqa: F401
def _test_model_impl(model, in_dim, out_dim):
    """Forward/inference smoke test shared by all model tests.

    Runs one batch through ``model`` and checks output shapes for both
    deterministic and probabilistic (MDN) models.
    """
    B = 4
    T = 100
    init_seed(B * T)
    x = torch.rand(B, T, in_dim)
    lengths = torch.Tensor([T] * B).long()

    # warmup forward pass
    with torch.no_grad():
        y = model(x, lengths)
        y_inf = model.inference(x, lengths)

    # MDN case: forward returns (log_pi, log_sigma, mu)
    if model.prediction_type() == PredictionType.PROBABILISTIC:
        log_pi, log_sigma, mu = y
        num_gaussian = log_pi.shape[2]
        assert mu.shape == (B, T, num_gaussian, out_dim)
        assert log_sigma.shape == (B, T, num_gaussian, out_dim)
        # NOTE: inference output shouldn't have num_gaussian axis
        mu_inf, sigma_inf = y_inf
        assert mu_inf.shape == (B, T, out_dim)
        assert sigma_inf.shape == (B, T, out_dim)
    else:
        assert y.shape == (B, T, out_dim)
        assert y.shape == y_inf.shape
def test_ffn():
    """FFN is deterministic and passes the shape smoke test."""
    params = {
        "in_dim": 300,
        "hidden_dim": 8,
        "out_dim": 20,
        "num_layers": 2,
        "dropout": 0.1,
        "init_type": "none",
    }
    model = FFN(**params)
    assert model.prediction_type() == PredictionType.DETERMINISTIC
    _test_model_impl(model, params["in_dim"], params["out_dim"])


def test_lstmrnn():
    """LSTMRNN is deterministic and passes the shape smoke test."""
    params = {
        "in_dim": 300,
        "hidden_dim": 8,
        "out_dim": 20,
        "num_layers": 2,
        "dropout": 0.1,
        "init_type": "none",
    }
    model = LSTMRNN(**params)
    assert model.prediction_type() == PredictionType.DETERMINISTIC
    _test_model_impl(model, params["in_dim"], params["out_dim"])


def test_conv1d_resnet():
    """Conv1dResnet supports both deterministic and MDN modes."""
    params = {
        "in_dim": 300,
        "hidden_dim": 8,
        "out_dim": 180,
        "num_layers": 2,
        "num_gaussians": 2,
        "dim_wise": True,
        "init_type": "none",
    }
    model = Conv1dResnet(**{**params, "use_mdn": False})
    assert model.prediction_type() == PredictionType.DETERMINISTIC
    _test_model_impl(model, params["in_dim"], params["out_dim"])

    model = Conv1dResnet(**{**params, "use_mdn": True})
    assert model.prediction_type() == PredictionType.PROBABILISTIC
    _test_model_impl(model, params["in_dim"], params["out_dim"])

    # Deprecated alias must keep working
    model = Conv1dResnetMDN(**params)
    assert model.prediction_type() == PredictionType.PROBABILISTIC
    _test_model_impl(model, params["in_dim"], params["out_dim"])
def test_conv1d_resnet_sar():
    """Conv1dResnetSAR: shape smoke test plus target-preprocessing round trip."""
    params = {
        "in_dim": 300,
        "hidden_dim": 8,
        "out_dim": 20,
        "num_layers": 2,
        "stream_sizes": [10, 10],
        "ar_orders": [2, 2],
        "init_type": "none",
    }
    model = Conv1dResnetSAR(**params)
    assert model.prediction_type() == PredictionType.DETERMINISTIC
    _test_model_impl(model, params["in_dim"], params["out_dim"])

    # preprocess_target must preserve the target's shape
    y = torch.rand(8, 100, params["out_dim"])
    y_hat = model.preprocess_target(y)
    assert y.shape == y_hat.shape


def test_lstmrnn_sar():
    """LSTMRNNSAR: shape smoke test plus target-preprocessing round trip."""
    params = {
        "in_dim": 300,
        "hidden_dim": 8,
        "out_dim": 20,
        "num_layers": 2,
        "dropout": 0.1,
        "stream_sizes": [10, 10],
        "ar_orders": [2, 2],
        "init_type": "none",
    }
    model = LSTMRNNSAR(**params)
    assert model.prediction_type() == PredictionType.DETERMINISTIC
    _test_model_impl(model, params["in_dim"], params["out_dim"])

    y = torch.rand(8, 100, params["out_dim"])
    y_hat = model.preprocess_target(y)
    assert y.shape == y_hat.shape
def test_mdn():
    """MDN is probabilistic and passes the shape smoke test."""
    params = {
        "in_dim": 300,
        "hidden_dim": 8,
        "out_dim": 180,
        "num_layers": 2,
        "num_gaussians": 2,
        "dim_wise": True,
        "init_type": "none",
    }
    model = MDN(**params)
    assert model.prediction_type() == PredictionType.PROBABILISTIC
    _test_model_impl(model, params["in_dim"], params["out_dim"])


def test_mdnv2():
    """MDNv2 is probabilistic and passes the shape smoke test."""
    params = {
        "in_dim": 300,
        "hidden_dim": 8,
        "out_dim": 180,
        "num_layers": 2,
        "dropout": 0.5,
        "num_gaussians": 2,
        "dim_wise": True,
        "init_type": "none",
    }
    model = MDNv2(**params)
    assert model.prediction_type() == PredictionType.PROBABILISTIC
    _test_model_impl(model, params["in_dim"], params["out_dim"])


def test_rmdn():
    """RMDN is probabilistic and passes the shape smoke test."""
    params = {
        "in_dim": 300,
        "hidden_dim": 8,
        "out_dim": 180,
        "num_layers": 2,
        "num_gaussians": 2,
        "dim_wise": True,
        "init_type": "none",
    }
    model = RMDN(**params)
    assert model.prediction_type() == PredictionType.PROBABILISTIC
    _test_model_impl(model, params["in_dim"], params["out_dim"])
def test_ff_conv_lstm():
    """FFConvLSTM is deterministic and passes the shape smoke test."""
    params = {
        "in_dim": 300,
        "ff_hidden_dim": 8,
        "conv_hidden_dim": 8,
        "lstm_hidden_dim": 8,
        "dropout": 0.1,
        "num_lstm_layers": 2,
        "bidirectional": True,
        "out_dim": 180,
        "init_type": "none",
    }
    model = FFConvLSTM(**params)
    assert model.prediction_type() == PredictionType.DETERMINISTIC
    _test_model_impl(model, params["in_dim"], params["out_dim"])


def test_variance_predictor():
    """VariancePredictor supports both deterministic and MDN modes."""
    params = {
        "in_dim": 300,
        "out_dim": 180,
        "num_layers": 2,
        "hidden_dim": 8,
        "kernel_size": 5,
        "dropout": 0.5,
        "init_type": "none",
    }
    model = VariancePredictor(**{**params, "use_mdn": False})
    assert model.prediction_type() == PredictionType.DETERMINISTIC
    _test_model_impl(model, params["in_dim"], params["out_dim"])

    model = VariancePredictor(**{**params, "use_mdn": True})
    assert model.prediction_type() == PredictionType.PROBABILISTIC
    _test_model_impl(model, params["in_dim"], params["out_dim"])
| 6,229 | 26.688889 | 66 | py |
nnsvs | nnsvs-master/tests/test_model_configs.py | from pathlib import Path
import hydra
import nnsvs.bin.train
import nnsvs.bin.train_acoustic
import nnsvs.bin.train_postfilter
import pytest
import torch
from nnsvs.util import init_seed
from omegaconf import OmegaConf
from .util import _test_model_impl
RECIPE_DIR = Path(__file__).parent.parent / "recipes"
def _test_postfilter_impl(model, model_config):
    """Smoke-test a post-filter model: output shape must equal input shape."""
    B = 4
    T = 100
    init_seed(B * T)
    # Post-filter input dim is the sum of the configured feature streams
    in_dim = sum(model_config.stream_sizes)
    x = torch.rand(B, T, in_dim)
    lengths = torch.Tensor([T] * B).long()

    # warmup forward pass
    with torch.no_grad():
        y = model(x, lengths)
        y_inf = model.inference(x, lengths)

    assert x.shape == y.shape
    assert y_inf.shape == y.shape
@pytest.mark.parametrize(
    "model_config",
    (Path(nnsvs.bin.train.__file__).parent / "conf" / "train" / "model").glob("*.yaml"),
)
def test_model_config(model_config):
    """Every shipped train/model YAML must instantiate and pass the smoke test."""
    model_config = OmegaConf.load(model_config)
    model = hydra.utils.instantiate(model_config.netG)
    _test_model_impl(model, model_config.netG.in_dim, model_config.netG.out_dim)


@pytest.mark.parametrize(
    "model_config",
    (
        Path(nnsvs.bin.train_acoustic.__file__).parent
        / "conf"
        / "train_acoustic"
        / "model"
    ).glob("*.yaml"),
)
def test_acoustic_model_config(model_config):
    """Every shipped train_acoustic/model YAML must instantiate and run."""
    print(model_config)
    model_config = OmegaConf.load(model_config)
    # Dummy lf0 statistics: required by acoustic models but not stored in YAML
    model_config.netG.in_lf0_idx = 10
    model_config.netG.in_lf0_min = 5.3936276
    model_config.netG.in_lf0_max = 6.491111
    model_config.netG.out_lf0_idx = 60
    model_config.netG.out_lf0_mean = 5.953093881972361
    model_config.netG.out_lf0_scale = 0.23435173188961034
    model = hydra.utils.instantiate(model_config.netG)
    _test_model_impl(model, model_config.netG.in_dim, model_config.netG.out_dim)
@pytest.mark.parametrize(
    "model_config",
    (
        Path(nnsvs.bin.train_postfilter.__file__).parent
        / "conf"
        / "train_postfilter"
        / "model"
    ).glob("*.yaml"),
)
def test_postfilter_model_config(model_config):
    """Every shipped train_postfilter/model YAML must instantiate and run."""
    model_config = OmegaConf.load(model_config)
    if "stream_sizes" in model_config.netG:
        model_config.netG.stream_sizes = model_config.stream_sizes
    # Post-filter config should have netD
    hydra.utils.instantiate(model_config.netD)
    model = hydra.utils.instantiate(model_config.netG)
    _test_postfilter_impl(model, model_config)


@pytest.mark.parametrize(
    "model_config", RECIPE_DIR.glob("**/_common/conf/**/train/timelag/model/*.yaml")
)
def test_timelag_model_config_recipes(model_config):
    """Recipe timelag model YAMLs must instantiate and pass the smoke test."""
    model_config = OmegaConf.load(model_config)
    model = hydra.utils.instantiate(model_config.netG)
    _test_model_impl(model, model_config.netG.in_dim, model_config.netG.out_dim)


@pytest.mark.parametrize(
    "model_config", RECIPE_DIR.glob("**/_common/conf/**/train/duration/model/*.yaml")
)
def test_duration_model_config_recipes(model_config):
    """Recipe duration model YAMLs must instantiate and pass the smoke test."""
    model_config = OmegaConf.load(model_config)
    model = hydra.utils.instantiate(model_config.netG)
    _test_model_impl(model, model_config.netG.in_dim, model_config.netG.out_dim)
@pytest.mark.parametrize(
    "model_config", RECIPE_DIR.glob("**/_common/conf/**/train_acoustic/model/*.yaml")
)
def test_train_acoustic_model_config_recipes(model_config):
    """Recipe train_acoustic model YAMLs must instantiate and run."""
    print(model_config)
    model_config = OmegaConf.load(model_config)
    # Dummy lf0 statistics: required by acoustic models but not stored in YAML
    model_config.netG.in_lf0_idx = 10
    model_config.netG.in_lf0_min = 5.3936276
    model_config.netG.in_lf0_max = 6.491111
    model_config.netG.out_lf0_idx = 60
    model_config.netG.out_lf0_mean = 5.953093881972361
    model_config.netG.out_lf0_scale = 0.23435173188961034
    # NOTE: phoneme embedding impl requires careful specifications of
    # in_ph_start_idx and in_ph_end_idx, which is difficult to set properly
    # without hed files. So we skip phoneme embedding impl for now.
    if "embed_dim" in model_config.netG:
        model_config.netG.embed_dim = None
    keys = ["lf0_model", "mgc_model", "bap_model", "vuv_model", "encoder", "mel_model"]
    for key in keys:
        if key in model_config.netG and "embed_dim" in model_config.netG[key]:
            model_config.netG[key].embed_dim = None
        # For GaussianDiffusion that includes encoder in its implementation
        if (
            key in model_config.netG
            and "encoder" in model_config.netG[key]
            and "embed_dim" in model_config.netG[key]["encoder"]
        ):
            model_config.netG[key]["encoder"].embed_dim = None
    model = hydra.utils.instantiate(model_config.netG)
    _test_model_impl(model, model_config.netG.in_dim, model_config.netG.out_dim)


@pytest.mark.parametrize(
    "model_config",
    RECIPE_DIR.glob("**/icassp2023-24k-world/conf/train_acoustic/model/*.yaml"),
)
def test_train_acoustic_model_config_recipes_icassp2023(model_config):
    """ICASSP2023 recipe acoustic model YAMLs must instantiate and run."""
    model_config = OmegaConf.load(model_config)
    # Dummy lf0 statistics: required by acoustic models but not stored in YAML
    model_config.netG.in_lf0_idx = 10
    model_config.netG.in_lf0_min = 5.3936276
    model_config.netG.in_lf0_max = 6.491111
    model_config.netG.out_lf0_idx = 60
    model_config.netG.out_lf0_mean = 5.953093881972361
    model_config.netG.out_lf0_scale = 0.23435173188961034
    # NOTE: phoneme embedding impl requires careful specifications of
    # in_ph_start_idx and in_ph_end_idx, which is difficult to set properly
    # without hed files. So we skip phoneme embedding impl for now.
    if "embed_dim" in model_config.netG:
        model_config.netG.embed_dim = None
    keys = ["lf0_model", "mgc_model", "bap_model", "vuv_model", "encoder", "mel_model"]
    for key in keys:
        if key in model_config.netG and "embed_dim" in model_config.netG[key]:
            model_config.netG[key].embed_dim = None
    model = hydra.utils.instantiate(model_config.netG)
    _test_model_impl(model, model_config.netG.in_dim, model_config.netG.out_dim)
@pytest.mark.parametrize(
    "model_config", RECIPE_DIR.glob("**/_common/conf/**/train_postfilter/model/*.yaml")
)
def test_postfilter_config_recipes(model_config):
    """Recipe train_postfilter model YAMLs must instantiate and run."""
    model_config = OmegaConf.load(model_config)
    if "stream_sizes" in model_config.netG:
        model_config.netG.stream_sizes = model_config.stream_sizes
    # Post-filter config should have netD
    hydra.utils.instantiate(model_config.netD)
    model = hydra.utils.instantiate(model_config.netG)
    _test_postfilter_impl(model, model_config)
| 6,395 | 33.203209 | 88 | py |
nnsvs | nnsvs-master/docs/conf.py | # -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config

# -- Path setup --------------------------------------------------------------

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))

# -- Project information -----------------------------------------------------
import os

import pkg_resources

__version__ = pkg_resources.get_distribution("nnsvs").version

ON_RTD = os.environ.get("READTHEDOCS", None) == "True"

project = "nnsvs"
copyright = "2020, Ryuichi Yamamoto"
author = "Ryuichi Yamamoto"

# The short X.Y version
version = __version__
# The full version, including alpha/beta/rc tags
release = __version__

# -- General configuration ---------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    "sphinx.ext.autodoc",
    "sphinx.ext.autosummary",
    "sphinx.ext.doctest",
    "sphinx.ext.intersphinx",
    "sphinx.ext.mathjax",
    "sphinx.ext.todo",
    "sphinx.ext.viewcode",
    "sphinx.ext.githubpages",
    "sphinx.ext.napoleon",
    "nbsphinx",
    "matplotlib.sphinxext.plot_directive",
    "IPython.sphinxext.ipython_console_highlighting",
    "IPython.sphinxext.ipython_directive",
    "sphinxcontrib.bibtex",
    "sphinxcontrib.youtube",
]

if ON_RTD:
    # Remove extensions not currently supported on RTD
    extensions.remove("matplotlib.sphinxext.plot_directive")

bibtex_bibfiles = ["refs.bib"]

autodoc_member_order = "bysource"
autosummary_generate = True
numpydoc_show_class_members = False

# ------------------------------------------------------------------------------
# Plot
# ------------------------------------------------------------------------------
doctest_global_setup = """
import numpy as np
import scipy
import librosa
np.random.seed(123)
np.set_printoptions(precision=3, linewidth=64, edgeitems=2, threshold=200)
"""

plot_pre_code = (
    doctest_global_setup
    + """
import matplotlib
import librosa
import librosa.display
matplotlib.rcParams['figure.constrained_layout.use'] = librosa.__version__ >= '0.8'
"""
)
plot_include_source = True
plot_html_show_source_link = False
plot_formats = [("png", 100), ("pdf", 100)]
plot_html_show_formats = False
plot_rcparams = {}

# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "default"

# -- Options for HTML output -------------------------------------------------

# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "sphinx_rtd_theme"

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
html_css_files = []

# -- Options for intersphinx extension ---------------------------------------

# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
    "python": ("https://docs.python.org/dev", None),
    "scipy": ("https://docs.scipy.org/doc/scipy/reference", None),
    "matplotlib": ("https://matplotlib.org/stable", None),
    "pytest": ("https://docs.pytest.org/en/stable", None),
}

# -- Options for todo extension ----------------------------------------------

# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
| 4,386 | 29.255172 | 83 | py |
nnsvs | nnsvs-master/neutrino_compat/server.py | """Web server implementation for singing voice synthesis
NOTE: validation is not implemented. Expect 500 errors for unexpected inputs.
"""
import tarfile
from os import listdir, rmdir
from pathlib import Path
from shutil import move
import numpy as np
import pyworld
import torch
from fastapi import FastAPI, UploadFile
from nnmnkwii.io import hts
from nnsvs.io.hts import full_to_mono
from nnsvs.svs import NEUTRINO
from omegaconf import OmegaConf
from scipy.io import wavfile
from starlette.responses import RedirectResponse, StreamingResponse
from utaupy.utils import ust2hts
# Directory layout used by the server. Paths are relative to the working dir.
SCORE_DIR = Path("./score")
MUSICXML_DIR = SCORE_DIR / "musicxml"
FULL_LAB_DIR = SCORE_DIR / "label" / "full"
MONO_LAB_DIR = SCORE_DIR / "label" / "mono"
UST_DIR = SCORE_DIR / "ust"
TIMING_LAB_DIR = SCORE_DIR / "label" / "timing"
OUTPUT_DIR = Path("./output")
MODEL_DIR = Path("./model")

# Create the whole layout up-front so the endpoints can assume it exists.
for d in [
    SCORE_DIR,
    MUSICXML_DIR,
    FULL_LAB_DIR,
    MONO_LAB_DIR,
    UST_DIR,
    TIMING_LAB_DIR,
    OUTPUT_DIR,
    MODEL_DIR,
]:
    d.mkdir(exist_ok=True, parents=True)

app = FastAPI()

# Cache of instantiated NEUTRINO engines, keyed by model_id.
_models = {}
def _instantiate_model(model_id):
    """Return a cached NEUTRINO engine for ``model_id``, creating it on first use."""
    global _models
    if model_id in _models:
        return _models[model_id]
    model = NEUTRINO(
        MODEL_DIR / model_id,
        device="cuda" if torch.cuda.is_available() else "cpu",
        verbose=100,
    )
    _models[model_id] = model
    return model


def _finalize():
    """Release cached GPU memory after serving an inference request."""
    if torch.cuda.is_available():
        torch.cuda.empty_cache()
@app.get("/healthcheck")
def perform_healthcheck():
    """Liveness probe endpoint."""
    return {"healthcheck": "OK"}


@app.get("/")
async def docs_redirect():
    """Redirect the root URL to the interactive API docs."""
    return RedirectResponse(url="/docs")


@app.get("/models/list")
async def model_list():
    """List available model IDs (sub-directories of MODEL_DIR)."""
    model_ids = listdir(MODEL_DIR)
    return {"model_ids": model_ids}


@app.get("/models/{model_id}")
async def model_info(model_id: str):
    """Return the config and repr of a model (instantiated fresh, not cached)."""
    model = NEUTRINO(
        MODEL_DIR / model_id,
        device="cuda" if torch.cuda.is_available() else "cpu",
        verbose=100,
    )
    return {
        "config": OmegaConf.to_container(model.config),
        "repr": repr(model),
    }
@app.post("/models/create")
def create_model(model: UploadFile, model_id: str):
    """Register a model from an uploaded ``.tar.gz`` archive.

    The archive is extracted into ``MODEL_DIR / model_id``; if the tarball
    contains a single top-level directory, its contents are flattened into
    the model directory.
    """
    filename = Path(model.filename)
    if str(filename).endswith(".tar.gz"):
        model_dir = MODEL_DIR / model_id
        model_dir.mkdir(exist_ok=True)
        # NOTE(security): extractall on an untrusted archive is vulnerable to
        # path traversal; validation is not implemented (see module docstring).
        with tarfile.open(fileobj=model.file, mode="r|gz") as f:
            f.extractall(path=model_dir)
        tar_dir_path = model_dir / filename.name.replace(".tar.gz", "")
        # Move all contents to model_dir / model_id
        if tar_dir_path.exists():
            for name in listdir(tar_dir_path):
                move(tar_dir_path / name, model_dir / name)
            rmdir(tar_dir_path)
    else:
        raise NotImplementedError()
    return {"filename": model.filename}
@app.post("/score/full/upload")
async def upload_full_lab(full_lab: UploadFile):
    """Store uploaded full-context labels under the score directory."""
    with open(f"{FULL_LAB_DIR}/{full_lab.filename}", "wb") as f:
        f.write(full_lab.file.read())
    return {"filename": full_lab.filename}


@app.post("/score/timing/upload")
async def upload_timing_lab(timing_lab: UploadFile):
    """Store uploaded (possibly hand-edited) timing labels."""
    with open(f"{TIMING_LAB_DIR}/{timing_lab.filename}", "wb") as f:
        f.write(timing_lab.file.read())
    return {"filename": timing_lab.filename}


@app.post("/score/musicxml/upload")
async def upload_musicxml(musicxml: UploadFile):
    """Store a MusicXML score and derive full/mono HTS labels from it."""
    filename = musicxml.filename
    musicxml_path = MUSICXML_DIR / filename
    with open(musicxml_path, "wb") as f:
        f.write(musicxml.file.read())

    full_labels, mono_labels = NEUTRINO.musicxml2label(str(musicxml_path))
    full_lab_path = FULL_LAB_DIR / (
        filename.replace(".musicxml", "").replace(".xml", "") + ".lab"
    )
    mono_lab_path = MONO_LAB_DIR / (
        filename.replace(".musicxml", "").replace(".xml", "") + ".lab"
    )
    with open(full_lab_path, "w") as f:
        f.write(str(full_labels))
    with open(mono_lab_path, "w") as f:
        f.write(str(mono_labels))

    return {"filename": filename}
@app.post("/score/ust/upload")
async def upload_ust(ust: UploadFile, model_id: str):
    """Store a UST file and convert it to full-context labels.

    The conversion needs the model's kana-to-phoneme table, hence the
    ``model_id`` parameter.
    """
    ust_path = UST_DIR / ust.filename
    with open(ust_path, "wb") as f:
        f.write(ust.file.read())

    model_dir = MODEL_DIR / model_id
    table_path = model_dir / "kana2phonemes.table"
    assert table_path.exists()
    full_lab = FULL_LAB_DIR / ust.filename.replace(".ust", ".lab")
    ust2hts(
        str(ust_path), full_lab, table_path, strict_sinsy_style=False, as_mono=False
    )
    return {"filename": ust.filename}
@app.get("/run/timing")
async def run_timing(name: str, model_id: str):
    """Predict timing labels for a previously uploaded score."""
    model = _instantiate_model(model_id)
    model.set_device("cuda" if torch.cuda.is_available() else "cpu")

    full_lab = FULL_LAB_DIR / (name + ".lab")
    assert full_lab.exists(), f"{full_lab} does not exist"
    full_labels = hts.load(full_lab)

    timing_labels = full_to_mono(model.predict_timing(full_labels))
    # TODO: Do we want to save timing for each model?
    # timing_lab = TIMING_LAB_DIR / model_id / (name + ".lab")
    timing_lab = TIMING_LAB_DIR / (name + ".lab")
    with open(timing_lab, "w") as f:
        f.write(str(timing_labels))

    _finalize()
    return {"timing": str(timing_labels)}


@app.get("/run/phrases")
async def run_phrases(name: str, model_id: str):
    """Compute and persist the phraselist for a score; also report phrase count."""
    model = _instantiate_model(model_id)
    model.set_device("cuda" if torch.cuda.is_available() else "cpu")

    full_lab = FULL_LAB_DIR / (name + ".lab")
    assert full_lab.exists(), f"{full_lab} does not exist"
    full_labels = hts.load(full_lab)

    timing_lab = TIMING_LAB_DIR / (name + ".lab")
    assert timing_lab.exists(), "Timing labels not found. "
    timing_labels = hts.load(timing_lab)

    model_output_dir = OUTPUT_DIR / model_id
    model_output_dir.mkdir(exist_ok=True)

    # Dump phraselist
    phraselist_path = model_output_dir / (name + "-phraselist.txt")
    phraselist = model.get_phraselist(full_labels, timing_labels)
    with open(phraselist_path, "w") as f:
        f.write(str(phraselist))

    # Dump num_phrases for convenience
    num_phrases = model.get_num_phrases(full_labels)
    return {"phraselist": phraselist, "num_phrases": num_phrases}
@app.get("/run/acoustic")
async def run_acoustic(name: str, model_id: str, phrase_num: int = -1):
    """Predict f0/mgc/bap and stream them back as one packed float64 blob.

    ``phrase_num`` > 0 restricts synthesis to a single phrase; outputs are
    then saved under ``{name}-{phrase_num}``.
    """
    model = _instantiate_model(model_id)
    model.set_device("cuda" if torch.cuda.is_available() else "cpu")

    full_lab = FULL_LAB_DIR / (name + ".lab")
    assert full_lab.exists(), f"{full_lab} does not exist"
    full_labels = hts.load(full_lab)
    timing_lab = TIMING_LAB_DIR / (name + ".lab")
    assert timing_lab.exists(), "Timing labels not found. "
    timing_labels = hts.load(timing_lab)

    model_output_dir = OUTPUT_DIR / model_id
    model_output_dir.mkdir(exist_ok=True)

    f0, mgc, bap = model.predict_acoustic(
        full_labels,
        timing_labels,
        phrase_num=phrase_num,
    )
    _finalize()

    if phrase_num > 0:
        name = f"{name}-{phrase_num}"
    f0.tofile(model_output_dir / (name + ".f0"))
    mgc.tofile(model_output_dir / (name + ".mgc"))
    bap.tofile(model_output_dir / (name + ".bap"))

    # NOTE: pack into a single file for convenience
    feats = np.concatenate([f0, mgc, bap], axis=1).astype(np.float64)
    path = model_output_dir / (name + ".bin")
    feats.tofile(path)

    def iterfile():
        with open(path, mode="rb") as file_like:
            yield from file_like

    return StreamingResponse(iterfile())
@app.get("/run/vocoder")
async def run_vocoder(
    name: str,
    model_id: str,
    vocoder_type: str = "world",
    phrase_num: int = -1,
    loudness_norm: bool = False,
    dtype: str = "int16",
):
    """Synthesize a waveform from saved acoustic features and stream raw PCM."""
    model = _instantiate_model(model_id)
    model.set_device("cuda" if torch.cuda.is_available() else "cpu")

    if phrase_num > 0:
        name = f"{name}-{phrase_num}"
    f0_path = OUTPUT_DIR / model_id / (name + ".f0")
    mgc_path = OUTPUT_DIR / model_id / (name + ".mgc")
    bap_path = OUTPUT_DIR / model_id / (name + ".bap")
    f0 = np.fromfile(f0_path, dtype=np.float64).reshape(-1, 1)
    mgc = np.fromfile(mgc_path, dtype=np.float64).reshape(-1, 60)
    bap = np.fromfile(bap_path, dtype=np.float64).reshape(
        -1, pyworld.get_num_aperiodicities(model.sample_rate)
    )

    wav = model.predict_waveform(
        f0,
        mgc,
        bap,
        vocoder_type=vocoder_type,
        loudness_norm=loudness_norm,
        dtype=dtype,
    )
    _finalize()

    if vocoder_type == "world":
        suffix = "_syn.wav"
    else:
        suffix = "_nsf.wav"
    wav_path = OUTPUT_DIR / model_id / (name + suffix)
    wavfile.write(wav_path, model.sample_rate, wav)

    # Also dump the headerless raw samples; that is what gets streamed back.
    path = OUTPUT_DIR / model_id / (name + suffix.replace(".wav", ".raw"))
    wav.tofile(path)

    def iterfile():
        with open(path, mode="rb") as file_like:
            yield from file_like

    return StreamingResponse(iterfile())
| 8,930 | 27.352381 | 84 | py |
nnsvs | nnsvs-master/neutrino_compat/bin/NEUTRINO.py | """Predict acoustic features by NNSVS with NEUTRINO-compatible file IO
NOTE: options are not yet fully implemented
NEUTRINO - NEURAL SINGING SYNTHESIZER (Electron v1.2.0-Stable)
Copyright (c) 2020-2022 STUDIO NEUTRINO All rights reserved.
usage:
NEUTRINO full.lab timing.lab output.f0 output.mgc output.bap model_directory [option]
options : description [default]
-n i : number of threads (CPU) [MAX]
-k i : style shift [ 0]
-s : skip timing prediction [off]
-a : skip acoustic features prediction [off]
-p i : single phrase prediction [ -1]
-i filename : trace phrase information [off]
-t : view information [off]
"""
import argparse
import logging
import sys
import tempfile
import time
from pathlib import Path
import numpy as np
import requests
def get_parser():
    """Build the NEUTRINO-compatible argument parser."""
    parser = argparse.ArgumentParser(
        description="Pretend as if the script is NEUTRINO",
    )
    parser.add_argument("input_file", type=str, help="Input file (.ust or .lab)")
    parser.add_argument("timing_lab", type=str, help="Path of timing labels")
    parser.add_argument("output_f0", type=str, help="Path of output F0")
    parser.add_argument("output_mgc", type=str, help="Path of output MGC")
    parser.add_argument("output_bap", type=str, help="Path of output BAP")
    parser.add_argument("model_dir", type=str, help="model_dir")
    parser.add_argument(
        "-i", "--phraselist", type=str, default=None, help="Path of phraselist"
    )
    parser.add_argument(
        "-p", "--phrase_num", type=int, default=-1, help="Phrase number"
    )
    parser.add_argument("--use_api", action="store_true", help="Use web API")
    parser.add_argument(
        "--url", type=str, default="http://127.0.0.1:8001", help="URL of the server"
    )
    return parser
def run_local(args, _):
    """Run timing/acoustic prediction locally with nnsvs.

    Returns:
        tuple: (f0, mgc, bap) numpy arrays.
    """
    import torch
    from nnmnkwii.io import hts
    from nnsvs.io.hts import full_to_mono
    from nnsvs.svs import NEUTRINO
    from utaupy.utils import ust2hts

    model_dir = Path(args.model_dir)
    # NOTE: this is needed to be compatible with NEUTRINO's Run.bat
    if not model_dir.exists():
        model_dir = "model" / model_dir
    engine = NEUTRINO(
        model_dir, device="cuda" if torch.cuda.is_available() else "cpu", verbose=100
    )

    input_file = Path(args.input_file)
    assert input_file.exists()
    if input_file.suffix == ".ust":
        table_path = model_dir / "kana2phonemes.table"
        assert table_path.exists()
        # Convert UST -> full-context labels via a temporary file
        with tempfile.NamedTemporaryFile(suffix=".lab") as tf:
            ust2hts(
                str(input_file),
                tf.name,
                table_path,
                strict_sinsy_style=False,
                as_mono=False,
            )
            full_labels = hts.HTSLabelFile()
            with open(tf.name) as f:
                for label in f.readlines():
                    full_labels.append(label.split(), strict=False)
    elif input_file.suffix == ".lab":
        full_labels = hts.load(input_file)
    else:
        raise ValueError(f"Not supported file type: {input_file.suffix}")

    timing_lab = Path(args.timing_lab)
    if not timing_lab.exists():
        timing_labels = full_to_mono(engine.predict_timing(full_labels))
        with open(timing_lab, "w") as f:
            f.write(str(timing_labels))
    else:
        timing_labels = hts.load(timing_lab)

    if args.phraselist is not None:
        phraselist = Path(args.phraselist)
        if not phraselist.exists():
            phraselist_str = engine.get_phraselist(full_labels, timing_labels)
            with open(phraselist, "w") as f:
                f.write(phraselist_str)

    # -1 (the default) means "synthesize all phrases", so only validate the
    # range when a single-phrase synthesis is actually requested.
    if args.phrase_num >= 0:
        num_phrases = engine.get_num_phrases(full_labels)
        if args.phrase_num >= num_phrases:
            raise ValueError(f"phrase_num must be in [0, {num_phrases - 1}]")

    f0, mgc, bap = engine.predict_acoustic(
        full_labels, timing_labels, phrase_num=args.phrase_num
    )
    return f0, mgc, bap
def run_api(args, logger):
    """Run timing/acoustic prediction via the NNSVS web API.

    Returns:
        tuple: (f0, mgc, bap) numpy arrays decoded from the server response.
    """
    input_file = Path(args.input_file)
    url = args.url[:-1] if args.url[-1] == "/" else args.url
    name = input_file.stem
    # pretend as if the model_dir is model_id
    model_id = args.model_dir

    # Upload full-context labels or UST
    if input_file.suffix == ".ust":
        logger.info(f"Uploading UST: {input_file}")
        res = requests.post(
            url + "/score/ust/upload",
            params={
                "model_id": model_id,
            },
            files={
                "ust": open(input_file, "rb"),
            },
        )
    elif input_file.suffix == ".lab":
        logger.info(f"Uploading full_lab: {input_file}")
        res = requests.post(
            url + "/score/full/upload",
            files={
                "full_lab": open(input_file, "rb"),
            },
        )
    else:
        # Fail fast: without this branch `res` below would be undefined.
        raise ValueError(f"Not supported file type: {input_file.suffix}")
    if res.status_code != 200:
        raise RuntimeError(f"Failed to upload file: {res.status_code}")

    # Upload (possibly modified) timing labels if present
    if Path(args.timing_lab).exists():
        logger.info(f"Uploading timing_lab: {args.timing_lab}")
        res = requests.post(
            url + "/score/timing/upload",
            files={
                "timing_lab": open(args.timing_lab, "rb"),
            },
        )
        if res.status_code != 200:
            raise RuntimeError(f"Failed to upload file: {res.status_code}")
    else:
        # Predict timing
        logger.info("Predicting timing")
        res = requests.get(
            url + "/run/timing",
            params={
                "name": name,
                "model_id": model_id,
            },
        )
        if res.status_code != 200:
            raise RuntimeError(f"Failed to predict timing: {res.status_code}")
        timing_str = res.json()["timing"]
        logger.info(timing_str)
        with open(args.timing_lab, "w") as f:
            f.write(timing_str)

    # Phraselist
    if args.phraselist is not None:
        logger.info("Predicting phraselist")
        res = requests.get(
            url + "/run/phrases",
            params={
                "name": name,
                "model_id": model_id,
            },
        )
        if res.status_code != 200:
            raise RuntimeError(f"Failed to predict phraselist: {res.status_code}")
        phraselist_str = res.json()["phraselist"]
        logger.info(phraselist_str)
        with open(args.phraselist, "w") as f:
            f.write(phraselist_str)

    # Predict acoustic features
    logger.info("Predicting acoustic features")
    res = requests.get(
        url + "/run/acoustic",
        params={
            "name": name,
            "model_id": model_id,
            "phrase_num": args.phrase_num,
        },
    )
    if res.status_code != 200:
        raise RuntimeError(f"Failed to predict acoustic features: {res.status_code}")

    # Server packs f0(1) + mgc(60) + bap(5) into one float64 array of width 66.
    feats = np.frombuffer(res.content, dtype=np.float64).reshape(-1, 66)
    f0 = feats[:, :1]
    mgc = feats[:, 1:61]
    bap = feats[:, 61:]
    logger.info(f"f0: {f0.shape}")
    logger.info(f"mgc: {mgc.shape}")
    logger.info(f"bap: {bap.shape}")

    return f0, mgc, bap
def main():
    """CLI entry point: parse args, run local or API inference, save outputs."""
    args = get_parser().parse_args(sys.argv[1:])
    start_time = time.time()

    format = "%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s"
    logger = logging.getLogger("NEUTRINO")
    logger.setLevel(logging.INFO)
    # Reset handlers so repeated invocations don't duplicate log lines
    if logger.hasHandlers():
        logger.handlers.clear()
    stream_handler = logging.StreamHandler()
    stream_handler.setFormatter(logging.Formatter(format))
    logger.addHandler(stream_handler)

    if args.use_api:
        logger.info(f"Using webapi: {args.url} for inference")
        f0, mgc, bap = run_api(args, logger)
    else:
        logger.info("Using local machine for inference")
        f0, mgc, bap = run_local(args, logger)

    # Save to file (NEUTRINO-compatible raw float64 dumps)
    f0.tofile(args.output_f0)
    mgc.tofile(args.output_mgc)
    bap.tofile(args.output_bap)

    logger.info(f"Elapsed time: {time.time() - start_time:.2f} sec")


if __name__ == "__main__":
    main()
| 8,240 | 32.5 | 89 | py |
nnsvs | nnsvs-master/neutrino_compat/bin/NSF.py | """Predict waveform by neural vocoders with NEUTRINO-compatible file IO
NOTE: options are not yet fully implemented
NSF - Neural Source Filter (v1.2.0-Stable)
Copyright (c) 2020-2022 STUDIO NEUTRINO All rights reserved.
usage:
NSF input.f0 input.mgc input.bap model_name output_wav [option]
options : description [default]
-s i : sampling rate (kHz) [ 48]
-n i : number of parallel [ MAX]
-p i : number of parallel in session [ 1]
-l file name : multi phrase prediction [ none]
-g : use gpu [ off]
-i i : gpu id [ 0]
-t : view information [ off]
"""
import argparse
import logging
import sys
import time
from pathlib import Path
import numpy as np
import requests
import soundfile as sf
def get_parser():
    """Build the NSF-compatible argument parser."""
    parser = argparse.ArgumentParser(
        description="Pretend as if the script is NSF of NEUTRINO",
    )
    parser.add_argument("input_f0", type=str, help="Path of input F0")
    parser.add_argument("input_mgc", type=str, help="Path of input MGC")
    parser.add_argument("input_bap", type=str, help="Path of input BAP")
    parser.add_argument("model_dir", type=str, help="model_dir")
    parser.add_argument("output_wav", type=str, help="Path of output wav")
    parser.add_argument("--use_api", action="store_true", help="Use web API")
    parser.add_argument(
        "--url", type=str, default="http://127.0.0.1:8001", help="URL of the server"
    )
    return parser
def run_local(args, _):
    """Synthesize a waveform locally from dumped f0/mgc/bap features.

    Returns:
        tuple: (wav, sample_rate).
    """
    import pyworld
    import torch
    from nnsvs.svs import NEUTRINO

    model_dir = Path(args.model_dir)
    # NOTE: this is needed to be compatible with NEUTRINO's Run.bat
    if not model_dir.exists():
        model_dir = "model" / model_dir
    engine = NEUTRINO(model_dir, device="cuda" if torch.cuda.is_available() else "cpu")

    f0 = np.fromfile(args.input_f0, dtype=np.float64).reshape(-1, 1)
    mgc = np.fromfile(args.input_mgc, dtype=np.float64).reshape(-1, 60)
    bap = np.fromfile(args.input_bap, dtype=np.float64).reshape(
        -1, pyworld.get_num_aperiodicities(engine.sample_rate)
    )
    # NOTE: `auto` will run uSFGAN or PWG if a trained one is in the model_dir
    # and fall back to WORLD if it doesn't exist.
    wav = engine.predict_waveform(f0, mgc, bap, vocoder_type="auto", dtype=np.int16)
    return wav, engine.sample_rate
def run_api(args, logger):
    """Synthesize the waveform via a remote NNSVS web API server.

    Args:
        args (argparse.Namespace): parsed CLI arguments
        logger (logging.Logger): logger

    Returns:
        tuple: (waveform ndarray, sampling rate)

    Raises:
        RuntimeError: if the server responds with a non-200 status code
    """
    url = args.url[:-1] if args.url[-1] == "/" else args.url

    # NOTE: for webAPI, these local files are not used for inference
    # files on the remote server are used instead.
    input_f0 = Path(args.input_f0)
    input_mgc = Path(args.input_mgc)
    input_bap = Path(args.input_bap)
    assert input_f0.exists()
    assert input_mgc.exists()
    assert input_bap.exists()

    # TODO: better way to handle phrase-based synthesis
    # At the moment, we need to tell the server which phrase we are synthesizing
    name = input_f0.stem
    if "-" in name:
        # NOTE: this code does not work for complicated filenames
        base, _, suffix = name.rpartition("-")
        phrase_num = int(suffix)
        name = name.split("-")[0]
    else:
        phrase_num = -1

    # pretend as if the model_dir is model_id
    model_id = args.model_dir

    # Fetch the model info to get the sampling rate
    info_res = requests.get(
        url + f"/models/{model_id}",
    )
    if info_res.status_code != 200:
        raise RuntimeError(f"Failed to fetch model info: {info_res.status_code}")
    sample_rate = info_res.json()["config"]["sample_rate"]

    # Run vocoder
    logger.info("Predicting waveform")
    dtype = "int16"
    voc_res = requests.get(
        url + "/run/vocoder",
        params={
            "name": name,
            "model_id": model_id,
            "phrase_num": phrase_num,
            "vocoder_type": "auto",
            "dtype": dtype,
            "loudness_norm": False,
        },
    )
    if voc_res.status_code != 200:
        raise RuntimeError(f"Failed to generate waveform: {voc_res.status_code}")

    wav = np.frombuffer(voc_res.content, dtype=dtype).reshape(-1)
    return wav, sample_rate
def main():
    """CLI entry point: run inference (local or web API) and write the wav."""
    args = get_parser().parse_args(sys.argv[1:])
    started = time.time()

    # NOTE: avoid shadowing the builtin ``format``
    fmt = "%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s"
    logger = logging.getLogger("NSF")
    logger.setLevel(logging.INFO)
    # Reset handlers so repeated invocations do not duplicate log output
    if logger.hasHandlers():
        logger.handlers.clear()
    handler = logging.StreamHandler()
    handler.setFormatter(logging.Formatter(fmt))
    logger.addHandler(handler)

    if args.use_api:
        logger.info(f"Using webapi: {args.url} for inference")
        wav, sr = run_api(args, logger)
    else:
        logger.info("Using local machine for inference")
        wav, sr = run_local(args, logger)

    sf.write(args.output_wav, wav, sr)
    logger.info(f"Elapsed time: {time.time() - started:.2f} sec")
# Script entry point
if __name__ == "__main__":
    main()
| 5,014 | 31.993421 | 87 | py |
nnsvs | nnsvs-master/utils/enunu2nnsvs.py | """Convert ENUNU's packed model to NNSVS's style
"""
import argparse
import os
import shutil
import sys
from pathlib import Path
import joblib
import numpy as np
import torch
from nnsvs.logger import getLogger
from nnsvs.util import StandardScaler as NNSVSStandardScaler
from omegaconf import OmegaConf
from sklearn.preprocessing import MinMaxScaler, StandardScaler
def get_parser():
    """Build the command-line parser for the ENUNU-to-NNSVS converter.

    Returns:
        argparse.ArgumentParser: the configured parser
    """
    p = argparse.ArgumentParser(
        description="Convert ENUNU's packed model to NNSVS's style",
    )
    p.add_argument("enunu_dir", type=str, help="ENUNU's model dir")
    p.add_argument("out_dir", type=str, help="Output dir")
    p.add_argument("--verbose", type=int, default=100, help="Verbose level")
    return p
def _scaler2numpy(input_file, out_dir, logger):
    """Dump a joblib-serialized scaler to plain npy files in ``out_dir``.

    StandardScaler-like objects produce ``<stem>_mean.npy``,
    ``<stem>_scale.npy`` and ``<stem>_var.npy``; MinMaxScaler produces
    ``<stem>_min.npy`` and ``<stem>_scale.npy``.

    Args:
        input_file (Path): path to the joblib scaler file
        out_dir (Path): directory to write the npy files into
        logger (logging.Logger): logger

    Raises:
        ValueError: if the scaler is of an unsupported type
    """
    scaler = joblib.load(input_file)
    stem = input_file.stem
    if isinstance(scaler, (StandardScaler, NNSVSStandardScaler)):
        logger.info(f"Converting {input_file} mean/scale npy files")
        # allow_pickle=False keeps the dumps loadable without pickle support
        np.save(out_dir / (stem + "_mean.npy"), scaler.mean_, allow_pickle=False)
        np.save(out_dir / (stem + "_scale.npy"), scaler.scale_, allow_pickle=False)
        np.save(out_dir / (stem + "_var.npy"), scaler.var_, allow_pickle=False)
    elif isinstance(scaler, MinMaxScaler):
        logger.info(f"Converting {input_file} min/max npy files")
        np.save(out_dir / (stem + "_min.npy"), scaler.min_, allow_pickle=False)
        np.save(out_dir / (stem + "_scale.npy"), scaler.scale_, allow_pickle=False)
    else:
        raise ValueError(f"Unknown scaler type: {type(scaler)}")
def _save_checkpoint(input_file, output_file, logger):
checkpoint = torch.load(
input_file, map_location=torch.device("cpu") # pylint: disable='no-member'
)
size = os.path.getsize(input_file)
logger.info(f"Processisng: {input_file}")
logger.info(f"File size (before): {size / 1024/1024:.3f} MB")
for k in ["optimizer_state", "lr_scheduler_state"]:
if k in checkpoint.keys():
del checkpoint[k]
# For https://github.com/kan-bayashi/ParallelWaveGAN
for k in ["optimizer", "lr_scheduler"]:
if k in checkpoint.keys():
del checkpoint[k]
if "model" in checkpoint and "discriminator" in checkpoint["model"]:
del checkpoint["model"]["discriminator"]
torch.save(checkpoint, output_file)
size = os.path.getsize(output_file)
logger.info(f"File size (after): {size / 1024/1024:.3f} MB")
def main(enunu_dir, out_dir, verbose=100):
    """Convert an ENUNU packed model directory into NNSVS's layout.

    Copies the question/table files, strips optimizer states from the
    timelag/duration/acoustic checkpoints, converts the joblib scalers to
    npy files, and writes an NNSVS-style config.yaml.

    NOTE: This function is used by https://github.com/oatsu-gh/SimpleEnunu.
    So we need to be careful about the changes.
    It would be probably better to move this functionality to under the nnsvs
    directory.

    Args:
        enunu_dir (str): path to the ENUNU model dir (contains enuconfig.yaml)
        out_dir (str): output directory (created if missing)
        verbose (int): verbosity level passed to nnsvs's getLogger
    """
    logger = getLogger(verbose=verbose)
    enunu_dir = Path(enunu_dir)
    out_dir = Path(out_dir)
    out_dir.mkdir(exist_ok=True, parents=True)
    enuconfig = OmegaConf.load(enunu_dir / "enuconfig.yaml")
    # Hed (question file used for linguistic feature extraction)
    qst_path = enunu_dir / enuconfig.question_path
    shutil.copyfile(qst_path, out_dir / "qst.hed")
    # Table (kana -> phoneme mapping)
    table_path = enunu_dir / enuconfig.table_path
    shutil.copyfile(table_path, out_dir / "kana2phonemes.table")
    # Models: copy model configs, strip checkpoints and convert scalers
    model_dir = enunu_dir / enuconfig.model_dir
    assert model_dir.exists()
    for typ in ["timelag", "duration", "acoustic"]:
        model_config = model_dir / typ / "model.yaml"
        assert model_config.exists()
        checkpoint = model_dir / typ / enuconfig[typ]["checkpoint"]
        assert checkpoint.exists()
        shutil.copyfile(model_config, out_dir / f"{typ}_model.yaml")
        # Drop optimizer/scheduler states to shrink the checkpoint
        _save_checkpoint(checkpoint, out_dir / f"{typ}_model.pth", logger)
        for inout in ["in", "out"]:
            scaler_path = (
                enunu_dir / enuconfig.stats_dir / f"{inout}_{typ}_scaler.joblib"
            )
            _scaler2numpy(scaler_path, out_dir, logger)
    # Config (NNSVS-style config.yaml generated from enuconfig values)
    s = f"""# Global configs
sample_rate: {enuconfig.sample_rate}
frame_period: 5
log_f0_conditioning: {enuconfig.log_f0_conditioning}
use_world_codec: false
# Model-specific synthesis configs
timelag:
  allowed_range: {enuconfig.timelag.allowed_range}
  allowed_range_rest: {enuconfig.timelag.allowed_range_rest}
  force_clip_input_features: true
duration:
  force_clip_input_features: true
acoustic:
  subphone_features: "coarse_coding"
  force_clip_input_features: true
  relative_f0: {enuconfig.acoustic.relative_f0}
"""
    with open(out_dir / "config.yaml", "w") as f:
        f.write(s)
    logger.info(f"Contents of config.yaml: \n{s}")
    logger.warning(
        """Assuming `use_world_codec: false` since the most of released ENUNU models
were trained with `use_world_codec: false`.
If you use the default feature extarction settings in newer NNSVS (> 0.0.3),
please set `use_world_codec: true` in the config.yaml
`use_world_codec` must be the same during the feature extraction and synthesis time.
"""
    )
# Script entry point
if __name__ == "__main__":
    args = get_parser().parse_args(sys.argv[1:])
    main(args.enunu_dir, args.out_dir, args.verbose)
| 5,291 | 34.756757 | 85 | py |
nnsvs | nnsvs-master/utils/merge_postfilters.py | import argparse
import os
import sys
from pathlib import Path
import torch
from omegaconf import OmegaConf
def get_parser():
    """Build the command-line parser for merging post-filter checkpoints.

    Returns:
        argparse.ArgumentParser: the configured parser
    """
    p = argparse.ArgumentParser(
        description="Merge post-filters",
    )
    p.add_argument("mgc_checkpoint", type=str, help="mgc checkpoint")
    p.add_argument("bap_checkpoint", type=str, help="bap checkpoint")
    p.add_argument("output_dir", type=str, help="out_dir")
    return p
# Script entry point: merge the mgc and bap post-filter checkpoints into a
# single MultistreamPostFilter checkpoint + model.yaml under output_dir.
if __name__ == "__main__":
    args = get_parser().parse_args(sys.argv[1:])
    mgc_checkpoint = torch.load(args.mgc_checkpoint, map_location="cpu")
    bap_checkpoint = torch.load(args.bap_checkpoint, map_location="cpu")
    for path in [args.mgc_checkpoint, args.bap_checkpoint]:
        size = os.path.getsize(path)
        # NOTE: fixed typo in the message ("Processisng" -> "Processing")
        print("Processing:", path)
        print(f"File size: {size / 1024/1024:.3f} MB")
    # Model definitions are assumed to sit next to their checkpoints
    mgc_model = OmegaConf.load(Path(args.mgc_checkpoint).parent / "model.yaml")
    bap_model = OmegaConf.load(Path(args.bap_checkpoint).parent / "model.yaml")
    if "postfilters.MultistreamPostFilter" not in mgc_model.netG._target_:
        raise ValueError("Only MultistreamPostFilter is supported for now")
    # Merge bap weights into the mgc checkpoint
    checkpoint = mgc_checkpoint
    checkpoint["state_dict"].update(bap_checkpoint["state_dict"])
    # Training-only states are not needed for inference
    for k in ["optimizer_state", "lr_scheduler_state"]:
        if k in checkpoint.keys():
            del checkpoint[k]
    Path(args.output_dir).mkdir(exist_ok=True, parents=True)
    # Model definition: take the bap post-filter config from the bap model
    yaml_path = Path(args.output_dir) / "model.yaml"
    mgc_model.netG.bap_postfilter = bap_model.netG.bap_postfilter
    OmegaConf.save(mgc_model, yaml_path)
    # Checkpoint
    checkpoint_path = Path(args.output_dir) / "latest.pth"
    torch.save(checkpoint, checkpoint_path)
    size = os.path.getsize(checkpoint_path)
    print(f"File size (after): {size / 1024/1024:.3f} MB")
| 1,864 | 32.303571 | 79 | py |
nnsvs | nnsvs-master/nnsvs/dsp.py | import torch
from scipy import signal
from torch import nn
from torch.nn import functional as F
# Part of code was adapted from:
# https://github.com/nii-yamagishilab/project-NN-Pytorch-scripts
def lowpass_filter(x, fs, cutoff=5, N=5):
    """Apply a zero-phase Butterworth low-pass filter.

    Args:
        x (np.ndarray): input signal
        fs (int): sampling rate
        cutoff (int): cutoff frequency in Hz
        N (int): filter order

    Returns:
        np.ndarray: filtered signal, or the input unchanged when it is too
        short to be filtered stably
    """
    normalized_cutoff = cutoff / (fs // 2)
    b, a = signal.butter(N, [normalized_cutoff], "lowpass")

    # filtfilt requires a minimum signal length; bail out for short inputs
    if len(x) <= max(len(a), len(b)) * (N // 2 + 1):
        return x

    # Zero-phase (forward-backward) filtering avoids phase distortion
    return signal.filtfilt(b, a, x)
def bandpass_filter(x, sr, cutoff=70):
    """Apply a zero-phase Butterworth band-pass filter.

    The pass band spans from ``cutoff`` up to (almost) the Nyquist frequency.

    Args:
        x (np.ndarray): input signal
        sr (int): sampling rate
        cutoff (int): lower cutoff frequency in Hz

    Returns:
        np.ndarray: filtered signal
    """
    low_edge = cutoff / (sr // 2)
    # Upper edge is just below Nyquist (1.0 is not a valid butter edge)
    b, a = signal.butter(5, [low_edge, 0.999], "bandpass")
    return signal.filtfilt(b, a, x)
class TimeInvFIRFilter(nn.Conv1d):
    """Time-invariant FIR filter.

    Implemented as a channel-wise (grouped) 1-d convolution whose kernel is
    initialized from the given FIR filter coefficients.

    Args:
        channels (int): number of input channels
        filt_coef (torch.Tensor): 1-d FIR filter coefficients
        causal (bool): if True, use left-sided (causal) padding
        requires_grad (bool): if True, the kernel is trainable
    """

    def __init__(self, channels, filt_coef, causal=True, requires_grad=False):
        # assuming 1-D filter coef vector and odd num taps
        assert len(filt_coef.shape) == 1
        n_taps = len(filt_coef)
        self.causal = causal
        pad = (n_taps - 1) if causal else (n_taps - 1) // 2
        # channel-wise filtering (groups=channels), no bias
        super(TimeInvFIRFilter, self).__init__(
            channels, channels, n_taps, padding=pad, groups=channels, bias=None
        )
        # Conv1d performs cross-correlation; flip the coefficients so the
        # layer implements true convolution with ``filt_coef``.
        self.weight.data[:, :, :] = filt_coef.flip(-1)
        self.weight.requires_grad = requires_grad

    def forward(self, x):
        y = super(TimeInvFIRFilter, self).forward(x)
        # Drop the trailing samples introduced by causal padding
        if self.causal:
            y = y[:, :, : -self.padding[0]]
        return y
class TrTimeInvFIRFilter(nn.Conv1d):
    """Trainable time-invariant FIR filter implementation

    H(z) = \\sum_{k=0}^{filt_dim} b_{k}z^{-k}

    Note that b_{0} is fixed to 1 if fixed_0th is True.

    Args:
        channels (int): input channels
        filt_dim (int): FIR filter dimension (number of taps)
        causal (bool): causal
        tanh (bool): apply tanh to filter coef or not.
        fixed_0th (bool): fix the first filt coef to 1 or not.
    """

    def __init__(self, channels, filt_dim, causal=True, tanh=True, fixed_0th=True):
        # Initialize filt coef with small random values
        init_filt_coef = torch.randn(filt_dim) * (1 / filt_dim)
        # assert len(filt_coef) % 2 == 1
        kernel_size = len(init_filt_coef)
        self.causal = causal
        if causal:
            # one-sided padding so the output never looks ahead in time
            padding = (kernel_size - 1) * 1
        else:
            padding = (kernel_size - 1) // 2 * 1
        # channel-wise filtering (groups=channels)
        super(TrTimeInvFIRFilter, self).__init__(
            channels, channels, kernel_size, padding=padding, groups=channels, bias=None
        )
        # Conv1d computes cross-correlation; flip so the layer acts as a
        # true convolution (i.e., kernel is stored time-reversed).
        self.weight.data[:, :, :] = init_filt_coef.flip(-1)
        self.weight.requires_grad = True
        self.tanh = tanh
        self.fixed_0th = fixed_0th

    def get_filt_coefs(self):
        # apply tanh for filter stability
        b = torch.tanh(self.weight) if self.tanh else self.weight
        # clone before in-place modification below
        b = b.clone()
        if self.fixed_0th:
            # NOTE: weights are stored flipped, so index -1 holds b_{0}
            b[:, :, -1] = 1
        return b

    def forward(self, x):
        b = self.get_filt_coefs()
        out = F.conv1d(
            x, b, self.bias, self.stride, self.padding, self.dilation, self.groups
        )
        if self.padding[0] > 0:
            # drop the trailing padded samples in the causal case
            out = out[:, :, : -self.padding[0]] if self.causal else out
        return out
| 4,140 | 28.578571 | 88 | py |
nnsvs | nnsvs-master/nnsvs/base.py | from enum import Enum
from torch import nn
class PredictionType(Enum):
    """Enumeration of model output (prediction) types."""

    DETERMINISTIC = 1
    """Deterministic prediction.

    Non-MDN single-stream models should use this type:
    ``y = model(x)`` during training and ``y = model.inference(x)``
    at inference time.
    """

    PROBABILISTIC = 2
    """Probabilistic prediction with mixture density networks.

    MDN-based models should use this type: training returns the MDN
    parameters (``mdn_params = model(x)``) while inference returns
    ``mu, sigma = model.inference(x)``.
    """

    MULTISTREAM_HYBRID = 3
    """Multi-stream predictions where each stream may be deterministic or
    probabilistic.

    Training returns per-stream features, e.g. (mgc, lf0, vuv, bap) or
    (mel, lf0, vuv). Note that concatenated features are assumed to be
    returned during inference.
    """

    DIFFUSION = 4
    """Diffusion model's prediction.

    Training returns ``(noise, x_recon)``; inference returns the generated
    features. NOTE: may be subject to change in the future.
    """
class BaseModel(nn.Module):
    """Base class for all NNSVS models.

    Custom models should inherit from this class and must implement
    :py:meth:`forward`; the remaining methods are optional hooks with
    sensible defaults.
    """

    def forward(self, x, lengths=None, y=None):
        """Forward pass.

        Args:
            x (tensor): input features
            lengths (tensor): lengths of the input features
            y (tensor): optional target features

        Returns:
            tensor: output features
        """
        pass

    def inference(self, x, lengths=None):
        """Run inference.

        Override for custom inference behavior such as autoregressive
        sampling; the default simply calls :py:meth:`forward`.

        Args:
            x (tensor): input features
            lengths (tensor): lengths of the input features

        Returns:
            tensor: output features
        """
        return self(x, lengths)

    def preprocess_target(self, y):
        """Preprocess target features at training time.

        Useful for shallow AR models where a FIR filter is applied to the
        targets; other models can rely on this identity default.

        Args:
            y (tensor): target features

        Returns:
            tensor: preprocessed target features
        """
        return y

    def prediction_type(self):
        """Return the model's prediction type.

        Models with an MDN layer should return
        ``PredictionType.PROBABILISTIC``.

        Returns:
            PredictionType: deterministic by default.
        """
        return PredictionType.DETERMINISTIC

    def is_autoregressive(self):
        """Whether the model is autoregressive.

        Autoregressive models should return True and implement sampling in
        :py:meth:`inference`.

        Returns:
            bool: False by default.
        """
        return False

    def has_residual_lf0_prediction(self):
        """Whether the model predicts residual log-F0.

        This should only be used for acoustic models.

        Returns:
            bool: False by default.
        """
        return False
| 3,784 | 22.955696 | 94 | py |
nnsvs | nnsvs-master/nnsvs/multistream.py | # Utils for multi-stream features
import numpy as np
import torch
from nnmnkwii import paramgen
def get_windows(num_window=1):
    """Return the MLPG window coefficients for up to three windows.

    Args:
        num_window (int): number of windows (1 to 3)

    Returns:
        list: list of (left-context, right-context, coefficients) tuples

    Raises:
        ValueError: if ``num_window`` is 4 or larger
    """
    if num_window >= 4:
        raise ValueError(f"Not supported num windows: {num_window}")
    # static window
    windows = [(0, 0, np.array([1.0]))]
    if num_window >= 2:
        # delta window
        windows.append((1, 1, np.array([-0.5, 0.0, 0.5])))
    if num_window >= 3:
        # delta-delta window
        windows.append((1, 1, np.array([1.0, -2.0, 1.0])))
    return windows
def select_streams(inputs, stream_sizes=None, streams=None, concat=True):
    """Select a subset of streams from concatenated multi-stream features.

    Args:
        inputs (array like): 2-d or 3-d input array (numpy or torch)
        stream_sizes (list): size of each stream; defaults to [60, 1, 1, 1]
        streams (list): per-stream boolean flags; all streams if None
        concat (bool): concatenate the selected streams. Defaults to True.

    Returns:
        array like: concatenated selection, or a list when ``concat=False``
    """
    stream_sizes = [60, 1, 1, 1] if stream_sizes is None else stream_sizes
    streams = [True] * len(stream_sizes) if streams is None else streams

    offsets = np.hstack(([0], np.cumsum(stream_sizes)[:-1]))
    selected = []
    for offset, size, use in zip(offsets, stream_sizes, streams):
        if not use:
            continue
        # slice along the last (feature) axis
        if len(inputs.shape) == 3:
            selected.append(inputs[:, :, offset : offset + size])
        else:
            selected.append(inputs[:, offset : offset + size])

    if not concat:
        return selected
    if isinstance(inputs, torch.Tensor):
        return torch.cat(selected, dim=-1)
    return np.concatenate(selected, -1)
def split_streams(inputs, stream_sizes=None):
    """Split concatenated multi-stream features into individual streams.

    Args:
        inputs (array like): 2-d or 3-d input array
        stream_sizes (list): size of each stream; defaults to [60, 1, 1, 1]

    Returns:
        list: one array per stream, in order
    """
    stream_sizes = [60, 1, 1, 1] if stream_sizes is None else stream_sizes
    streams = []
    offset = 0
    for size in stream_sizes:
        # slice along the last (feature) axis
        if len(inputs.shape) == 3:
            streams.append(inputs[:, :, offset : offset + size])
        else:
            streams.append(inputs[:, offset : offset + size])
        offset += size
    return streams
def get_static_stream_sizes(stream_sizes, has_dynamic_features, num_windows):
    """Compute the per-stream sizes of the static-only features.

    Streams carrying dynamic (delta) features have their size divided by the
    number of windows; static-only streams keep their size.

    Args:
        stream_sizes (list): stream sizes (static+dynamic)
        has_dynamic_features (list): binary flags that indicate if streams
            have dynamic features
        num_windows (int): number of windows

    Returns:
        np.ndarray: static stream sizes
    """
    sizes = np.array(stream_sizes)
    # In-place assignment preserves the integer dtype of the array
    sizes[has_dynamic_features] = sizes[has_dynamic_features] / num_windows
    return sizes
def get_static_features(
    inputs,
    num_windows,
    stream_sizes=None,
    has_dynamic_features=None,
    streams=None,
):
    """Extract static-only features from static+dynamic multi-stream features.

    Args:
        inputs (array like): 3-d input array of shape (B, T, D)
        num_windows (int): number of windows
        stream_sizes (list): stream sizes; defaults to [180, 3, 1, 15]
        has_dynamic_features (list): binary flags that indicate if streams
            have dynamic features; defaults to [True, True, False, True]
        streams (list, optional): streams of interest. Returns all streams
            if None. Defaults to None.

    Returns:
        list: list of static feature arrays (a single array is returned for
        the single-stream case)
    """
    stream_sizes = [180, 3, 1, 15] if stream_sizes is None else stream_sizes
    if has_dynamic_features is None:
        has_dynamic_features = [True, True, False, True]
    streams = [True] * len(stream_sizes) if streams is None else streams

    _, _, D = inputs.shape
    # Single-stream shortcuts: return an array rather than a list
    if len(stream_sizes) == 1:
        if has_dynamic_features[0]:
            return inputs[:, :, : D // num_windows]
        return inputs

    # Multi stream case
    out = []
    offsets = np.hstack(([0], np.cumsum(stream_sizes)[:-1]))
    for offset, size, dynamic, use in zip(
        offsets, stream_sizes, has_dynamic_features, streams
    ):
        if not use:
            continue
        offset, size = int(offset), int(size)
        # dynamic streams interleave static+delta blocks; keep the static part
        width = size // num_windows if dynamic else size
        out.append(inputs[:, :, offset : offset + width])
    return out
def multi_stream_mlpg(
    inputs,
    variances,
    windows,
    stream_sizes=None,
    has_dynamic_features=None,
    streams=None,
):
    """Split streams and do apply MLPG if stream has dynamic features

    Args:
        inputs (array like): input 2-d array of shape (T, D)
        variances (array like): variances of input features; either per-frame
            (same shape as ``inputs``) or a single 1-d vector of size D
        windows (list): windows for parameter generation
        stream_sizes (list): stream sizes
        has_dynamic_features (list): binary flags that indicates if steams have dynamic features
        streams (list, optional): Streams of interests. Returns all streams if streams is None.
            Defaults to None.

    Raises:
        RuntimeError: if stream sizes are wrong

    Returns:
        array like: generated static features
    """
    if stream_sizes is None:
        stream_sizes = [180, 3, 1, 3]
    if has_dynamic_features is None:
        has_dynamic_features = [True, True, False, True]
    if streams is None:
        streams = [True] * len(stream_sizes)
    T, D = inputs.shape
    if D != sum(stream_sizes):
        raise RuntimeError("You probably have specified wrong dimension params.")

    # Stream indices for static+delta features
    # [0, 180, 183, 184]
    start_indices = np.hstack(([0], np.cumsum(stream_sizes)[:-1]))
    # [180, 183, 184, 199]
    end_indices = np.cumsum(stream_sizes)

    ret = []
    for in_start_idx, in_end_idx, v, enabled in zip(
        start_indices,
        end_indices,
        has_dynamic_features,
        streams,
    ):
        if not enabled:
            continue
        x = inputs[:, in_start_idx:in_end_idx]
        # Frame-wise variances if provided; otherwise tile the global
        # variance vector over all T frames
        if inputs.shape == variances.shape:
            var_ = variances[:, in_start_idx:in_end_idx]
        else:
            var_ = np.tile(variances[in_start_idx:in_end_idx], (T, 1))
        # Run MLPG only for streams that carry dynamic (delta) features
        y = paramgen.mlpg(x, var_, windows) if v else x
        ret.append(y)

    return np.concatenate(ret, -1)
| 6,737 | 28.946667 | 96 | py |
nnsvs | nnsvs-master/nnsvs/discriminators.py | """Discriminator implementations mostly used for GAN-based post-filters.
All the discriminators must returns list of tensors.
The last tensor of the list is regarded as the output of the discrminator.
The others are used as intermedieate feature maps.
"""
import numpy as np
import torch
from nnsvs.util import init_weights
from torch import nn
class Conv2dD(nn.Module):
    """Conv2d-based discriminator

    The implementation follows the discriminator of the GAN-based post-filters
    in :cite:t:`Kaneko2017Interspeech`.

    Args:
        in_dim (int): Input feature dim
        channels (int): Number of channels
        kernel_size (tuple): Kernel size for 2d-convolution
        padding (tuple): Padding for 2d-convolution. If None, a "same"-style
            padding of (kernel_size - 1) // 2 is used.
        last_sigmoid (bool): If True, apply sigmoid on the output
        init_type (str): Initialization type
        padding_mode (str): Padding mode
    """

    def __init__(
        self,
        in_dim=None,
        channels=64,
        kernel_size=(5, 3),
        padding=(0, 0),
        last_sigmoid=False,
        init_type="kaiming_normal",
        padding_mode="zeros",
    ):
        super().__init__()
        self.last_sigmoid = last_sigmoid
        C = channels
        ks = np.asarray(list(kernel_size))
        # Fall back to "same"-style padding when not explicitly given
        if padding is None:
            padding = (ks - 1) // 2
        self.convs = nn.ModuleList()
        # Layer 1: 1 -> C channels, stride (1, 1)
        self.convs.append(
            nn.Sequential(
                nn.Conv2d(
                    1,
                    C,
                    kernel_size=ks,
                    padding=padding,
                    stride=(1, 1),
                    padding_mode=padding_mode,
                ),
                nn.LeakyReLU(0.2),
            )
        )
        # Layer 2: C -> 2C channels, stride 2 along the first spatial axis
        self.convs.append(
            nn.Sequential(
                nn.Conv2d(
                    C,
                    2 * C,
                    kernel_size=ks,
                    padding=padding,
                    stride=(2, 1),
                    padding_mode=padding_mode,
                ),
                nn.LeakyReLU(0.2),
            )
        )
        # Layer 3: 2C -> 4C channels, stride (2, 1)
        self.convs.append(
            nn.Sequential(
                nn.Conv2d(
                    2 * C,
                    4 * C,
                    kernel_size=ks,
                    padding=padding,
                    stride=(2, 1),
                    padding_mode=padding_mode,
                ),
                nn.LeakyReLU(0.2),
            )
        )
        # Layer 4: 4C -> 2C channels, stride (2, 1)
        self.convs.append(
            nn.Sequential(
                nn.Conv2d(
                    4 * C,
                    2 * C,
                    kernel_size=ks,
                    padding=padding,
                    stride=(2, 1),
                    padding_mode=padding_mode,
                ),
                nn.LeakyReLU(0.2),
            )
        )
        # Final projection down to a single-channel score map
        self.last_conv = nn.Conv2d(
            2 * C,
            1,
            kernel_size=ks,
            padding=padding,
            stride=(1, 1),
            padding_mode=padding_mode,
        )
        init_weights(self, init_type)

    def forward(self, x, c=None, lengths=None):
        """Forward step

        Args:
            x (torch.Tensor): Input tensor
            c (torch.Tensor): Optional conditional features (unused here)
            lengths (torch.Tensor): Optional lengths of the input (unused here)

        Returns:
            list: List of output tensors; the last element is the
            discriminator output, the preceding ones are intermediate
            feature maps (see module docstring).
        """
        outs = []
        # (B, T, C) -> (B, 1, T, C):
        x = x.unsqueeze(1)
        for conv in self.convs:
            x = conv(x)
            # keep intermediate feature maps
            outs.append(x)
        y = self.last_conv(x)
        y = torch.sigmoid(y) if self.last_sigmoid else y
        # (B, 1, T, C) -> (B, T, C)
        y = y.squeeze(1)
        outs.append(y)

        return [outs]
| 3,746 | 26.755556 | 78 | py |
nnsvs | nnsvs-master/nnsvs/mdn.py | import torch
import torch.nn.functional as F
from torch import nn
class MDNLayer(nn.Module):
    """Mixture Density Network output layer.

    Maps input features to the parameters of a mixture-of-Gaussians (MoG)
    distribution with diagonal covariance: log mixture weights, means, and
    log standard deviations.

    When ``dim_wise`` is True, each output dimension is modeled by its own
    independent 1-D mixture instead of a joint mixture, which can ease
    training on high-dimensional targets.

    Implementation references:
        1. Mixture Density Networks by Mike Dusenberry
        https://mikedusenberry.com/mixture-density-networks
        2. PRML book
        https://www.microsoft.com/en-us/research/people/cmbishop/prml-book/
        3. sagelywizard/pytorch-mdn
        https://github.com/sagelywizard/pytorch-mdn
        4. sksq96/pytorch-mdn
        https://github.com/sksq96/pytorch-mdn

    Attributes:
        in_dim (int): the number of dimensions in the input
        out_dim (int): the number of dimensions in the output
        num_gaussians (int): the number of mixture components
        dim_wise (bool): whether to model each dimension separately
    """

    def __init__(self, in_dim, out_dim, num_gaussians=30, dim_wise=False):
        super(MDNLayer, self).__init__()
        self.in_dim = in_dim
        self.out_dim = out_dim
        self.num_gaussians = num_gaussians
        self.dim_wise = dim_wise

        # dim_wise requires one set of weights per output dimension
        pi_dim = out_dim * num_gaussians if dim_wise else num_gaussians
        self.log_pi = nn.Linear(in_dim, pi_dim)
        self.log_sigma = nn.Linear(in_dim, out_dim * num_gaussians)
        self.mu = nn.Linear(in_dim, out_dim * num_gaussians)

    def forward(self, minibatch):
        """Compute MoG parameters from input features.

        Args:
            minibatch (torch.Tensor): input of shape (B, T, D_in) where B is
                the batch size, T the sequence length and D_in is ``in_dim``.

        Returns:
            torch.Tensor: log mixture weights of shape (B, T, G), or
                (B, T, G, D_out) when ``dim_wise``. G is ``num_gaussians``.
            torch.Tensor: log standard deviations of shape (B, T, G, D_out).
            torch.Tensor: means of shape (B, T, G, D_out).
        """
        B = len(minibatch)
        G, D = self.num_gaussians, self.out_dim

        raw_pi = self.log_pi(minibatch)
        if self.dim_wise:
            # normalize mixture weights independently per output dimension
            log_pi = F.log_softmax(raw_pi.view(B, -1, G, D), dim=2)
        else:
            log_pi = F.log_softmax(raw_pi, dim=2)

        log_sigma = self.log_sigma(minibatch).view(B, -1, G, D)
        mu = self.mu(minibatch).view(B, -1, G, D)
        return log_pi, log_sigma, mu
def mdn_loss(
    log_pi, log_sigma, mu, target, log_pi_min=-7.0, log_sigma_min=-7.0, reduce=True
):
    """Calculates the error, given the MoG parameters and the target.

    The loss is the negative log likelihood of the data given the MoG
    parameters.

    Args:
        log_pi (torch.Tensor): Tensor of shape (B, T, G) or (B, T, G, D_out)
            The log of multinomial distribution of the Gaussians. B is the batch size,
            T is data length of this batch, and G is num_gaussians of class MDNLayer.
        log_sigma (torch.Tensor): Tensor of shape (B, T, G ,D_out)
            The log standard deviation of the Gaussians. D_out is out_dim of class
            MDNLayer.
        mu (torch.Tensor): Tensor of shape (B, T, G, D_out)
            The means of the Gaussians.
        target (torch.Tensor): Tensor of shape (B, T, D_out)
            The target variables.
        log_pi_min (float): Minimum value of log_pi (for numerical stability)
        log_sigma_min (float): Minimum value of log_sigma (for numerical stability)
        reduce: If True, the losses are averaged over the time axis for each batch.

    Returns:
        torch.Tensor: loss of shape (B,) if ``reduce`` else (B, T);
        negative log likelihood of the mixture density network.
    """
    # A 4-dim log_pi means dimension-wise mixtures (see MDNLayer)
    dim_wise = len(log_pi.shape) == 4

    # Clip log_sigma and log_pi with the given minimums for numerical stability
    log_sigma = torch.clamp(log_sigma, min=log_sigma_min)
    log_pi = torch.clamp(log_pi, min=log_pi_min)

    # Expand the dim of target as (B, T, D_out) -> (B, T, 1, D_out) -> (B, T, G, D_out)
    target = target.unsqueeze(2).expand_as(log_sigma)

    # Center target variables and clamp them within +/- 5SD for numerical stability.
    centered_target = target - mu
    scale = torch.exp(log_sigma)
    edge = 5 * scale
    centered_target = torch.where(centered_target > edge, edge, centered_target)
    centered_target = torch.where(centered_target < -edge, -edge, centered_target)

    # Gaussians with mean=0 and std=exp(log_sigma)
    dist = torch.distributions.Normal(loc=0, scale=scale)
    log_prob = dist.log_prob(centered_target)

    if dim_wise:
        # (B, T, G, D_out)
        loss = log_prob + log_pi
    else:
        # With a diagonal covariance, the multivariate Gaussian factorizes
        # over dimensions, so the per-dimension log probs are summed:
        # log N(y_1,...,y_{D_out}|mu,sigma) = \sum_i log N(y_i|mu,sigma)
        # Reference:
        # https://markusthill.github.io/gaussian-distribution-with-a-diagonal-covariance-matrix/
        # (B, T, G, D_out) -> (B, T, G)
        loss = torch.sum(log_prob, dim=3) + log_pi

    # Negative log likelihood via log-sum-exp over the mixture axis
    # (Reference: https://github.com/r9y9/nnsvs/pull/20#discussion_r495514563)
    # if dim_wise: (B, T, G, D_out) -> (B, T, D_out) else (B, T, G) -> (B, T)
    loss = -torch.logsumexp(loss, dim=2)

    if reduce:
        # (B, T) -> (B,)
        return torch.mean(loss, dim=1)
    # not averaged (e.g. for applying a sequence mask later): (B, T)
    # NOTE: an unreachable trailing `return` after this if/else was removed.
    return loss
# from r9y9/wavenet_vocoder/wavenet_vocoder/mixture.py
def to_one_hot(tensor, n, fill_with=1.0):
    """One-hot encode ``tensor`` along a new last axis of size ``n``.

    Args:
        tensor (torch.LongTensor): index tensor of arbitrary shape
        n (int): number of classes (size of the new last axis)
        fill_with (float): value written at the indexed positions

    Returns:
        torch.FloatTensor: encoded tensor of shape ``tensor.shape + (n,)``
    """
    encoded = torch.FloatTensor(tensor.size() + (n,)).zero_()
    if tensor.is_cuda:
        encoded = encoded.cuda()
    # scatter fill_with at the positions given by tensor along the last axis
    encoded.scatter_(len(tensor.size()), tensor.unsqueeze(-1), fill_with)
    return encoded
def mdn_get_most_probable_sigma_and_mu(log_pi, log_sigma, mu):
    """Return the std and mean of the dominant Gaussian component.

    The component with the largest mixture weight is selected as the most
    probable prediction.

    Args:
        log_pi (torch.Tensor): Tensor of shape (B, T, G) or (B, T, G, D_out);
            the log of the multinomial distribution of the Gaussians.
        log_sigma (torch.Tensor): Tensor of shape (B, T, G, D_out); log
            standard deviations of the Gaussians.
        mu (torch.Tensor): Tensor of shape (B, T, G, D_out); means of the
            Gaussians.

    Returns:
        tuple: (sigma, mu), both of shape (B, T, D_out), taken from the
        component with the largest weight.
    """
    dim_wise = len(log_pi.shape) == 4
    num_gaussians = mu.shape[2]

    # Indices of the dominant component: (B, T) or (B, T, D_out)
    _, max_component = torch.max(log_pi, dim=2)

    # One-hot mask over the mixture axis (helper inlined here so the
    # selection is self-contained)
    mask = torch.FloatTensor(max_component.size() + (num_gaussians,)).zero_()
    if max_component.is_cuda:
        mask = mask.cuda()
    mask.scatter_(len(max_component.size()), max_component.unsqueeze(-1), 1.0)

    if dim_wise:
        # (B, T, D_out, G) -> (B, T, G, D_out)
        mask = mask.transpose(2, 3)
        assert mask.shape == mu.shape
    else:
        # (B, T, G) -> (B, T, G, D_out)
        mask = mask.unsqueeze(3).expand_as(mu)

    # Mask out everything but the dominant component and reduce over the
    # mixture axis: (B, T, G, D_out) -> (B, T, D_out)
    max_mu = torch.sum(mu * mask, dim=2)
    max_sigma = torch.exp(torch.sum(log_sigma * mask, dim=2))
    return max_sigma, max_mu
def mdn_get_sample(log_pi, log_sigma, mu):
    """Draw a sample from the most probable Gaussian component.

    The component whose mixture weight is the largest is selected first,
    and a sample is then drawn from that single Gaussian.

    Args:
        log_pi (torch.Tensor): Tensor of shape (B, T, G) or (B, T, G, D_out).
            The log of multinomial distribution of the Gaussians.
        log_sigma (torch.Tensor): Tensor of shape (B, T, G, D_out).
            The log standard deviation of the Gaussians.
        mu (torch.Tensor): Tensor of shape (B, T, G, D_out).
            The means of the Gaussians.

    Returns:
        torch.Tensor: Sample of shape (B, T, D_out).
    """
    sigma, mean = mdn_get_most_probable_sigma_and_mu(log_pi, log_sigma, mu)
    # Sample from N(mean, sigma^2) of the selected component.
    return torch.distributions.Normal(loc=mean, scale=sigma).sample()
| 9,902 | 39.753086 | 96 | py |
nnsvs | nnsvs-master/nnsvs/svs.py | import json
import time
from copy import deepcopy
from pathlib import Path
import numpy as np
import torch
from hydra.utils import instantiate
from nnmnkwii.io import hts
from nnmnkwii.preprocessing.f0 import interp1d
from nnsvs.gen import (
postprocess_acoustic,
postprocess_duration,
postprocess_waveform,
predict_acoustic,
predict_duration,
predict_timelag,
predict_waveform,
)
from nnsvs.io.hts import (
full_to_mono,
get_note_indices,
get_pitch_index,
get_pitch_indices,
label2phrases,
label2phrases_str,
segment_labels,
)
from nnsvs.logger import getLogger
from nnsvs.usfgan import USFGANWrapper
from nnsvs.util import MinMaxScaler, StandardScaler, extract_static_scaler, load_vocoder
from omegaconf import OmegaConf
class BaseSVS(object):
    """Base class for singing voice synthesis (SVS) inference

    All SVS engines should inherit from this class.
    The input of the SVS engine uses the HTS-style full-context labels.
    The output should be a tuple of raw waveform and sampling rate.

    To allow language-independent SVS, this base class does not define
    the interface for the frontend functionality such as
    converting musicXML/UST to HTS labels. The frontend processing
    should be done externally (e.g., using pysinsy or utaupy) or can
    be implemented with an optional method.
    """

    def svs(self, labels, *args, **kwargs):
        """Run SVS inference and return the synthesized waveform.

        Subclasses must override this method; the base implementation
        intentionally does nothing.

        Args:
            labels (nnmnkwii.io.hts.HTSLabelFile): HTS labels

        Returns:
            tuple: (waveform, sampling rate)
        """
        pass
class SPSVS(BaseSVS):
"""Statistical parametric singing voice synthesis (SPSVS)
Use the ``svs`` method for the simplest inference, or use the
separated methods (e.g.,``predict_acoustic`` and ``predict_waveform``)
to control each components of the parametric SVS system.
Args:
model_dir (str): directory of the model
device (str): cpu or cuda
verbose (int): verbosity level
Examples:
        Synthesize waveform from a musicxml file
.. plot::
import numpy as np
import pysinsy
from nnmnkwii.io import hts
from nnsvs.pretrained import retrieve_pretrained_model
from nnsvs.svs import SPSVS
from nnsvs.util import example_xml_file
import matplotlib.pyplot as plt
# Instantiate the SVS engine
model_dir = retrieve_pretrained_model("r9y9/yoko_latest")
engine = SPSVS(model_dir)
# Extract HTS labels from a MusicXML file
contexts = pysinsy.extract_fullcontext(example_xml_file(key="get_over"))
labels = hts.HTSLabelFile.create_from_contexts(contexts)
# Run inference
wav, sr = engine.svs(labels)
# Plot the result
fig, ax = plt.subplots(figsize=(8,2))
librosa.display.waveshow(wav.astype(np.float32), sr=sr, ax=ax)
With WORLD vocoder:
>>> wav, sr = engine.svs(labels, vocoder_type="world")
With a uSFGAN or SiFiGAN vocoder:
>>> wav, sr = engine.svs(labels, vocoder_type="usfgan")
"""
def __init__(self, model_dir, device="cpu", verbose=0):
self.device = device
# NOTE: assuming that the logger is instantiated without hydra
# needs to add stream handler to the logger explicitly
self.logger = getLogger(verbose=verbose, add_stream_handler=True)
if isinstance(model_dir, str):
model_dir = Path(model_dir)
# search for config.yaml
assert model_dir / "config.yaml"
self.config = OmegaConf.load(model_dir / "config.yaml")
self.feature_type = self.config.get("feature_type", "world")
self.sample_rate = self.config.get("sample_rate", 48000)
# qst
self.binary_dict, self.numeric_dict = hts.load_question_set(
model_dir / "qst.hed"
)
self.pitch_idx = get_pitch_index(self.binary_dict, self.numeric_dict)
self.pitch_indices = get_pitch_indices(self.binary_dict, self.numeric_dict)
# Time-lag model
self.timelag_config = OmegaConf.load(model_dir / "timelag_model.yaml")
self.timelag_model = instantiate(self.timelag_config.netG).to(device)
checkpoint = torch.load(
model_dir / "timelag_model.pth",
map_location=device,
)
self.timelag_model.load_state_dict(checkpoint["state_dict"])
self.timelag_in_scaler = MinMaxScaler(
np.load(model_dir / "in_timelag_scaler_min.npy"),
np.load(model_dir / "in_timelag_scaler_scale.npy"),
)
self.timelag_out_scaler = StandardScaler(
np.load(model_dir / "out_timelag_scaler_mean.npy"),
np.load(model_dir / "out_timelag_scaler_var.npy"),
np.load(model_dir / "out_timelag_scaler_scale.npy"),
)
self.timelag_model.eval()
# Duration model
self.duration_config = OmegaConf.load(model_dir / "duration_model.yaml")
self.duration_model = instantiate(self.duration_config.netG).to(device)
checkpoint = torch.load(
model_dir / "duration_model.pth",
map_location=device,
)
self.duration_model.load_state_dict(checkpoint["state_dict"])
self.duration_in_scaler = MinMaxScaler(
np.load(model_dir / "in_duration_scaler_min.npy"),
np.load(model_dir / "in_duration_scaler_scale.npy"),
)
self.duration_out_scaler = StandardScaler(
np.load(model_dir / "out_duration_scaler_mean.npy"),
np.load(model_dir / "out_duration_scaler_var.npy"),
np.load(model_dir / "out_duration_scaler_scale.npy"),
)
self.duration_model.eval()
# Acoustic model
self.acoustic_config = OmegaConf.load(model_dir / "acoustic_model.yaml")
self.acoustic_model = instantiate(self.acoustic_config.netG).to(device)
checkpoint = torch.load(
model_dir / "acoustic_model.pth",
map_location=device,
)
self.acoustic_model.load_state_dict(checkpoint["state_dict"])
self.acoustic_in_scaler = MinMaxScaler(
np.load(model_dir / "in_acoustic_scaler_min.npy"),
np.load(model_dir / "in_acoustic_scaler_scale.npy"),
)
self.acoustic_out_scaler = StandardScaler(
np.load(model_dir / "out_acoustic_scaler_mean.npy"),
np.load(model_dir / "out_acoustic_scaler_var.npy"),
np.load(model_dir / "out_acoustic_scaler_scale.npy"),
)
# NOTE: this is used for GV post-filtering
self.acoustic_out_static_scaler = extract_static_scaler(
self.acoustic_out_scaler, self.acoustic_config
)
# (Optional) lf0 model
if (model_dir / "lf0_model.pth").exists():
assert hasattr(self.acoustic_model, "lf0_model")
self.logger.info("Loading an external lf0 model.")
checkpoint = torch.load(
model_dir / "lf0_model.pth",
map_location=device,
)
self.acoustic_model.lf0_model.load_state_dict(checkpoint["state_dict"])
self.acoustic_model.eval()
# Post-filter
if (model_dir / "postfilter_model.yaml").exists():
self.postfilter_config = OmegaConf.load(model_dir / "postfilter_model.yaml")
self.postfilter_model = instantiate(self.postfilter_config.netG).to(device)
checkpoint = torch.load(
model_dir / "postfilter_model.pth",
map_location=device,
)
self.postfilter_model.load_state_dict(checkpoint["state_dict"])
self.postfilter_model.eval()
self.postfilter_out_scaler = StandardScaler(
np.load(model_dir / "out_postfilter_scaler_mean.npy"),
np.load(model_dir / "out_postfilter_scaler_var.npy"),
np.load(model_dir / "out_postfilter_scaler_scale.npy"),
)
else:
self.postfilter_model = None
self.postfilter_config = None
self.postfilter_out_scaler = None
# Vocoder model
if (model_dir / "vocoder_model.pth").exists():
self.vocoder, self.vocoder_in_scaler, self.vocoder_config = load_vocoder(
model_dir / "vocoder_model.pth", device, self.acoustic_config
)
else:
self.logger.info(
"No trained vocoder model found. WORLD vocoder will be used."
)
self.vocoder = None
self.vocoder_config = None
self.vocoder_in_scaler = None
    def __repr__(self):
        """Return a human-readable summary of all loaded model configs."""
        timelag_str = json.dumps(
            OmegaConf.to_container(self.timelag_config.netG),
            sort_keys=False,
            indent=4,
        )
        duration_str = json.dumps(
            OmegaConf.to_container(self.duration_config.netG),
            sort_keys=False,
            indent=4,
        )
        acoustic_str = json.dumps(
            OmegaConf.to_container(self.acoustic_config.netG),
            sort_keys=False,
            indent=4,
        )
        # NOTE: the local name ``repr`` shadows the builtin; kept as-is.
        repr = f"""Statistical parametric SVS (sampling rate: {self.sample_rate})
Time-lag model: {timelag_str}
Duration model: {duration_str}
Acoustic model: {acoustic_str}
"""
        if self.postfilter_model is not None:
            postfilter_str = json.dumps(
                OmegaConf.to_container(self.postfilter_config.netG),
                sort_keys=False,
                indent=4,
            )
            repr += f"Post-filter model: {postfilter_str}\n"
        else:
            repr += "Post-filter model: None\n"
        if self.vocoder is not None:
            # uSFGAN-style configs carry both generator and discriminator keys.
            if (
                "generator" in self.vocoder_config
                and "discriminator" in self.vocoder_config
            ):
                # usfgan
                vocoder_params = OmegaConf.to_container(
                    self.vocoder_config["generator"], throw_on_missing=True
                )
            else:
                vocoder_params = {
                    "generator_type": self.vocoder_config.get(
                        "generator_type", "ParallelWaveGANGenerator"  # type: ignore
                    ),
                    "generator_params": OmegaConf.to_container(
                        self.vocoder_config.generator_params
                    ),
                }
            vocoder_str = json.dumps(
                vocoder_params,
                sort_keys=False,
                indent=4,
            )
            repr += f"Vocoder model: {vocoder_str}\n"
        else:
            repr += "Vocoder model: WORLD\n"
        return repr
def set_device(self, device):
"""Set device for the SVS model
Args:
device (str): cpu or cuda.
"""
self.logger.info(f"Set device to {device}")
self.device = device
self.timelag_model.to(device)
self.duration_model.to(device)
self.acoustic_model.to(device)
self.postfilter_model.to(device) if self.postfilter_model is not None else None
self.vocoder.to(device) if self.vocoder is not None else None
    def predict_timelag(self, labels):
        """Predict time-lag from HTS labels

        Args:
            labels (nnmnkwii.io.hts.HTSLabelFile): HTS labels.

        Returns:
            ndarray: Predicted time-lag.
        """
        start_time = time.time()
        # Delegates to nnsvs.gen.predict_timelag with the loaded model/scalers.
        lag = predict_timelag(
            self.device,
            labels,
            timelag_model=self.timelag_model,
            timelag_config=self.timelag_config,
            timelag_in_scaler=self.timelag_in_scaler,
            timelag_out_scaler=self.timelag_out_scaler,
            binary_dict=self.binary_dict,
            numeric_dict=self.numeric_dict,
            pitch_indices=self.pitch_indices,
            log_f0_conditioning=self.config.log_f0_conditioning,
            allowed_range=self.config.timelag.allowed_range,
            allowed_range_rest=self.config.timelag.allowed_range_rest,
            force_clip_input_features=self.config.timelag.force_clip_input_features,
            frame_period=self.config.frame_period,
        )
        self.logger.info(
            f"Elapsed time for time-lag prediction: {time.time() - start_time:.3f} sec"
        )
        return lag
def predict_duration(self, labels):
"""Predict durations from HTS labels
Args:
labels (nnmnkwii.io.hts.HTSLabelFile): HTS labels.
Returns:
ndarray: Predicted durations.
"""
start_time = time.time()
durations = predict_duration(
self.device,
labels,
duration_model=self.duration_model,
duration_config=self.duration_config,
duration_in_scaler=self.duration_in_scaler,
duration_out_scaler=self.duration_out_scaler,
binary_dict=self.binary_dict,
numeric_dict=self.numeric_dict,
pitch_indices=self.pitch_indices,
log_f0_conditioning=self.config.log_f0_conditioning,
force_clip_input_features=self.config.duration.force_clip_input_features,
frame_period=self.config.frame_period,
)
self.logger.info(
f"Elapsed time for duration prediction: {time.time() - start_time:.3f} sec"
)
return durations
def postprocess_duration(self, labels, pred_durations, lag):
"""Post-process durations
Args:
labels (nnmnkwii.io.hts.HTSLabelFile): HTS labels.
pred_durations (ndarray): Predicted durations.
lag (ndarray): Predicted time-lag.
Returns:
nnmnkwii.io.hts.HTSLabelFile: duration modified HTS labels.
"""
start_time = time.time()
duration_modified_labels = postprocess_duration(
labels, pred_durations, lag, frame_period=self.config.frame_period
)
self.logger.info(
f"Elapsed time for duration post-processing: {time.time() - start_time:.3f} sec"
)
return duration_modified_labels
def predict_timing(self, labels):
"""Predict timing from HTS labels
Args:
labels (nnmnkwii.io.hts.HTSLabelFile): HTS labels.
Returns:
nnmnkwii.io.hts.HTSLabelFile: duration modified HTS labels.
"""
lag = self.predict_timelag(labels)
durations = self.predict_duration(labels)
duration_modified_full_labels = self.postprocess_duration(
labels, durations, lag
)
return duration_modified_full_labels
    def predict_acoustic(self, duration_modified_labels, f0_shift_in_cent=0):
        """Predict acoustic features from HTS labels

        Args:
            duration_modified_labels (nnmnkwii.io.hts.HTSLabelFile): HTS labels.
            f0_shift_in_cent (float): F0 shift in cent.

        Returns:
            ndarray: Predicted acoustic features.
        """
        start_time = time.time()
        # Delegates to nnsvs.gen.predict_acoustic; config values fall back to
        # defaults for models trained before these options existed.
        acoustic_features = predict_acoustic(
            device=self.device,
            labels=duration_modified_labels,
            acoustic_model=self.acoustic_model,
            acoustic_config=self.acoustic_config,
            acoustic_in_scaler=self.acoustic_in_scaler,
            acoustic_out_scaler=self.acoustic_out_scaler,
            binary_dict=self.binary_dict,
            numeric_dict=self.numeric_dict,
            subphone_features=self.acoustic_config.get(
                "subphone_features", "coarse_coding"
            ),
            pitch_indices=self.pitch_indices,
            log_f0_conditioning=self.config.log_f0_conditioning,
            force_clip_input_features=self.acoustic_config.get(
                "force_clip_input_features", True
            ),
            frame_period=self.config.frame_period,
            f0_shift_in_cent=f0_shift_in_cent,
        )
        self.logger.info(
            f"Elapsed time for acoustic feature prediction: {time.time() - start_time:.3f} sec"
        )
        # log real-time factor (RT): processing time / synthesized audio duration
        RT = (time.time() - start_time) / (
            acoustic_features.shape[0] * self.config.frame_period / 1000
        )
        self.logger.info(f"Real-time factor for acoustic feature prediction: {RT:.3f}")
        return acoustic_features
    def postprocess_acoustic(
        self,
        duration_modified_labels,
        acoustic_features,
        post_filter_type="gv",
        trajectory_smoothing=True,
        trajectory_smoothing_cutoff=50,
        trajectory_smoothing_cutoff_f0=20,
        vuv_threshold=0.5,
        force_fix_vuv=False,
        fill_silence_to_rest=False,
        f0_shift_in_cent=0,
    ):
        """Post-process acoustic features

        The function converts acoustic features in single ndarray to tuple of
        multi-stream acoustic features.

        e.g., array -> (mgc, lf0, vuv, bap)

        If post_filter_type=``nnsvs`` is specified, learned post-filter is applied.
        However, it is recommended to use ``gv`` in general.

        Args:
            duration_modified_labels (nnmnkwii.io.hts.HTSLabelFile): HTS labels.
            acoustic_features (ndarray): Predicted acoustic features.
            post_filter_type (str): Post-filter type.
                One of ``gv``, ``merlin`` or ``nnsvs``. Recommended to use ``gv``
                for general purpose.
            trajectory_smoothing (bool): Whether to apply trajectory smoothing.
            trajectory_smoothing_cutoff (float): Cutoff frequency for trajectory smoothing
                of spectral features.
            trajectory_smoothing_cutoff_f0 (float): Cutoff frequency for trajectory
                smoothing of f0.
            vuv_threshold (float): V/UV threshold.
            force_fix_vuv (bool): Force fix V/UV.
            fill_silence_to_rest (bool): Fill silence to rest frames.
            f0_shift_in_cent (float): F0 shift in cent.

        Returns:
            tuple: Post-processed multi-stream acoustic features.
        """
        start_time = time.time()
        # Delegates to nnsvs.gen.postprocess_acoustic with the loaded scalers
        # and (optional) learned post-filter.
        multistream_features = postprocess_acoustic(
            device=self.device,
            duration_modified_labels=duration_modified_labels,
            acoustic_features=acoustic_features,
            binary_dict=self.binary_dict,
            numeric_dict=self.numeric_dict,
            acoustic_config=self.acoustic_config,
            acoustic_out_static_scaler=self.acoustic_out_static_scaler,
            postfilter_model=self.postfilter_model,
            postfilter_config=self.postfilter_config,
            postfilter_out_scaler=self.postfilter_out_scaler,
            sample_rate=self.sample_rate,
            frame_period=self.config.frame_period,
            relative_f0=self.config.acoustic.relative_f0,
            feature_type=self.feature_type,
            post_filter_type=post_filter_type,
            trajectory_smoothing=trajectory_smoothing,
            trajectory_smoothing_cutoff=trajectory_smoothing_cutoff,
            trajectory_smoothing_cutoff_f0=trajectory_smoothing_cutoff_f0,
            vuv_threshold=vuv_threshold,
            f0_shift_in_cent=f0_shift_in_cent,
            vibrato_scale=1.0,  # only valid for Sinsy-like models
            force_fix_vuv=force_fix_vuv,
            fill_silence_to_rest=fill_silence_to_rest,
        )
        self.logger.info(
            f"Elapsed time for acoustic post-processing: {time.time() - start_time:.3f} sec"
        )
        return multistream_features
def predict_waveform(
self,
multistream_features,
vocoder_type="world",
vuv_threshold=0.5,
):
"""Predict waveform from acoustic features
Args:
multistream_features (tuple): Multi-stream acoustic features.
vocoder_type (str): Vocoder type. One of ``world``, ``pwg`` or ``usfgan``.
If ``auto`` is specified, the vocoder is automatically selected.
vuv_threshold (float): V/UV threshold.
Returns:
ndarray: Predicted waveform.
"""
start_time = time.time()
if vocoder_type in ["pwg", "usfgan"] and self.vocoder is None:
raise ValueError(
"""Pre-trained vocodr model is not found.
WORLD is only supported for waveform generation"""
)
if vocoder_type == "auto":
if self.feature_type == "melf0":
assert self.vocoder is not None
vocoder_type = (
"usfgan" if isinstance(self.vocoder, USFGANWrapper) else "pwg"
)
elif self.feature_type == "world":
if self.vocoder is None:
vocoder_type = "world"
else:
vocoder_type = (
"usfgan" if isinstance(self.vocoder, USFGANWrapper) else "pwg"
)
wav = predict_waveform(
device=self.device,
multistream_features=multistream_features,
vocoder=self.vocoder,
vocoder_config=self.vocoder_config,
vocoder_in_scaler=self.vocoder_in_scaler,
sample_rate=self.sample_rate,
frame_period=self.config.frame_period,
use_world_codec=self.config.get("use_world_codec", False),
feature_type=self.feature_type,
vocoder_type=vocoder_type,
vuv_threshold=vuv_threshold,
)
self.logger.info(
f"Elapsed time for waveform generation: {time.time() - start_time:.3f} sec"
)
RT = (time.time() - start_time) / (len(wav) / self.sample_rate)
self.logger.info(f"Real-time factor for waveform generation: {RT:.3f}")
return wav
def postprocess_waveform(
self,
wav,
dtype=np.int16,
peak_norm=False,
loudness_norm=False,
target_loudness=-20,
):
"""Post-process waveform
Args:
wav (ndarray): Waveform.
dtype (dtype): Data type of waveform.
peak_norm (bool): Whether to apply peak normalization.
loudness_norm (bool): Whether to apply loudness normalization.
target_loudness (float): Target loudness in dB.
Returns:
ndarray: Post-processed waveform.
"""
start_time = time.time()
wav = postprocess_waveform(
wav=wav,
sample_rate=self.sample_rate,
dtype=dtype,
peak_norm=peak_norm,
loudness_norm=loudness_norm,
target_loudness=target_loudness,
)
self.logger.info(
f"Elapsed time for waveform post-processing: {time.time() - start_time:.3f} sec"
)
return wav
    def svs(
        self,
        labels,
        vocoder_type="world",
        post_filter_type="gv",
        trajectory_smoothing=True,
        trajectory_smoothing_cutoff=50,
        trajectory_smoothing_cutoff_f0=20,
        vuv_threshold=0.5,
        style_shift=0,
        force_fix_vuv=False,
        fill_silence_to_rest=False,
        dtype=np.int16,
        peak_norm=False,
        loudness_norm=False,
        target_loudness=-20,
        segmented_synthesis=False,
    ):
        """Synthesize waveform from HTS labels.

        Args:
            labels (nnmnkwii.io.hts.HTSLabelFile): HTS labels
            vocoder_type (str): Vocoder type. One of ``world``, ``pwg`` or ``usfgan``.
                If ``auto`` is specified, the vocoder is automatically selected.
            post_filter_type (str): Post-filter type. ``merlin``, ``gv`` or ``nnsvs``
                is supported.
            trajectory_smoothing (bool): Whether to smooth acoustic feature trajectory.
            trajectory_smoothing_cutoff (int): Cutoff frequency for trajectory smoothing.
            trajectory_smoothing_cutoff_f0 (int): Cutoff frequency for trajectory
                smoothing of f0.
            vuv_threshold (float): Threshold for VUV.
            style_shift (int): style shift parameter
            force_fix_vuv (bool): Whether to correct VUV.
            fill_silence_to_rest (bool): Fill silence to rest frames.
            dtype (np.dtype): Data type of the output waveform.
            peak_norm (bool): Whether to normalize the waveform by peak value.
            loudness_norm (bool): Whether to normalize the waveform by loudness.
            target_loudness (float): Target loudness in dB.
            segmented_synthesis (bool): Whether to use segmented synthesis.

        Returns:
            tuple: (waveform, sampling rate)
        """
        start_time = time.time()
        vocoder_type = vocoder_type.lower()
        if vocoder_type not in ["world", "pwg", "usfgan", "auto"]:
            raise ValueError(f"Unknown vocoder type: {vocoder_type}")
        if post_filter_type not in ["merlin", "nnsvs", "gv", "none"]:
            raise ValueError(f"Unknown post-filter type: {post_filter_type}")

        # Predict timings
        duration_modified_labels = self.predict_timing(labels)

        # NOTE: segmented synthesis is not well tested. There MUST be better ways
        # to do this.
        if segmented_synthesis:
            self.logger.warning(
                "Segmented synthesis is not well tested. Use it on your own risk."
            )
            duration_modified_labels_segs = segment_labels(
                duration_modified_labels,
                # the following parameters are based on experiments in the NNSVS's paper
                # tuned with Namine Ritsu's database
                silence_threshold=0.1,
                min_duration=5.0,
                force_split_threshold=5.0,
            )
            from tqdm.auto import tqdm
        else:
            duration_modified_labels_segs = [duration_modified_labels]

            # no-op stand-in so the loop below can use tqdm unconditionally
            def tqdm(x, **kwargs):
                return x

        # Run acoustic model and vocoder
        hts_frame_shift = int(self.config.frame_period * 1e4)
        wavs = []
        self.logger.info(f"Number of segments: {len(duration_modified_labels_segs)}")
        for duration_modified_labels_seg in tqdm(
            duration_modified_labels_segs,
            desc="[segment]",
            total=len(duration_modified_labels_segs),
        ):
            duration_modified_labels_seg.frame_shift = hts_frame_shift

            # Predict acoustic features
            # NOTE: if non-zero pre_f0_shift_in_cent is specified, the input pitch
            # will be shifted before running the acoustic model
            acoustic_features = self.predict_acoustic(
                duration_modified_labels_seg,
                f0_shift_in_cent=style_shift * 100,
            )

            # Post-processing for acoustic features
            # NOTE: if non-zero post_f0_shift_in_cent is specified, the output pitch
            # will be shifted as a part of post-processing
            multistream_features = self.postprocess_acoustic(
                acoustic_features=acoustic_features,
                duration_modified_labels=duration_modified_labels_seg,
                trajectory_smoothing=trajectory_smoothing,
                trajectory_smoothing_cutoff=trajectory_smoothing_cutoff,
                trajectory_smoothing_cutoff_f0=trajectory_smoothing_cutoff_f0,
                force_fix_vuv=force_fix_vuv,
                fill_silence_to_rest=fill_silence_to_rest,
                f0_shift_in_cent=-style_shift * 100,
            )

            # Generate waveform by vocoder
            wav = self.predict_waveform(
                multistream_features=multistream_features,
                vocoder_type=vocoder_type,
                vuv_threshold=vuv_threshold,
            )
            wavs.append(wav)

        # Concatenate segmented waveforms
        wav = np.concatenate(wavs, axis=0).reshape(-1)

        # Post-processing for the output waveform
        wav = self.postprocess_waveform(
            wav,
            dtype=dtype,
            peak_norm=peak_norm,
            loudness_norm=loudness_norm,
            target_loudness=target_loudness,
        )
        self.logger.info(f"Total time: {time.time() - start_time:.3f} sec")
        RT = (time.time() - start_time) / (len(wav) / self.sample_rate)
        self.logger.info(f"Total real-time factor: {RT:.3f}")
        return wav, self.sample_rate
def _warn_if_model_is_old(logger):
logger.warning(
"""It is likely you have trained you model with old NNSVS.
It is recommended to retrain your model with the latest version of NNSVS."""
)
class NEUTRINO(SPSVS):
"""NEUTRINO-like interface for singing voice synthesis
Args:
model_dir (str): model directory
device (str): device name
verbose (int): verbose level
"""
def __init__(self, model_dir, device="cpu", verbose=0):
super().__init__(model_dir, device=device, verbose=verbose)
if self.feature_type != "world":
raise RuntimeError(f"Unsupported feature type: {self.feature_type}")
if not self.config.get("use_world_codec", False):
self.logger.warning(
"WORLD coded is required to output NEUTRIN-compatible features"
)
_warn_if_model_is_old(self.logger)
@classmethod
def musicxml2label(cls, input_file):
"""Convert musicXML to full and mono HTS labels
Args:
input_file (str): musicXML file
"""
import pysinsy
contexts = pysinsy.extract_fullcontext(input_file)
full_labels = hts.HTSLabelFile.create_from_contexts(contexts)
mono_labels = full_to_mono(full_labels)
return full_labels, mono_labels
def get_num_phrases(self, labels):
"""Get number of phrases
Args:
labels (nnmnkwii.io.hts.HTSLabelFile): HTS label
Returns:
int: number of phrases
"""
phrases = label2phrases(labels)
return len(phrases)
def get_phraselist(self, full_labels, timing_labels):
"""Get phraselit from full and timing HTS labels
Args:
full_labels (nnmnkwii.io.hts.HTSLabelFile): full HTS label
timing_labels (nnmnkwii.io.hts.HTSLabelFile): timing HTS label
Returns:
str: phraselist
"""
note_indices = get_note_indices(full_labels)
phraselist = label2phrases_str(timing_labels, note_indices)
return phraselist
def predict_acoustic(
self,
full_labels,
timing_labels=None,
style_shift=0,
phrase_num=-1,
trajectory_smoothing=True,
trajectory_smoothing_cutoff=50,
trajectory_smoothing_cutoff_f0=20,
vuv_threshold=0.5,
force_fix_vuv=False,
fill_silence_to_rest=False,
):
"""Main inference of timing and acoustic predictions
Args:
full_labels (nnmnkwii.io.hts.HTSLabelFile): full HTS label
timing_labels (nnmnkwii.io.hts.HTSLabelFile): timing HTS label
style_shift (int): style shift parameter
phrase_num (int): phrase number to use for inference
trajectory_smoothing (bool): whether to apply trajectory smoothing
trajectory_smoothing_cutoff (float): cutoff frequency for trajectory smoothing
trajectory_smoothing_cutoff_f0 (float): cutoff frequency for trajectory
smoothing for f0
vuv_threshold (float): V/UV threshold
force_fix_vuv (bool): whether to force fix V/UV
fill_silence_to_rest (bool): Fill silence to rest frames.
Returns:
tuple: (f0, mgc, bap)
"""
if timing_labels is None:
self.logger.warning("'timing_labels' is not provided.")
# Run timing prediction
duration_modified_full_labels = self.predict_timing(full_labels)
timing_labels = full_to_mono(duration_modified_full_labels)
else:
# Load pre-estimated timing
duration_modified_full_labels = deepcopy(full_labels)
duration_modified_full_labels.start_times = timing_labels.start_times.copy()
duration_modified_full_labels.end_times = timing_labels.end_times.copy()
if phrase_num >= 0:
phrases = label2phrases(duration_modified_full_labels)
if phrase_num > len(phrases):
raise RuntimeError(
f"phrase_num is too large: {phrase_num} > {len(phrases)}"
)
# Use the specified phrase for inference
duration_modified_full_labels = phrases[phrase_num]
self.logger.info(f"Using phrase {phrase_num}/{len(phrases)} for inference")
# Predict acoustic features
# NOTE: if non-zero pre_f0_shift_in_cent is specified, the input pitch
# will be shifted before running the acoustic model
acoustic_features = super().predict_acoustic(
duration_modified_full_labels,
f0_shift_in_cent=style_shift * 100,
)
# Post-processing for acoustic features
# NOTE: if non-zero post_f0_shift_in_cent is specified, the output pitch
# will be shifted as a part of post-processing
multistream_features = super().postprocess_acoustic(
acoustic_features=acoustic_features,
duration_modified_labels=duration_modified_full_labels,
trajectory_smoothing=trajectory_smoothing,
trajectory_smoothing_cutoff=trajectory_smoothing_cutoff,
trajectory_smoothing_cutoff_f0=trajectory_smoothing_cutoff_f0,
vuv_threshold=vuv_threshold,
force_fix_vuv=force_fix_vuv,
fill_silence_to_rest=fill_silence_to_rest,
f0_shift_in_cent=-style_shift * 100,
)
assert len(multistream_features) == 4
mgc, lf0, vuv, bap = multistream_features
if not self.config.get("use_world_codec", False):
self.logger.warning(
"""use_world_codec is not set.
WORLD (NEUTRINO edition) does not work with the output of this model.
"""
)
# Convert lf0 to f0
f0 = np.exp(lf0.copy())
f0[vuv < vuv_threshold] = 0
# NOTE: Neutrino-compatible MGC should have negative values at the 0-th coefficient.
if mgc[:, 0].mean() > 0:
self.logger.warning("MGC 0-th coefficient is positive.")
_warn_if_model_is_old(self.logger)
# Make sure to have correct array layout and dtype
# These parameters can be used to generate waveform by WORLD
f0 = np.ascontiguousarray(f0).astype(np.float64)
mgc = np.ascontiguousarray(mgc).astype(np.float64)
bap = np.ascontiguousarray(bap).astype(np.float64)
return f0, mgc, bap
    def predict_waveform(
        self,
        f0,
        mgc,
        bap,
        vocoder_type="world",
        vuv_threshold=0.5,
        dtype=np.int16,
        peak_norm=False,
        loudness_norm=False,
        target_loudness=-20,
    ):
        """Generate waveform from acoustic features

        Args:
            f0 (ndarray): f0
            mgc (ndarray): mel-cepstrum
            bap (ndarray): band-aperiodicity
            vocoder_type (str): vocoder type
            vuv_threshold (float): V/UV threshold
            dtype (np.dtype): Data type of the output waveform.
            peak_norm (bool): Whether to normalize the waveform by peak value.
            loudness_norm (bool): Whether to normalize the waveform by loudness.
            target_loudness (float): Target loudness in dB.

        Returns:
            ndarray: waveform
        """
        # Convert NEUTRINO-like features to NNSVS's one
        # (f0, mgc, bap) -> (mgc, lf0, vuv, bap)
        vuv = (f0 > 0).astype(np.float64).reshape(-1, 1)
        lf0 = f0.copy()
        # Take log only on voiced (non-zero) frames to avoid log(0)
        lf0[np.nonzero(lf0)] = np.log(f0[np.nonzero(lf0)])
        # Linearly interpolate log-F0 over the unvoiced regions
        lf0 = interp1d(lf0, kind="slinear")
        multistream_features = (mgc, lf0, vuv, bap)
        wav = super().predict_waveform(
            multistream_features=multistream_features,
            vocoder_type=vocoder_type,
            vuv_threshold=vuv_threshold,
        )
        wav = self.postprocess_waveform(
            wav,
            dtype=dtype,
            peak_norm=peak_norm,
            loudness_norm=loudness_norm,
            target_loudness=target_loudness,
        )
        return wav
    def svs(self, labels):
        """Synthesize waveform from HTS labels

        Deprecated convenience wrapper; prefer calling ``predict_acoustic``
        followed by ``predict_waveform``.

        Args:
            labels (nnmnkwii.io.hts.HTSLabelFile): HTS labels

        Returns:
            tuple: (waveform, sample_rate)
        """
        self.logger.warning(
            "Use `predict_acoustic` and `predict_waveform` methods instead."
        )
        f0, mgc, bap = self.predict_acoustic(labels)
        wav = self.predict_waveform(f0, mgc, bap)
        return wav, self.sample_rate
| 37,016 | 36.657172 | 95 | py |
nnsvs | nnsvs-master/nnsvs/model.py | from warnings import warn
import torch
from nnsvs.base import BaseModel, PredictionType
from nnsvs.dsp import TrTimeInvFIRFilter
from nnsvs.layers.conv import ResnetBlock, WNConv1d
from nnsvs.layers.layer_norm import LayerNorm
from nnsvs.mdn import MDNLayer, mdn_get_most_probable_sigma_and_mu
from nnsvs.multistream import split_streams
from nnsvs.transformer.attentions import sequence_mask
from nnsvs.transformer.encoder import Encoder as _TransformerEncoder
from nnsvs.util import init_weights
from torch import nn
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
# Explicit public API of nnsvs.model
__all__ = [
    "FFN",
    "LSTMRNN",
    "LSTMRNNSAR",
    "MDN",
    "MDNv2",
    "RMDN",
    "Conv1dResnet",
    "Conv1dResnetMDN",
    "Conv1dResnetSAR",
    "FFConvLSTM",
    "LSTMEncoder",
    "VariancePredictor",
    "TransformerEncoder",
]
class Conv1dResnet(BaseModel):
    """Conv1d + Resnet

    The model is inspired by the MelGAN's model architecture (:cite:t:`kumar2019melgan`).
    MDN layer is added if use_mdn is True.

    Args:
        in_dim (int): the dimension of the input
        hidden_dim (int): the dimension of the hidden state
        out_dim (int): the dimension of the output
        num_layers (int): the number of layers
        init_type (str): the type of weight initialization
        use_mdn (bool): whether to use MDN or not
        num_gaussians (int): the number of gaussians in MDN
        dim_wise (bool): whether to use dim-wise or not
        in_ph_start_idx (int): start index of the phoneme one-hot block in the input
        in_ph_end_idx (int): end index of the phoneme one-hot block in the input
        embed_dim (int): if not None, the phoneme one-hot block is replaced by a
            learned embedding of this dimension
    """

    def __init__(
        self,
        in_dim,
        hidden_dim,
        out_dim,
        num_layers=4,
        init_type="none",
        use_mdn=False,
        num_gaussians=8,
        dim_wise=False,
        in_ph_start_idx: int = 1,
        in_ph_end_idx: int = 50,
        embed_dim=None,
        **kwargs,
    ):
        super().__init__()
        self.in_dim = in_dim
        self.out_dim = out_dim
        self.use_mdn = use_mdn
        self.in_ph_start_idx = in_ph_start_idx
        self.in_ph_end_idx = in_ph_end_idx
        # number of phoneme classes covered by the one-hot block
        self.num_vocab = in_ph_end_idx - in_ph_start_idx
        self.embed_dim = embed_dim

        if "dropout" in kwargs:
            warn(
                "dropout argument in Conv1dResnet is deprecated"
                " and will be removed in future versions"
            )

        if self.embed_dim is not None:
            # Replace the phoneme one-hot block with a learned embedding;
            # the remaining features are projected to the same dimension
            # and added to the embedding (see forward()).
            assert in_dim > self.num_vocab
            self.emb = nn.Embedding(self.num_vocab, embed_dim)
            self.fc_in = nn.Linear(in_dim - self.num_vocab, embed_dim)
            conv_in_dim = embed_dim
        else:
            conv_in_dim = in_dim

        model = [
            nn.ReflectionPad1d(3),
            WNConv1d(conv_in_dim, hidden_dim, kernel_size=7, padding=0),
        ]
        for n in range(num_layers):
            # exponentially growing dilation: 1, 2, 4, ...
            model.append(ResnetBlock(hidden_dim, dilation=2 ** n))

        # When MDN is used, the last conv emits hidden features for the MDN
        # layer instead of the final output features.
        last_conv_out_dim = hidden_dim if use_mdn else out_dim
        model += [
            nn.LeakyReLU(0.2),
            nn.ReflectionPad1d(3),
            WNConv1d(hidden_dim, last_conv_out_dim, kernel_size=7, padding=0),
        ]
        self.model = nn.Sequential(*model)

        if self.use_mdn:
            self.mdn_layer = MDNLayer(
                in_dim=hidden_dim,
                out_dim=out_dim,
                num_gaussians=num_gaussians,
                dim_wise=dim_wise,
            )
        else:
            self.mdn_layer = None

        init_weights(self, init_type)

    def prediction_type(self):
        # Distribution output when MDN is enabled, point estimate otherwise.
        return (
            PredictionType.PROBABILISTIC
            if self.use_mdn
            else PredictionType.DETERMINISTIC
        )

    def forward(self, x, lengths=None, y=None):
        """Forward step

        Args:
            x (torch.Tensor): the input tensor
            lengths (torch.Tensor): the lengths of the input tensor
            y (torch.Tensor): the optional target tensor

        Returns:
            torch.Tensor: the output tensor
        """
        if self.embed_dim is not None:
            # Split input into (leading features, phoneme one-hot, trailing features)
            x_first, x_ph_onehot, x_last = torch.split(
                x,
                [
                    self.in_ph_start_idx,
                    self.num_vocab,
                    self.in_dim - self.num_vocab - self.in_ph_start_idx,
                ],
                dim=-1,
            )
            x_ph = torch.argmax(x_ph_onehot, dim=-1)
            # Make sure to have one-hot vector
            assert (x_ph_onehot.sum(-1) <= 1).all()
            x = self.emb(x_ph) + self.fc_in(torch.cat([x_first, x_last], dim=-1))

        # Conv layers operate on (B, C, T); transpose in and out.
        out = self.model(x.transpose(1, 2)).transpose(1, 2)

        if self.use_mdn:
            return self.mdn_layer(out)
        else:
            return out

    def inference(self, x, lengths=None):
        """Inference step

        Find the most likely mean and variance if use_mdn is True

        Args:
            x (torch.Tensor): the input tensor
            lengths (torch.Tensor): the lengths of the input tensor

        Returns:
            tuple: mean and variance of the output features
        """
        if self.use_mdn:
            log_pi, log_sigma, mu = self(x, lengths)
            sigma, mu = mdn_get_most_probable_sigma_and_mu(log_pi, log_sigma, mu)
            return mu, sigma
        else:
            return self(x, lengths)
@torch.no_grad()
def _shallow_ar_inference(out, stream_sizes, analysis_filts):
    """Undo the per-stream shallow-AR analysis filtering at inference time.

    Each stream of the network output is passed through the IIR filter that
    inverts the corresponding trainable FIR analysis filter.

    Args:
        out (torch.Tensor): network output of shape (B, T, C)
        stream_sizes (list): size of each feature stream
        analysis_filts (nn.ModuleList): per-stream analysis FIR filters

    Returns:
        torch.Tensor: filtered output of shape (B, T, C)
    """
    from torchaudio.functional import lfilter
    out_streams = split_streams(out, stream_sizes)
    # back to conv1d friendly (B, C, T) format
    out_streams = map(lambda x: x.transpose(1, 2), out_streams)
    out_syn = []
    for sidx, os in enumerate(out_streams):
        out_stream_syn = torch.zeros_like(os)
        a = analysis_filts[sidx].get_filt_coefs()
        # apply IIR filter for each dimension
        for idx in range(os.shape[1]):
            # NOTE: scipy.signal.lfilter accepts b, a in order,
            # but torchaudio expects the opposite; a, b in order
            ai = a[idx].view(-1).flip(0)
            bi = torch.zeros_like(ai)
            bi[0] = 1
            out_stream_syn[:, idx, :] = lfilter(os[:, idx, :], ai, bi, clamp=False)
        out_syn += [out_stream_syn]
    out_syn = torch.cat(out_syn, 1)
    return out_syn.transpose(1, 2)
class Conv1dResnetSAR(Conv1dResnet):
    """Conv1dResnet with shallow AR structure

    At training time the targets are pre-filtered per stream with trainable
    FIR analysis filters (``preprocess_target``); at inference time the
    inverse filters are applied to the network output.

    Args:
        in_dim (int): the dimension of the input
        hidden_dim (int): the dimension of the hidden state
        out_dim (int): the dimension of the output
        num_layers (int): the number of layers
        stream_sizes (list): Stream sizes
        ar_orders (list): Filter dimensions for each stream.
        init_type (str): the type of weight initialization
    """
    def __init__(
        self,
        in_dim,
        hidden_dim,
        out_dim,
        num_layers=4,
        stream_sizes=None,
        ar_orders=None,
        init_type="none",
        **kwargs,
    ):
        super().__init__(
            in_dim=in_dim, hidden_dim=hidden_dim, out_dim=out_dim, num_layers=num_layers
        )
        if "dropout" in kwargs:
            warn(
                "dropout argument in Conv1dResnetSAR is deprecated"
                " and will be removed in future versions"
            )
        # NOTE(review): the defaults presumably correspond to the usual
        # mgc/lf0/vuv/bap stream layout — confirm against the feature config.
        if stream_sizes is None:
            stream_sizes = [180, 3, 1, 15]
        if ar_orders is None:
            ar_orders = [20, 200, 20, 20]
        self.stream_sizes = stream_sizes
        init_weights(self, init_type)
        # One trainable FIR analysis filter (order K) per stream
        self.analysis_filts = nn.ModuleList()
        for s, K in zip(stream_sizes, ar_orders):
            self.analysis_filts += [TrTimeInvFIRFilter(s, K + 1)]
    def preprocess_target(self, y):
        """Apply the per-stream analysis filters to the target features.

        Args:
            y (torch.Tensor): target features of shape (B, T, out_dim)

        Returns:
            torch.Tensor: filtered target features of the same shape
        """
        assert sum(self.stream_sizes) == y.shape[-1]
        ys = split_streams(y, self.stream_sizes)
        for idx, yi in enumerate(ys):
            ys[idx] = self.analysis_filts[idx](yi.transpose(1, 2)).transpose(1, 2)
        return torch.cat(ys, -1)
    def inference(self, x, lengths=None, y=None):
        """Inference step: run the network and undo the analysis filtering."""
        out = self.model(x.transpose(1, 2)).transpose(1, 2)
        return _shallow_ar_inference(out, self.stream_sizes, self.analysis_filts)
class FFN(BaseModel):
    """Feed-forward network

    A simple stack of fully-connected layers with ReLU activations and
    dropout in between.

    Args:
        in_dim (int): the dimension of the input
        hidden_dim (int): the dimension of the hidden state
        out_dim (int): the dimension of the output
        num_layers (int): the number of hidden layers
        dropout (float): dropout rate
        init_type (str): the type of weight initialization
        last_sigmoid (bool): whether to apply sigmoid on the output
    """
    def __init__(
        self,
        in_dim,
        hidden_dim,
        out_dim,
        num_layers=2,
        dropout=0.0,
        init_type="none",
        last_sigmoid=False,
    ):
        super(FFN, self).__init__()
        self.first_linear = nn.Linear(in_dim, hidden_dim)
        hidden = [nn.Linear(hidden_dim, hidden_dim) for _ in range(num_layers)]
        self.hidden_layers = nn.ModuleList(hidden)
        self.last_linear = nn.Linear(hidden_dim, out_dim)
        self.relu = nn.ReLU()
        self.dropout = nn.Dropout(dropout)
        self.last_sigmoid = last_sigmoid
        init_weights(self, init_type)
    def forward(self, x, lengths=None, y=None):
        """Forward step

        Args:
            x (torch.Tensor): the input tensor
            lengths (torch.Tensor): the lengths of the input tensor (unused)
            y (torch.Tensor): the optional target tensor (unused)

        Returns:
            torch.Tensor: the output tensor
        """
        hidden = self.relu(self.first_linear(x))
        for layer in self.hidden_layers:
            hidden = self.dropout(self.relu(layer(hidden)))
        out = self.last_linear(hidden)
        if self.last_sigmoid:
            return torch.sigmoid(out)
        return out
# Backward-compatible alias: FFN was previously exported as FeedForwardNet.
FeedForwardNet = FFN
class LSTMRNN(BaseModel):
    """LSTM-based recurrent neural network

    Args:
        in_dim (int): the dimension of the input
        hidden_dim (int): the dimension of the hidden state
        out_dim (int): the dimension of the output
        num_layers (int): the number of layers
        bidirectional (bool): whether to use bidirectional LSTM
        dropout (float): dropout rate
        init_type (str): the type of weight initialization
    """
    def __init__(
        self,
        in_dim,
        hidden_dim,
        out_dim,
        num_layers=1,
        bidirectional=True,
        dropout=0.0,
        init_type="none",
    ):
        super(LSTMRNN, self).__init__()
        self.hidden_dim = hidden_dim
        self.num_layers = num_layers
        self.num_direction = 2 if bidirectional else 1
        self.lstm = nn.LSTM(
            in_dim,
            hidden_dim,
            num_layers,
            bidirectional=bidirectional,
            batch_first=True,
            dropout=dropout,
        )
        # LSTM output is num_direction * hidden_dim wide
        self.hidden2out = nn.Linear(self.num_direction * self.hidden_dim, out_dim)
        init_weights(self, init_type)
    def forward(self, x, lengths, y=None):
        """Forward step

        Args:
            x (torch.Tensor): the input tensor of shape (B, T, in_dim)
            lengths (torch.Tensor): the lengths of the input tensor
            y (torch.Tensor): the optional target tensor (unused)

        Returns:
            torch.Tensor: the output tensor
        """
        # pack_padded_sequence requires lengths on the CPU
        if isinstance(lengths, torch.Tensor):
            lengths = lengths.to("cpu")
        # NOTE(review): enforce_sorted defaults to True, so batches must be
        # sorted by decreasing length — confirm callers guarantee this.
        x = pack_padded_sequence(x, lengths, batch_first=True)
        out, _ = self.lstm(x)
        out, _ = pad_packed_sequence(out, batch_first=True)
        out = self.hidden2out(out)
        return out
class LSTMRNNSAR(LSTMRNN):
    """LSTM-RNN with shallow AR structure

    At training time the targets are pre-filtered per stream with trainable
    FIR analysis filters (``preprocess_target``); at inference time the
    inverse filters are applied to the network output.

    Args:
        in_dim (int): the dimension of the input
        hidden_dim (int): the dimension of the hidden state
        out_dim (int): the dimension of the output
        num_layers (int): the number of layers
        bidirectional (bool): whether to use bidirectional LSTM
        dropout (float): dropout rate
        stream_sizes (list): Stream sizes
        ar_orders (list): Filter dimensions for each stream.
        init_type (str): the type of weight initialization
    """
    def __init__(
        self,
        in_dim,
        hidden_dim,
        out_dim,
        num_layers=1,
        bidirectional=True,
        dropout=0.0,
        stream_sizes=None,
        ar_orders=None,
        init_type="none",
    ):
        super().__init__(
            in_dim, hidden_dim, out_dim, num_layers, bidirectional, dropout, init_type
        )
        if stream_sizes is None:
            stream_sizes = [180, 3, 1, 15]
        if ar_orders is None:
            ar_orders = [20, 200, 20, 20]
        self.stream_sizes = stream_sizes
        # One trainable FIR analysis filter (order K) per stream
        self.analysis_filts = nn.ModuleList()
        for s, K in zip(stream_sizes, ar_orders):
            self.analysis_filts += [TrTimeInvFIRFilter(s, K + 1)]
    def preprocess_target(self, y):
        """Apply the per-stream analysis filters to the target features.

        Args:
            y (torch.Tensor): target features of shape (B, T, out_dim)

        Returns:
            torch.Tensor: filtered target features of the same shape
        """
        assert sum(self.stream_sizes) == y.shape[-1]
        ys = split_streams(y, self.stream_sizes)
        for idx, yi in enumerate(ys):
            ys[idx] = self.analysis_filts[idx](yi.transpose(1, 2)).transpose(1, 2)
        return torch.cat(ys, -1)
    def inference(self, x, lengths=None, y=None):
        """Inference step: run the RNN and undo the analysis filtering."""
        out = self.forward(x, lengths)
        return _shallow_ar_inference(out, self.stream_sizes, self.analysis_filts)
class RMDN(BaseModel):
    """RNN-based mixture density networks (MDN)

    Linear -> ReLU -> LSTM -> MDN output layer.

    Args:
        in_dim (int): the dimension of the input
        hidden_dim (int): the dimension of the hidden state
        out_dim (int): the dimension of the output
        num_layers (int): the number of layers
        bidirectional (bool): whether to use bidirectional LSTM
        dropout (float): dropout rate
        num_gaussians (int): the number of gaussians
        dim_wise (bool): whether to use dimension-wise or not
        init_type (str): the type of weight initialization
    """
    def __init__(
        self,
        in_dim,
        hidden_dim,
        out_dim,
        num_layers=1,
        bidirectional=True,
        dropout=0.0,
        num_gaussians=8,
        dim_wise=False,
        init_type="none",
    ):
        super(RMDN, self).__init__()
        self.linear = nn.Linear(in_dim, hidden_dim)
        self.relu = nn.ReLU()
        self.num_direction = 2 if bidirectional else 1
        self.lstm = nn.LSTM(
            hidden_dim,
            hidden_dim,
            num_layers,
            bidirectional=bidirectional,
            batch_first=True,
            dropout=dropout,
        )
        # MDN input is the (possibly bidirectional) LSTM output
        self.mdn = MDNLayer(
            in_dim=self.num_direction * hidden_dim,
            out_dim=out_dim,
            num_gaussians=num_gaussians,
            dim_wise=dim_wise,
        )
        init_weights(self, init_type)
    def prediction_type(self):
        """This model is always probabilistic."""
        return PredictionType.PROBABILISTIC
    def forward(self, x, lengths, y=None):
        """Forward step

        Args:
            x (torch.Tensor): the input tensor
            lengths (torch.Tensor): the lengths of the input tensor
            y (torch.Tensor): the optional target tensor (unused)

        Returns:
            tuple: MDN parameters ``(log_pi, log_sigma, mu)``
        """
        # pack_padded_sequence requires lengths on the CPU
        if isinstance(lengths, torch.Tensor):
            lengths = lengths.to("cpu")
        out = self.linear(x)
        sequence = pack_padded_sequence(self.relu(out), lengths, batch_first=True)
        out, _ = self.lstm(sequence)
        out, _ = pad_packed_sequence(out, batch_first=True)
        out = self.mdn(out)
        return out
    def inference(self, x, lengths=None):
        """Inference step

        Find the most likely mean and variance

        Args:
            x (torch.Tensor): the input tensor
            lengths (torch.Tensor): the lengths of the input tensor

        Returns:
            tuple: mean and variance of the output features
        """
        log_pi, log_sigma, mu = self.forward(x, lengths)
        sigma, mu = mdn_get_most_probable_sigma_and_mu(log_pi, log_sigma, mu)
        return mu, sigma
class MDN(BaseModel):
    """Mixture density networks (MDN) on top of a feed-forward stack.

    .. warning::

        It is recommended to use MDNv2 instead, unless you want to
        fine-tune from an old checkpoint of MDN.

    Args:
        in_dim (int): the dimension of the input
        hidden_dim (int): the dimension of the hidden state
        out_dim (int): the dimension of the output
        num_layers (int): the number of layers
        num_gaussians (int): the number of gaussians
        dim_wise (bool): whether to use dimension-wise or not
        init_type (str): the type of weight initialization
    """
    def __init__(
        self,
        in_dim,
        hidden_dim,
        out_dim,
        num_layers=1,
        num_gaussians=8,
        dim_wise=False,
        init_type="none",
        **kwargs,
    ):
        super(MDN, self).__init__()
        if "dropout" in kwargs:
            warn(
                "dropout argument in MDN is deprecated"
                " and will be removed in future versions"
            )
        # Hidden feed-forward stack: one Linear + ReLU pair per layer
        layers = [nn.Linear(in_dim, hidden_dim), nn.ReLU()]
        for _ in range(num_layers - 1):
            layers.extend([nn.Linear(hidden_dim, hidden_dim), nn.ReLU()])
        # MDN output layer producing the mixture parameters
        layers.append(
            MDNLayer(
                in_dim=hidden_dim,
                out_dim=out_dim,
                num_gaussians=num_gaussians,
                dim_wise=dim_wise,
            )
        )
        self.model = nn.Sequential(*layers)
        init_weights(self, init_type)
    def prediction_type(self):
        """This model is always probabilistic."""
        return PredictionType.PROBABILISTIC
    def forward(self, x, lengths=None, y=None):
        """Forward step

        Args:
            x (torch.Tensor): the input tensor
            lengths (torch.Tensor): the lengths of the input tensor (unused)
            y (torch.Tensor): the optional target tensor (unused)

        Returns:
            tuple: MDN parameters ``(log_pi, log_sigma, mu)``
        """
        return self.model(x)
    def inference(self, x, lengths=None):
        """Inference step

        Find the most likely mean and variance

        Args:
            x (torch.Tensor): the input tensor
            lengths (torch.Tensor): the lengths of the input tensor

        Returns:
            tuple: mean and variance of the output features
        """
        params = self.forward(x, lengths)
        sigma, mu = mdn_get_most_probable_sigma_and_mu(*params)
        return mu, sigma
class MDNv2(BaseModel):
    """Mixture density networks (MDN) with FFN

    MDN (v1) + Dropout after each hidden activation.

    Args:
        in_dim (int): the dimension of the input
        hidden_dim (int): the dimension of the hidden state
        out_dim (int): the dimension of the output
        num_layers (int): the number of layers
        dropout (float): dropout rate
        num_gaussians (int): the number of gaussians
        dim_wise (bool): whether to use dimension-wise or not
        init_type (str): the type of weight initialization
    """
    def __init__(
        self,
        in_dim,
        hidden_dim,
        out_dim,
        num_layers=1,
        dropout=0.5,
        num_gaussians=8,
        dim_wise=False,
        init_type="none",
    ):
        super(MDNv2, self).__init__()
        # Linear + ReLU + Dropout per hidden layer
        model = [nn.Linear(in_dim, hidden_dim), nn.ReLU(), nn.Dropout(dropout)]
        if num_layers > 1:
            for _ in range(num_layers - 1):
                model += [
                    nn.Linear(hidden_dim, hidden_dim),
                    nn.ReLU(),
                    nn.Dropout(dropout),
                ]
        model += [
            MDNLayer(
                in_dim=hidden_dim,
                out_dim=out_dim,
                num_gaussians=num_gaussians,
                dim_wise=dim_wise,
            )
        ]
        self.model = nn.Sequential(*model)
        init_weights(self, init_type)
    def prediction_type(self):
        """This model is always probabilistic."""
        return PredictionType.PROBABILISTIC
    def forward(self, x, lengths=None, y=None):
        """Forward step

        Args:
            x (torch.Tensor): the input tensor
            lengths (torch.Tensor): the lengths of the input tensor (unused)
            y (torch.Tensor): the optional target tensor (unused)

        Returns:
            tuple: MDN parameters ``(log_pi, log_sigma, mu)``
        """
        return self.model(x)
    def inference(self, x, lengths=None):
        """Inference step

        Find the most likely mean and variance

        Args:
            x (torch.Tensor): the input tensor
            lengths (torch.Tensor): the lengths of the input tensor

        Returns:
            tuple: mean and variance of the output features
        """
        log_pi, log_sigma, mu = self.forward(x, lengths)
        sigma, mu = mdn_get_most_probable_sigma_and_mu(log_pi, log_sigma, mu)
        return mu, sigma
class Conv1dResnetMDN(BaseModel):
    """Conv1dResnet with MDN output layer

    .. warning::

        Will be removed in v0.1.0. Use Conv1dResNet with ``use_mdn=True`` instead.

    Args:
        in_dim (int): the dimension of the input
        hidden_dim (int): the dimension of the hidden state
        out_dim (int): the dimension of the output
        num_layers (int): the number of layers
        num_gaussians (int): the number of gaussians
        dim_wise (bool): whether to use dimension-wise or not
        init_type (str): the type of weight initialization
    """
    def __init__(
        self,
        in_dim,
        hidden_dim,
        out_dim,
        num_layers=4,
        num_gaussians=8,
        dim_wise=False,
        init_type="none",
        **kwargs,
    ):
        super().__init__()
        if "dropout" in kwargs:
            # BUGFIX: the warning previously named Conv1dResnet instead of
            # this class (cf. Conv1dResnetSAR, which names itself).
            warn(
                "dropout argument in Conv1dResnetMDN is deprecated"
                " and will be removed in future versions"
            )
        # Conv1dResnet backbone (hidden_dim outputs) followed by an MDN head
        model = [
            Conv1dResnet(
                in_dim=in_dim,
                hidden_dim=hidden_dim,
                out_dim=hidden_dim,
                num_layers=num_layers,
            ),
            nn.ReLU(),
            MDNLayer(
                in_dim=hidden_dim,
                out_dim=out_dim,
                num_gaussians=num_gaussians,
                dim_wise=dim_wise,
            ),
        ]
        self.model = nn.Sequential(*model)
        init_weights(self, init_type)
    def prediction_type(self):
        """This model is always probabilistic."""
        return PredictionType.PROBABILISTIC
    def forward(self, x, lengths=None, y=None):
        """Forward step

        Args:
            x (torch.Tensor): the input tensor
            lengths (torch.Tensor): the lengths of the input tensor (unused)
            y (torch.Tensor): the optional target tensor (unused)

        Returns:
            tuple: MDN parameters ``(log_pi, log_sigma, mu)``
        """
        return self.model(x)
    def inference(self, x, lengths=None):
        """Inference step

        Find the most likely mean and variance

        Args:
            x (torch.Tensor): the input tensor
            lengths (torch.Tensor): the lengths of the input tensor

        Returns:
            tuple: mean and variance of the output features
        """
        log_pi, log_sigma, mu = self.forward(x, lengths)
        sigma, mu = mdn_get_most_probable_sigma_and_mu(log_pi, log_sigma, mu)
        return mu, sigma
class FFConvLSTM(BaseModel):
    """FFN + Conv1d + LSTM

    A model proposed in :cite:t:`hono2021sinsy` without residual F0 prediction.

    Args:
        in_dim (int): the dimension of the input
        ff_hidden_dim (int): the dimension of the hidden state of the FFN
        conv_hidden_dim (int): the dimension of the hidden state of the conv1d
        lstm_hidden_dim (int): the dimension of the hidden state of the LSTM
        out_dim (int): the dimension of the output
        dropout (float): dropout rate
        num_lstm_layers (int): the number of layers of the LSTM
        bidirectional (bool): whether to use bidirectional LSTM
        init_type (str): the type of weight initialization
        use_mdn (bool): whether to use MDN or not
        dim_wise (bool): whether to use dimension-wise or not
        num_gaussians (int): the number of gaussians
        in_ph_start_idx (int): the start index of phoneme identity in a hed file
        in_ph_end_idx (int): the end index of phoneme identity in a hed file
        embed_dim (int): the dimension of the phoneme embedding
    """
    def __init__(
        self,
        in_dim,
        ff_hidden_dim=2048,
        conv_hidden_dim=1024,
        lstm_hidden_dim=256,
        out_dim=67,
        dropout=0.0,
        num_lstm_layers=2,
        bidirectional=True,
        init_type="none",
        use_mdn=False,
        dim_wise=True,
        num_gaussians=4,
        in_ph_start_idx: int = 1,
        in_ph_end_idx: int = 50,
        embed_dim=None,
    ):
        super().__init__()
        self.in_dim = in_dim
        self.out_dim = out_dim
        self.in_ph_start_idx = in_ph_start_idx
        self.in_ph_end_idx = in_ph_end_idx
        # Size of the phoneme vocabulary (width of the one-hot sub-vector)
        self.num_vocab = in_ph_end_idx - in_ph_start_idx
        self.embed_dim = embed_dim
        self.use_mdn = use_mdn
        if self.embed_dim is not None:
            # Learned phoneme embedding + projection of the other features
            assert in_dim > self.num_vocab
            self.emb = nn.Embedding(self.num_vocab, embed_dim)
            self.fc_in = nn.Linear(in_dim - self.num_vocab, embed_dim)
            ff_in_dim = embed_dim
        else:
            ff_in_dim = in_dim
        self.ff = nn.Sequential(
            nn.Linear(ff_in_dim, ff_hidden_dim),
            nn.ReLU(),
            nn.Linear(ff_hidden_dim, ff_hidden_dim),
            nn.ReLU(),
            nn.Linear(ff_hidden_dim, ff_hidden_dim),
            nn.ReLU(),
        )
        self.conv = nn.Sequential(
            nn.ReflectionPad1d(3),
            nn.Conv1d(ff_hidden_dim, conv_hidden_dim, kernel_size=7, padding=0),
            nn.BatchNorm1d(conv_hidden_dim),
            nn.ReLU(),
            nn.ReflectionPad1d(3),
            nn.Conv1d(conv_hidden_dim, conv_hidden_dim, kernel_size=7, padding=0),
            nn.BatchNorm1d(conv_hidden_dim),
            nn.ReLU(),
            nn.ReflectionPad1d(3),
            nn.Conv1d(conv_hidden_dim, conv_hidden_dim, kernel_size=7, padding=0),
            nn.BatchNorm1d(conv_hidden_dim),
            nn.ReLU(),
        )
        num_direction = 2 if bidirectional else 1
        # BUGFIX: propagate the ``bidirectional`` flag to the LSTM. It was
        # hard-coded to True, so with bidirectional=False the output layer
        # (sized num_direction * lstm_hidden_dim) did not match the LSTM
        # output width.
        self.lstm = nn.LSTM(
            conv_hidden_dim,
            lstm_hidden_dim,
            num_lstm_layers,
            bidirectional=bidirectional,
            batch_first=True,
            dropout=dropout,
        )
        last_in_dim = num_direction * lstm_hidden_dim
        if self.use_mdn:
            assert dim_wise
            self.fc = MDNLayer(
                in_dim=last_in_dim,
                out_dim=out_dim,
                num_gaussians=num_gaussians,
                dim_wise=dim_wise,
            )
        else:
            self.fc = nn.Linear(last_in_dim, out_dim)
        init_weights(self, init_type)
    def prediction_type(self):
        """Return the prediction type (probabilistic iff MDN is used)."""
        return (
            PredictionType.PROBABILISTIC
            if self.use_mdn
            else PredictionType.DETERMINISTIC
        )
    def forward(self, x, lengths=None, y=None):
        """Forward step

        Args:
            x (torch.Tensor): the input tensor of shape (B, T, in_dim)
            lengths (torch.Tensor): the lengths of the input tensor
            y (torch.Tensor): the optional target tensor (unused)

        Returns:
            torch.Tensor: the output tensor, or the MDN parameters
            ``(log_pi, log_sigma, mu)`` if ``use_mdn`` is True
        """
        # pack_padded_sequence requires lengths on the CPU
        if isinstance(lengths, torch.Tensor):
            lengths = lengths.to("cpu")
        if self.embed_dim is not None:
            # Split the input into (pre-phoneme, phoneme one-hot, post-phoneme)
            x_first, x_ph_onehot, x_last = torch.split(
                x,
                [
                    self.in_ph_start_idx,
                    self.num_vocab,
                    self.in_dim - self.num_vocab - self.in_ph_start_idx,
                ],
                dim=-1,
            )
            x_ph = torch.argmax(x_ph_onehot, dim=-1)
            # Make sure to have one-hot vector
            assert (x_ph_onehot.sum(-1) <= 1).all()
            x = self.emb(x_ph) + self.fc_in(torch.cat([x_first, x_last], dim=-1))
        out = self.ff(x)
        # Conv1d wants (B, C, T); transpose back to (B, T, C) afterwards
        out = self.conv(out.transpose(1, 2)).transpose(1, 2)
        sequence = pack_padded_sequence(out, lengths, batch_first=True)
        out, _ = self.lstm(sequence)
        out, _ = pad_packed_sequence(out, batch_first=True)
        return self.fc(out)
    def inference(self, x, lengths=None):
        """Inference step

        Find the most likely mean and variance if use_mdn is True

        Args:
            x (torch.Tensor): the input tensor
            lengths (torch.Tensor): the lengths of the input tensor

        Returns:
            tuple: mean and variance of the output features if ``use_mdn``
            is True, otherwise the deterministic output tensor
        """
        if self.use_mdn:
            log_pi, log_sigma, mu = self(x, lengths)
            sigma, mu = mdn_get_most_probable_sigma_and_mu(log_pi, log_sigma, mu)
            return mu, sigma
        else:
            return self(x, lengths)
class VariancePredictor(BaseModel):
    """Variance predictor in :cite:t:`ren2020fastspeech`.

    The model is composed of stacks of Conv1d + ReLU + LayerNorm layers.
    The model can be used for duration or pitch prediction.

    Args:
        in_dim (int): the input dimension
        out_dim (int): the output dimension
        num_layers (int): the number of layers
        hidden_dim (int): the hidden dimension
        kernel_size (int): the kernel size
        dropout (float): the dropout rate
        init_type (str): the initialization type
        use_mdn (bool): whether to use MDN or not
        num_gaussians (int): the number of gaussians
        dim_wise (bool): whether to use dim-wise or not
        in_ph_start_idx (int): the start index of phoneme identity in a hed file
        in_ph_end_idx (int): the end index of phoneme identity in a hed file
        embed_dim (int): the dimension of the phoneme embedding
        mask_indices (list): the input feature indices to be masked.
            e.g., specify pitch_idx to mask pitch features.
    """
    def __init__(
        self,
        in_dim,
        out_dim,
        num_layers=5,
        hidden_dim=256,
        kernel_size=5,
        dropout=0.5,
        init_type="none",
        use_mdn=False,
        num_gaussians=1,
        dim_wise=False,
        in_ph_start_idx: int = 1,
        in_ph_end_idx: int = 50,
        embed_dim=None,
        mask_indices=None,
    ):
        super().__init__()
        self.in_dim = in_dim
        self.out_dim = out_dim
        # NOTE: a duplicated ``self.use_mdn = use_mdn`` assignment was removed
        self.use_mdn = use_mdn
        self.in_ph_start_idx = in_ph_start_idx
        self.in_ph_end_idx = in_ph_end_idx
        # Size of the phoneme vocabulary (width of the one-hot sub-vector)
        self.num_vocab = in_ph_end_idx - in_ph_start_idx
        self.embed_dim = embed_dim
        self.mask_indices = mask_indices
        if self.embed_dim is not None:
            # Learned phoneme embedding + projection of the other features
            assert in_dim > self.num_vocab
            self.emb = nn.Embedding(self.num_vocab, embed_dim)
            self.fc_in = nn.Linear(in_dim - self.num_vocab, embed_dim)
            in_dim = embed_dim
        conv = nn.ModuleList()
        for idx in range(num_layers):
            in_channels = in_dim if idx == 0 else hidden_dim
            conv += [
                nn.Sequential(
                    nn.Conv1d(
                        in_channels,
                        hidden_dim,
                        kernel_size,
                        stride=1,
                        padding=(kernel_size - 1) // 2,
                    ),
                    nn.ReLU(),
                    LayerNorm(hidden_dim, dim=1),
                    nn.Dropout(dropout),
                )
            ]
        self.conv = nn.Sequential(*conv)
        if self.use_mdn:
            self.mdn_layer = MDNLayer(
                hidden_dim, out_dim, num_gaussians=num_gaussians, dim_wise=dim_wise
            )
        else:
            self.fc = nn.Linear(hidden_dim, out_dim)
        init_weights(self, init_type)
    def prediction_type(self):
        """Return the prediction type (probabilistic iff MDN is used)."""
        return (
            PredictionType.PROBABILISTIC
            if self.use_mdn
            else PredictionType.DETERMINISTIC
        )
    def forward(self, x, lengths=None, y=None):
        """Forward step

        Args:
            x (torch.Tensor): the input tensor of shape (B, T, in_dim)
            lengths (torch.Tensor): the lengths of the input tensor (unused)
            y (torch.Tensor): the optional target tensor (unused)

        Returns:
            torch.Tensor: the output tensor, or the MDN parameters
            ``(log_pi, log_sigma, mu)`` if ``use_mdn`` is True
        """
        # Masking specified features
        # BUGFIX: clone before zeroing so the caller's tensor is not
        # modified in-place; outputs are unchanged.
        if self.mask_indices is not None:
            x = x.clone()
            for idx in self.mask_indices:
                x[:, :, idx] *= 0.0
        if self.embed_dim is not None:
            # Split the input into (pre-phoneme, phoneme one-hot, post-phoneme)
            x_first, x_ph_onehot, x_last = torch.split(
                x,
                [
                    self.in_ph_start_idx,
                    self.num_vocab,
                    self.in_dim - self.num_vocab - self.in_ph_start_idx,
                ],
                dim=-1,
            )
            x_ph = torch.argmax(x_ph_onehot, dim=-1)
            # Make sure to have one-hot vector
            assert (x_ph_onehot.sum(-1) <= 1).all()
            x = self.emb(x_ph) + self.fc_in(torch.cat([x_first, x_last], dim=-1))
        # Conv1d wants (B, C, T); transpose back to (B, T, C) afterwards
        out = self.conv(x.transpose(1, 2)).transpose(1, 2)
        if self.use_mdn:
            return self.mdn_layer(out)
        else:
            return self.fc(out)
    def inference(self, x, lengths=None):
        """Inference step

        Find the most likely mean and variance if use_mdn is True

        Args:
            x (torch.Tensor): the input tensor
            lengths (torch.Tensor): the lengths of the input tensor

        Returns:
            tuple: mean and variance of the output features if ``use_mdn``
            is True, otherwise the deterministic output tensor
        """
        if self.use_mdn:
            log_pi, log_sigma, mu = self(x, lengths)
            sigma, mu = mdn_get_most_probable_sigma_and_mu(log_pi, log_sigma, mu)
            return mu, sigma
        else:
            return self(x, lengths)
class LSTMEncoder(BaseModel):
    """LSTM encoder

    A simple LSTM-based encoder

    Args:
        in_dim (int): the input dimension
        hidden_dim (int): the hidden dimension
        out_dim (int): the output dimension
        num_layers (int): the number of layers
        bidirectional (bool): whether to use bidirectional or not
        dropout (float): the dropout rate
        init_type (str): the initialization type
        in_ph_start_idx (int): the start index of phonetic context in a hed file
        in_ph_end_idx (int): the end index of phonetic context in a hed file
        embed_dim (int): the embedding dimension. If None, no phoneme
            embedding is used and the raw input features are fed to the LSTM.
    """
    def __init__(
        self,
        in_dim: int,
        hidden_dim: int,
        out_dim: int,
        num_layers: int = 1,
        bidirectional: bool = True,
        dropout: float = 0.0,
        init_type: str = "none",
        in_ph_start_idx: int = 1,
        in_ph_end_idx: int = 50,
        embed_dim=None,
    ):
        super(LSTMEncoder, self).__init__()
        self.in_dim = in_dim
        self.in_ph_start_idx = in_ph_start_idx
        self.in_ph_end_idx = in_ph_end_idx
        # Size of the phoneme vocabulary (width of the one-hot sub-vector)
        self.num_vocab = in_ph_end_idx - in_ph_start_idx
        self.embed_dim = embed_dim
        if self.embed_dim is not None:
            # Learned phoneme embedding + projection of the other features
            assert in_dim > self.num_vocab
            self.emb = nn.Embedding(self.num_vocab, embed_dim)
            self.fc_in = nn.Linear(in_dim - self.num_vocab, embed_dim)
            lstm_in_dim = embed_dim
        else:
            lstm_in_dim = in_dim
        self.num_layers = num_layers
        num_direction = 2 if bidirectional else 1
        self.lstm = nn.LSTM(
            lstm_in_dim,
            hidden_dim,
            num_layers,
            bidirectional=bidirectional,
            batch_first=True,
            dropout=dropout,
        )
        # LSTM output is num_direction * hidden_dim wide
        self.hidden2out = nn.Linear(num_direction * hidden_dim, out_dim)
        init_weights(self, init_type)
    def forward(self, x, lengths, y=None):
        """Forward step

        Args:
            x (torch.Tensor): the input tensor of shape (B, T, in_dim)
            lengths (torch.Tensor): the lengths of the input tensor
            y (torch.Tensor): the optional target tensor (unused)

        Returns:
            torch.Tensor: the output tensor
        """
        if self.embed_dim is not None:
            # Split the input into (pre-phoneme, phoneme one-hot, post-phoneme)
            x_first, x_ph_onehot, x_last = torch.split(
                x,
                [
                    self.in_ph_start_idx,
                    self.num_vocab,
                    self.in_dim - self.num_vocab - self.in_ph_start_idx,
                ],
                dim=-1,
            )
            x_ph = torch.argmax(x_ph_onehot, dim=-1)
            # Make sure to have one-hot vector
            assert (x_ph_onehot.sum(-1) <= 1).all()
            x = self.emb(x_ph) + self.fc_in(torch.cat([x_first, x_last], dim=-1))
        # pack_padded_sequence requires lengths on the CPU
        if isinstance(lengths, torch.Tensor):
            lengths = lengths.to("cpu")
        x = pack_padded_sequence(x, lengths, batch_first=True)
        out, _ = self.lstm(x)
        out, _ = pad_packed_sequence(out, batch_first=True)
        out = self.hidden2out(out)
        return out
class TransformerEncoder(BaseModel):
    """Transformer encoder

    .. warning::

        So far this is not well tested. Maybe be removed in the future.

    Args:
        in_dim (int): the input dimension
        out_dim (int): the output dimension
        hidden_dim (int): the hidden dimension
        attention_dim (int): the attention dimension
        num_heads (int): the number of heads
        num_layers (int): the number of layers
        kernel_size (int): the kernel size
        dropout (float): the dropout rate
        reduction_factor (int): the reduction factor (temporal downsampling
            of the encoder input; the output is reshaped back to the
            original frame rate)
        init_type (str): the initialization type
        downsample_by_conv (bool): whether to use convolutional downsampling or not
        in_ph_start_idx (int): the start index of phonetic context in a hed file
        in_ph_end_idx (int): the end index of phonetic context in a hed file
        embed_dim (int): the embedding dimension
    """
    def __init__(
        self,
        in_dim,
        out_dim,
        hidden_dim,
        attention_dim,
        num_heads=2,
        num_layers=2,
        kernel_size=3,
        dropout=0.1,
        reduction_factor=1,
        init_type="none",
        downsample_by_conv=False,
        in_ph_start_idx: int = 1,
        in_ph_end_idx: int = 50,
        embed_dim=None,
    ):
        super().__init__()
        self.in_dim = in_dim
        self.out_dim = out_dim
        self.in_ph_start_idx = in_ph_start_idx
        self.in_ph_end_idx = in_ph_end_idx
        # Size of the phoneme vocabulary (width of the one-hot sub-vector)
        self.num_vocab = in_ph_end_idx - in_ph_start_idx
        self.embed_dim = embed_dim
        if self.embed_dim is not None:
            # Learned phoneme embedding + projection of the other features
            assert in_dim > self.num_vocab
            self.emb = nn.Embedding(self.num_vocab, embed_dim)
            self.fc_in = nn.Linear(in_dim - self.num_vocab, embed_dim)
            self.fc = nn.Linear(embed_dim, hidden_dim)
        else:
            self.emb = None
            self.fc_in = None
            self.fc = nn.Linear(in_dim, hidden_dim)
        self.reduction_factor = reduction_factor
        self.encoder = _TransformerEncoder(
            hidden_channels=hidden_dim,
            filter_channels=attention_dim,
            n_heads=num_heads,
            n_layers=num_layers,
            kernel_size=kernel_size,
            p_dropout=dropout,
        )
        # Predicts reduction_factor frames per encoder step; reshaped back
        # to (B, T, out_dim) in forward()
        self.fc_out = nn.Linear(hidden_dim, out_dim * reduction_factor)
        if reduction_factor > 1 and downsample_by_conv:
            # Depth-wise strided conv as a learnable downsampler
            self.conv_downsample = nn.Conv1d(
                in_dim,
                in_dim,
                kernel_size=reduction_factor,
                stride=reduction_factor,
                groups=in_dim,
            )
        else:
            self.conv_downsample = None
        # Only the linear/embedding layers are explicitly initialized here
        for f in [self.fc_in, self.emb, self.fc, self.fc_out]:
            if f is not None:
                init_weights(f, init_type)
    def forward(self, x, lengths=None, y=None):
        """Forward pass

        Args:
            x (torch.Tensor): input tensor of shape (B, T, in_dim)
            lengths (torch.Tensor): input sequence lengths
            y (torch.Tensor): target tensor (optional, unused)

        Returns:
            torch.Tensor: output tensor of shape (B, T, out_dim)
        """
        if isinstance(lengths, list):
            lengths = torch.tensor(lengths).to(x.device)
        if self.embed_dim is not None:
            # Split the input into (pre-phoneme, phoneme one-hot, post-phoneme)
            x_first, x_ph_onehot, x_last = torch.split(
                x,
                [
                    self.in_ph_start_idx,
                    self.num_vocab,
                    self.in_dim - self.num_vocab - self.in_ph_start_idx,
                ],
                dim=-1,
            )
            x_ph = torch.argmax(x_ph_onehot, dim=-1)
            # Make sure to have one-hot vector
            assert (x_ph_onehot.sum(-1) <= 1).all()
            x = self.emb(x_ph) + self.fc_in(torch.cat([x_first, x_last], dim=-1))
        # Adjust lengths based on the reduction factor
        if self.reduction_factor > 1:
            lengths = (lengths / self.reduction_factor).long()
            if self.conv_downsample is not None:
                x = self.conv_downsample(x.transpose(1, 2)).transpose(1, 2)
            else:
                # Plain decimation: keep every reduction_factor-th frame
                x = x[:, self.reduction_factor - 1 :: self.reduction_factor]
        x = self.fc(x)
        # (B, T, C) -> (B, C, T)
        x = x.transpose(1, 2)
        x_mask = sequence_mask(lengths, x.shape[2]).unsqueeze(1).to(x.device)
        x = self.encoder(x * x_mask, x_mask)
        # (B, C, T) -> (B, T, C); undo the reduction factor by reshaping
        x = self.fc_out(x.transpose(1, 2)).view(x.shape[0], -1, self.out_dim)
        return x
# For backward compatibility
# Will be removed in v0.1.0
def ResF0Conv1dResnet(*args, **kwargs):
    """Deprecated: moved to :mod:`nnsvs.acoustic_models`. Will be removed in v0.1.0."""
    from nnsvs.acoustic_models import ResF0Conv1dResnet
    return ResF0Conv1dResnet(*args, **kwargs)
def ResF0Conv1dResnetMDN(*args, **kwargs):
    """Deprecated: moved to :mod:`nnsvs.acoustic_models`. Will be removed in v0.1.0."""
    from nnsvs.acoustic_models import ResF0Conv1dResnetMDN
    return ResF0Conv1dResnetMDN(*args, **kwargs)
def ResF0VariancePredictor(*args, **kwargs):
    """Deprecated: moved to :mod:`nnsvs.acoustic_models`. Will be removed in v0.1.0."""
    from nnsvs.acoustic_models import ResF0VariancePredictor
    return ResF0VariancePredictor(*args, **kwargs)
def ResSkipF0FFConvLSTM(*args, **kwargs):
    """Deprecated: moved to :mod:`nnsvs.acoustic_models`. Will be removed in v0.1.0."""
    from nnsvs.acoustic_models import ResSkipF0FFConvLSTM
    return ResSkipF0FFConvLSTM(*args, **kwargs)
| 40,986 | 30.577042 | 89 | py |
nnsvs | nnsvs-master/nnsvs/gen.py | from warnings import warn
import librosa
import numpy as np
import pyloudnorm as pyln
import pysptk
import pyworld
import scipy
import torch
from nnmnkwii.frontend import merlin as fe
from nnmnkwii.io import hts
from nnmnkwii.postfilters import merlin_post_filter
from nnmnkwii.preprocessing.f0 import interp1d
from nnsvs.base import PredictionType
from nnsvs.dsp import bandpass_filter
from nnsvs.io.hts import (
get_note_frame_indices,
get_note_indices,
get_pitch_index,
get_pitch_indices,
)
from nnsvs.multistream import (
get_static_stream_sizes,
get_windows,
multi_stream_mlpg,
split_streams,
)
from nnsvs.pitch import gen_sine_vibrato, lowpass_filter
from nnsvs.postfilters import variance_scaling
from sklearn.preprocessing import MinMaxScaler
def _midi_to_hz(x, idx, log_f0=False):
    """Convert the MIDI-note column ``idx`` of ``x`` to Hz.

    Entries that are not positive (e.g. rests) are left as 0. If ``log_f0``
    is True, the converted values are returned in the log domain.
    """
    out = np.zeros(len(x))
    voiced = x[:, idx] > 0
    hz = librosa.midi_to_hz(x[voiced, idx])
    out[voiced] = np.log(hz) if log_f0 else hz
    return out
def _is_silence(label):
is_full_context = "@" in label
if is_full_context:
is_silence = "-sil" in label or "-pau" in label
else:
is_silence = label == "sil" or label == "pau"
return is_silence
@torch.no_grad()
def predict_timelag(
    device,
    labels,
    timelag_model,
    timelag_config,
    timelag_in_scaler,
    timelag_out_scaler,
    binary_dict,
    numeric_dict,
    pitch_indices=None,
    log_f0_conditioning=True,
    allowed_range=None,
    allowed_range_rest=None,
    force_clip_input_features=False,
    frame_period=5,
):
    """Predict time-lag from HTS labels

    Args:
        device (torch.device): device
        labels (nnmnkwii.io.hts.HTSLabelFile): HTS-style labels
        timelag_model (nn.Module): time-lag model
        timelag_config (dict): time-lag model config
        timelag_in_scaler (sklearn.preprocessing.MinMaxScaler): input scaler
        timelag_out_scaler (sklearn.preprocessing.MinMaxScaler): output scaler
        binary_dict (dict): binary feature dict
        numeric_dict (dict): numeric feature dict
        pitch_indices (list): indices of pitch features
        log_f0_conditioning (bool): whether to condition on log f0
        allowed_range (list): allowed range of time-lag (in frames)
        allowed_range_rest (list): allowed range of time-lag for rest (in frames)
        force_clip_input_features (bool): whether to clip input features
        frame_period (float): frame period in milliseconds

    Returns:
        ndarray: time-lag predictions per note, in 100 ns units
    """
    # Frame shift in HTS time units (100 ns); e.g. 5 ms -> 50000
    hts_frame_shift = int(frame_period * 1e4)
    # make sure to set frame shift properly before calling round_ method
    labels.frame_shift = hts_frame_shift
    if pitch_indices is None:
        pitch_indices = get_pitch_indices(binary_dict, numeric_dict)
    if allowed_range is None:
        allowed_range = [-20, 20]
    if allowed_range_rest is None:
        allowed_range_rest = [-40, 40]
    # round start/end times just in case.
    labels.round_()
    # Extract note-level labels
    note_indices = get_note_indices(labels)
    note_labels = labels[note_indices]
    # Extract musical/linguistic context
    timelag_linguistic_features = fe.linguistic_features(
        note_labels,
        binary_dict,
        numeric_dict,
        add_frame_features=False,
        subphone_features=None,
        frame_shift=hts_frame_shift,
    ).astype(np.float32)
    # Adjust input features if we use log-f0 conditioning:
    # convert MIDI note numbers to (log) Hz and interpolate over rests
    if log_f0_conditioning:
        if pitch_indices is None:
            raise ValueError("Pitch feature indices must be specified!")
        for idx in pitch_indices:
            timelag_linguistic_features[:, idx] = interp1d(
                _midi_to_hz(timelag_linguistic_features, idx, log_f0_conditioning),
                kind="slinear",
            )
    # Normalization
    timelag_linguistic_features = timelag_in_scaler.transform(
        timelag_linguistic_features
    )
    if force_clip_input_features and isinstance(timelag_in_scaler, MinMaxScaler):
        # clip to feature range (except for pitch-related features)
        non_pitch_indices = [
            idx
            for idx in range(timelag_linguistic_features.shape[1])
            if idx not in pitch_indices
        ]
        timelag_linguistic_features[:, non_pitch_indices] = np.clip(
            timelag_linguistic_features[:, non_pitch_indices],
            timelag_in_scaler.feature_range[0],
            timelag_in_scaler.feature_range[1],
        )
    # Add a batch axis: (T, D) -> (1, T, D)
    x = torch.from_numpy(timelag_linguistic_features).unsqueeze(0).to(device)
    # Run model
    if timelag_model.prediction_type() == PredictionType.PROBABILISTIC:
        # (B, T, D_out)
        max_mu, max_sigma = timelag_model.inference(x, [x.shape[1]])
        if np.any(timelag_config.has_dynamic_features):
            # Apply denormalization
            # (B, T, D_out) -> (T, D_out)
            max_sigma_sq = (
                max_sigma.squeeze(0).cpu().data.numpy() ** 2 * timelag_out_scaler.var_
            )
            # Guard against numerically zero variances before MLPG
            max_sigma_sq = np.maximum(max_sigma_sq, 1e-14)
            max_mu = timelag_out_scaler.inverse_transform(
                max_mu.squeeze(0).cpu().data.numpy()
            )
            # (T, D_out) -> (T, static_dim)
            pred_timelag = multi_stream_mlpg(
                max_mu,
                max_sigma_sq,
                get_windows(timelag_config.num_windows),
                timelag_config.stream_sizes,
                timelag_config.has_dynamic_features,
            )
        else:
            # Apply denormalization
            pred_timelag = timelag_out_scaler.inverse_transform(
                max_mu.squeeze(0).cpu().data.numpy()
            )
    else:
        # (T, D_out)
        pred_timelag = (
            timelag_model.inference(x, [x.shape[1]]).squeeze(0).cpu().data.numpy()
        )
        # Apply denormalization
        pred_timelag = timelag_out_scaler.inverse_transform(pred_timelag)
        if np.any(timelag_config.has_dynamic_features):
            # (T, D_out) -> (T, static_dim)
            pred_timelag = multi_stream_mlpg(
                pred_timelag,
                timelag_out_scaler.var_,
                get_windows(timelag_config.num_windows),
                timelag_config.stream_sizes,
                timelag_config.has_dynamic_features,
            )
    # Rounding to integer frames
    pred_timelag = np.round(pred_timelag)
    # Clip to the allowed range; rests get a wider range
    for idx in range(len(pred_timelag)):
        if _is_silence(note_labels.contexts[idx]):
            pred_timelag[idx] = np.clip(
                pred_timelag[idx], allowed_range_rest[0], allowed_range_rest[1]
            )
        else:
            pred_timelag[idx] = np.clip(
                pred_timelag[idx], allowed_range[0], allowed_range[1]
            )
    # frames -> 100 ns
    pred_timelag *= hts_frame_shift
    return pred_timelag
@torch.no_grad()
def predict_duration(
    device,
    labels,
    duration_model,
    duration_config,
    duration_in_scaler,
    duration_out_scaler,
    binary_dict,
    numeric_dict,
    pitch_indices=None,
    log_f0_conditioning=True,
    force_clip_input_features=False,
    frame_period=5,
):
    """Predict phoneme durations from HTS labels

    Args:
        device (torch.device): device to run the model on
        labels (nnmnkwii.io.hts.HTSLabelFile): labels
        duration_model (nn.Module): duration model
        duration_config (dict): duration config
        duration_in_scaler (sklearn.preprocessing.MinMaxScaler): duration input scaler
        duration_out_scaler (sklearn.preprocessing.MinMaxScaler): duration output scaler
        binary_dict (dict): binary feature dictionary
        numeric_dict (dict): numeric feature dictionary
        pitch_indices (list): indices of pitch features
        log_f0_conditioning (bool): whether to use log-f0 conditioning
        force_clip_input_features (bool): whether to clip input features
        frame_period (float): frame period in milliseconds

    Returns:
        np.ndarray: predicted phoneme durations in frames. If the duration
            model is probabilistic (MDN), a tuple of denormalized
            (mean, variance) arrays is returned instead; the variance-aware
            rounding/normalization is then done in ``postprocess_duration``.
    """
    # HTS labels use 100 ns as the time unit (frame_period is in msec)
    hts_frame_shift = int(frame_period * 1e4)
    if pitch_indices is None:
        pitch_indices = get_pitch_indices(binary_dict, numeric_dict)
    # Extract musical/linguistic features
    # NOTE: phone-level features (no frame features) are used for duration modeling
    duration_linguistic_features = fe.linguistic_features(
        labels,
        binary_dict,
        numeric_dict,
        add_frame_features=False,
        subphone_features=None,
        frame_shift=hts_frame_shift,
    ).astype(np.float32)
    # Convert pitch features (MIDI note numbers) to interpolated log-F0
    if log_f0_conditioning:
        for idx in pitch_indices:
            duration_linguistic_features[:, idx] = interp1d(
                _midi_to_hz(duration_linguistic_features, idx, log_f0_conditioning),
                kind="slinear",
            )
    # Apply normalization
    duration_linguistic_features = duration_in_scaler.transform(
        duration_linguistic_features
    )
    if force_clip_input_features and isinstance(duration_in_scaler, MinMaxScaler):
        # clip to feature range (except for pitch-related features)
        non_pitch_indices = [
            idx
            for idx in range(duration_linguistic_features.shape[1])
            if idx not in pitch_indices
        ]
        duration_linguistic_features[:, non_pitch_indices] = np.clip(
            duration_linguistic_features[:, non_pitch_indices],
            duration_in_scaler.feature_range[0],
            duration_in_scaler.feature_range[1],
        )
    # Apply model
    # (T, D_in) -> (1, T, D_in): models expect batched inputs
    x = torch.from_numpy(duration_linguistic_features).float().to(device)
    x = x.view(1, -1, x.size(-1))
    if duration_model.prediction_type() == PredictionType.PROBABILISTIC:
        # (B, T, D_out)
        max_mu, max_sigma = duration_model.inference(x, [x.shape[1]])
        if np.any(duration_config.has_dynamic_features):
            raise RuntimeError(
                "Dynamic features are not supported for duration modeling"
            )
        # Apply denormalization
        # Variance is denormalized with the output scaler's per-dim variance
        max_sigma_sq = (
            max_sigma.squeeze(0).cpu().data.numpy() ** 2 * duration_out_scaler.var_
        )
        # Guard against degenerate (zero) variances
        max_sigma_sq = np.maximum(max_sigma_sq, 1e-14)
        max_mu = duration_out_scaler.inverse_transform(
            max_mu.squeeze(0).cpu().data.numpy()
        )
        # NOTE: no rounding here; postprocess_duration consumes (mean, variance)
        return max_mu, max_sigma_sq
    else:
        # (T, D_out)
        pred_durations = (
            duration_model.inference(x, [x.shape[1]]).squeeze(0).cpu().data.numpy()
        )
        # Apply denormalization
        pred_durations = duration_out_scaler.inverse_transform(pred_durations)
        if np.any(duration_config.has_dynamic_features):
            # (T, D_out) -> (T, static_dim)
            pred_durations = multi_stream_mlpg(
                pred_durations,
                duration_out_scaler.var_,
                get_windows(duration_config.num_windows),
                duration_config.stream_sizes,
                duration_config.has_dynamic_features,
            )
        # Each phoneme must last at least one frame
        pred_durations[pred_durations <= 0] = 1
        pred_durations = np.round(pred_durations)
        return pred_durations
def postprocess_duration(labels, pred_durations, lag, frame_period=5):
    """Post-process durations based on predicted time-lag

    Ref : https://arxiv.org/abs/2108.02776

    Args:
        labels (HTSLabelFile): HTS labels
        pred_durations (array or tuple): predicted durations for non-MDN,
            mean and variance for MDN
        lag (array): predicted time-lag
        frame_period (float): frame period in milliseconds

    Returns:
        HTSLabelFile: labels with adjusted durations
    """
    # HTS labels use 100 ns as the time unit
    hts_frame_shift = int(frame_period * 1e4)
    note_indices = get_note_indices(labels)
    # append the end of note
    note_indices.append(len(labels))
    # MDN-based duration models return a (mean, variance) tuple
    is_mdn = isinstance(pred_durations, tuple) and len(pred_durations) == 2
    output_labels = hts.HTSLabelFile()
    # Process note-by-note; each note spans one or more phoneme labels
    for i in range(1, len(note_indices)):
        p = labels[note_indices[i - 1] : note_indices[i]]
        # Compute note duration with time-lag
        # eq (11)
        L = int(fe.duration_features(p)[0])
        if i < len(note_indices) - 1:
            L_hat = L - (lag[i - 1] - lag[i]) / hts_frame_shift
        else:
            # Last note: no succeeding note's lag to compensate with
            L_hat = L - (lag[i - 1]) / hts_frame_shift
        # Prevent negative duration
        L_hat = max(L_hat, 1)
        # adjust the start time of the note
        # (clamped so the note keeps at least one frame per phoneme)
        p.start_times = np.minimum(
            np.asarray(p.start_times) + lag[i - 1].reshape(-1),
            np.asarray(p.end_times) - hts_frame_shift * len(p),
        )
        p.start_times = np.maximum(p.start_times, 0)
        # Do not overlap with the previously emitted note
        if len(output_labels) > 0:
            p.start_times = np.maximum(
                p.start_times, output_labels.start_times[-1] + hts_frame_shift
            )
        # Compute normalized phoneme durations
        if is_mdn:
            mu = pred_durations[0][note_indices[i - 1] : note_indices[i]]
            sigma_sq = pred_durations[1][note_indices[i - 1] : note_indices[i]]
            # eq (17)
            rho = (L_hat - mu.sum()) / sigma_sq.sum()
            # eq (16)
            d_norm = mu + rho * sigma_sq
            if np.any(d_norm <= 0):
                # eq (12) (using mu as d_hat)
                # s: seconds per frame, only used for logging below
                s = frame_period * 0.001
                print(
                    f"Negative phoneme durations are predicted at {i}-th note. "
                    "The note duration: ",
                    f"{round(float(L)*s,3)} sec -> {round(float(L_hat)*s,3)} sec",
                )
                print(
                    "It's likely that the model couldn't predict correct durations "
                    "for short notes."
                )
                print(
                    f"Variance scaling based durations (in frame):\n{(mu + rho * sigma_sq)}"
                )
                print(
                    f"Fallback to uniform scaling (in frame):\n{(L_hat * mu / mu.sum())}"
                )
                d_norm = L_hat * mu / mu.sum()
        else:
            # eq (12)
            d_hat = pred_durations[note_indices[i - 1] : note_indices[i]]
            d_norm = L_hat * d_hat / d_hat.sum()
        # Durations are in frames; enforce at least one frame per phoneme
        d_norm = np.round(d_norm)
        d_norm[d_norm <= 0] = 1
        p.set_durations(d_norm)
        # Keep adjacent notes contiguous in time
        if len(output_labels) > 0:
            output_labels.end_times[-1] = p.start_times[0]
        for n in p:
            output_labels.append(n)
    return output_labels
@torch.no_grad()
def predict_timing(
    device,
    labels,
    binary_dict,
    numeric_dict,
    timelag_model,
    timelag_config,
    timelag_in_scaler,
    timelag_out_scaler,
    duration_model,
    duration_config,
    duration_in_scaler,
    duration_out_scaler,
    log_f0_conditioning=True,
    allowed_range=None,
    allowed_range_rest=None,
    force_clip_input_features=True,
    frame_period=5,
):
    """Predict timings from HTS labels

    This is equivalent to ``predict_timelag + predict_duration + postprocess_duration``.

    Args:
        device (torch.device): device to run the model on
        labels (nnmnkwii.io.hts.HTSLabelFile): labels
        binary_dict (dict): binary feature dictionary
        numeric_dict (dict): numeric feature dictionary
        timelag_model (nn.Module): timelag model
        timelag_config (dict): timelag config
        timelag_in_scaler (sklearn.preprocessing.MinMaxScaler): timelag input scaler
        timelag_out_scaler (sklearn.preprocessing.MinMaxScaler): timelag output scaler
        duration_model (nn.Module): duration model
        duration_config (dict): duration config
        duration_in_scaler (sklearn.preprocessing.MinMaxScaler): duration input scaler
        duration_out_scaler (sklearn.preprocessing.MinMaxScaler): duration output scaler
        log_f0_conditioning (bool): whether to condition on log f0
        allowed_range (list): allowed range of time-lag
        allowed_range_rest (list): allowed range of time-lag for rest
        force_clip_input_features (bool): whether to clip input features
        frame_period (float): frame period in milliseconds

    Returns:
        nnmnkwii.io.hts.HTSLabelFile: duration modified labels
    """
    hts_frame_shift = int(frame_period * 1e4)
    labels.frame_shift = hts_frame_shift
    # Pitch feature indices are shared by both sub-models
    pitch_indices = get_pitch_indices(binary_dict, numeric_dict)
    # Time-lag
    lag = predict_timelag(
        device=device,
        labels=labels,
        timelag_model=timelag_model,
        timelag_config=timelag_config,
        timelag_in_scaler=timelag_in_scaler,
        timelag_out_scaler=timelag_out_scaler,
        binary_dict=binary_dict,
        numeric_dict=numeric_dict,
        pitch_indices=pitch_indices,
        log_f0_conditioning=log_f0_conditioning,
        allowed_range=allowed_range,
        allowed_range_rest=allowed_range_rest,
        force_clip_input_features=force_clip_input_features,
        frame_period=frame_period,
    )
    # Duration predictions
    durations = predict_duration(
        device=device,
        labels=labels,
        duration_model=duration_model,
        duration_config=duration_config,
        duration_in_scaler=duration_in_scaler,
        duration_out_scaler=duration_out_scaler,
        binary_dict=binary_dict,
        numeric_dict=numeric_dict,
        pitch_indices=pitch_indices,
        log_f0_conditioning=log_f0_conditioning,
        force_clip_input_features=force_clip_input_features,
        frame_period=frame_period,
    )
    # Normalize phoneme durations
    # NOTE: frame_period must be forwarded explicitly; otherwise
    # postprocess_duration would fall back to its default (5 msec) and
    # compute durations with a frame shift inconsistent with the models.
    duration_modified_labels = postprocess_duration(
        labels, durations, lag, frame_period=frame_period
    )
    return duration_modified_labels
@torch.no_grad()
def predict_acoustic(
    device,
    labels,
    acoustic_model,
    acoustic_config,
    acoustic_in_scaler,
    acoustic_out_scaler,
    binary_dict,
    numeric_dict,
    subphone_features="coarse_coding",
    pitch_indices=None,
    log_f0_conditioning=True,
    force_clip_input_features=False,
    frame_period=5,
    f0_shift_in_cent=0,
):
    """Predict acoustic features from HTS labels

    MLPG is applied to the predicted features if the output features have
    dynamic features.

    Args:
        device (torch.device): device to use
        labels (HTSLabelFile): HTS labels
        acoustic_model (nn.Module): acoustic model
        acoustic_config (AcousticConfig): acoustic configuration
        acoustic_in_scaler (sklearn.preprocessing.StandardScaler): input scaler
        acoustic_out_scaler (sklearn.preprocessing.StandardScaler): output scaler
        binary_dict (dict): binary feature dictionary
        numeric_dict (dict): numeric feature dictionary
        subphone_features (str): subphone feature type
        pitch_indices (list): indices of pitch features
        log_f0_conditioning (bool): whether to use log f0 conditioning
        force_clip_input_features (bool): whether to force clip input features
        frame_period (float): frame period in msec
        f0_shift_in_cent (float): F0 shift in cent-scale before the inference

    Returns:
        ndarray: predicted acoustic features
    """
    # HTS labels use 100 ns as the time unit
    hts_frame_shift = int(frame_period * 1e4)
    if pitch_indices is None:
        pitch_indices = get_pitch_indices(binary_dict, numeric_dict)
    # Musical/linguistic features
    # NOTE: frame-level features (with subphone features) are used here
    linguistic_features = fe.linguistic_features(
        labels,
        binary_dict,
        numeric_dict,
        add_frame_features=True,
        subphone_features=subphone_features,
        frame_shift=hts_frame_shift,
    )
    if log_f0_conditioning:
        # Convert pitch features (MIDI note numbers) to interpolated log-F0
        for idx in pitch_indices:
            linguistic_features[:, idx] = interp1d(
                _midi_to_hz(linguistic_features, idx, log_f0_conditioning),
                kind="slinear",
            )
            # Pitch-shift the conditioning log-F0 (cents -> log-scale offset)
            if f0_shift_in_cent != 0:
                lf0_offset = f0_shift_in_cent * np.log(2) / 1200
                linguistic_features[:, idx] += lf0_offset
    # Apply normalization
    linguistic_features = acoustic_in_scaler.transform(linguistic_features)
    if force_clip_input_features and isinstance(acoustic_in_scaler, MinMaxScaler):
        # clip to feature range (except for pitch-related features)
        non_pitch_indices = [
            idx
            for idx in range(linguistic_features.shape[1])
            if idx not in pitch_indices
        ]
        linguistic_features[:, non_pitch_indices] = np.clip(
            linguistic_features[:, non_pitch_indices],
            acoustic_in_scaler.feature_range[0],
            acoustic_in_scaler.feature_range[1],
        )
    # Predict acoustic features
    # (T, D_in) -> (1, T, D_in): models expect batched inputs
    x = torch.from_numpy(linguistic_features).float().to(device)
    x = x.view(1, -1, x.size(-1))
    # Both probabilistic and multistream-hybrid models return (mu, sigma)
    if acoustic_model.prediction_type() in [
        PredictionType.PROBABILISTIC,
        PredictionType.MULTISTREAM_HYBRID,
    ]:
        # (B, T, D_out)
        max_mu, max_sigma = acoustic_model.inference(x, [x.shape[1]])
        if np.any(acoustic_config.has_dynamic_features):
            # Apply denormalization
            # (B, T, D_out) -> (T, D_out)
            max_sigma_sq = (
                max_sigma.squeeze(0).cpu().data.numpy() ** 2 * acoustic_out_scaler.var_
            )
            # Guard against degenerate (zero) variances before MLPG
            max_sigma_sq = np.maximum(max_sigma_sq, 1e-14)
            max_mu = acoustic_out_scaler.inverse_transform(
                max_mu.squeeze(0).cpu().data.numpy()
            )
            # (T, D_out) -> (T, static_dim)
            pred_acoustic = multi_stream_mlpg(
                max_mu,
                max_sigma_sq,
                get_windows(acoustic_config.num_windows),
                acoustic_config.stream_sizes,
                acoustic_config.has_dynamic_features,
            )
        else:
            # Apply denormalization
            pred_acoustic = acoustic_out_scaler.inverse_transform(
                max_mu.squeeze(0).cpu().data.numpy()
            )
    else:
        # (T, D_out)
        pred_acoustic = (
            acoustic_model.inference(x, [x.shape[1]]).squeeze(0).cpu().data.numpy()
        )
        # Apply denormalization
        pred_acoustic = acoustic_out_scaler.inverse_transform(pred_acoustic)
        if np.any(acoustic_config.has_dynamic_features):
            # (T, D_out) -> (T, static_dim)
            pred_acoustic = multi_stream_mlpg(
                pred_acoustic,
                acoustic_out_scaler.var_,
                get_windows(acoustic_config.num_windows),
                acoustic_config.stream_sizes,
                acoustic_config.has_dynamic_features,
            )
    return pred_acoustic
@torch.no_grad()
def postprocess_acoustic(
    device,
    acoustic_features,
    duration_modified_labels,
    binary_dict,
    numeric_dict,
    acoustic_config,
    acoustic_out_static_scaler,
    postfilter_model=None,
    postfilter_config=None,
    postfilter_out_scaler=None,
    sample_rate=48000,
    frame_period=5,
    relative_f0=False,
    feature_type="world",
    post_filter_type="gv",
    trajectory_smoothing=True,
    trajectory_smoothing_cutoff=50,
    trajectory_smoothing_cutoff_f0=20,
    vuv_threshold=0.5,
    f0_shift_in_cent=0,
    vibrato_scale=1.0,
    force_fix_vuv=False,
    fill_silence_to_rest=False,
):
    """Post-process acoustic features

    The function converts acoustic features in single ndarray to tuple of
    multi-stream acoustic features.

    e.g., array -> (mgc, lf0, vuv, bap)

    Args:
        device (torch.device): Device.
        acoustic_features (np.ndarray): Acoustic features.
        duration_modified_labels (nnmnkwii.io.hts.HTSLabelFile): HTS label file.
        binary_dict (dict): Dictionary of binary features.
        numeric_dict (dict): Dictionary of numeric features.
        acoustic_config (dict): Acoustic model configuration.
        acoustic_out_static_scaler (sklearn.preprocessing.StandardScaler): Scaler
            for acoustic features.
        postfilter_model (nn.Module): Post-filter model.
        postfilter_config (dict): Post-filter model configuration.
        postfilter_out_scaler (sklearn.preprocessing.StandardScaler): Scaler for post-filter.
        sample_rate (int): Sampling rate.
        frame_period (float): Frame period in milliseconds.
        relative_f0 (bool): If True, use relative f0.
        feature_type (str): Feature type. ``world`` or ``melf0``.
        post_filter_type (str): Post-filter type.
            One of ``gv``, ``merlin`` or ``nnsvs``. Recommended to use ``gv``
            for general purpose.
        trajectory_smoothing (bool): Whether to apply trajectory smoothing.
        trajectory_smoothing_cutoff (float): Cutoff frequency for trajectory smoothing
            of spectral features.
        trajectory_smoothing_cutoff_f0 (float): Cutoff frequency for trajectory smoothing of f0.
        vuv_threshold (float): V/UV threshold.
        f0_shift_in_cent (float): F0 shift in cents.
        vibrato_scale (float): Vibrato scale.
        force_fix_vuv (bool): If True, force to fix V/UV.
        fill_silence_to_rest (bool): Fill silence to rest frames.

    Returns:
        tuple: Post-processed acoustic features.
            ``(mgc, lf0, vuv, bap)`` for ``world``; ``(mel, lf0, vuv)``
            for ``melf0``.
    """
    # HTS labels use 100 ns as the time unit
    hts_frame_shift = int(frame_period * 1e4)
    pitch_idx = get_pitch_index(binary_dict, numeric_dict)
    static_stream_sizes = get_static_stream_sizes(
        acoustic_config.stream_sizes,
        acoustic_config.has_dynamic_features,
        acoustic_config.num_windows,
    )
    # Frame-level linguistic features, used to locate note/rest frames below
    linguistic_features = fe.linguistic_features(
        duration_modified_labels,
        binary_dict,
        numeric_dict,
        add_frame_features=True,
        frame_shift=hts_frame_shift,
    )
    # GV post-filter
    if post_filter_type == "gv" or (
        post_filter_type == "nnsvs" and feature_type == "world"
    ):
        note_frame_indices = get_note_frame_indices(
            binary_dict, numeric_dict, linguistic_features
        )
        # ``offset``: number of leading spectral dims excluded from scaling
        if feature_type == "world":
            offset = 2
        elif feature_type == "melf0":
            # NOTE: set offset so that post-filter does not affect F0
            mel_freq = librosa.mel_frequencies(
                n_mels=80, fmin=63, fmax=sample_rate // 2
            )
            # NOTE: the threshold could be tuned for better performance
            offset = np.argmax(mel_freq > 1200)
        # NOTE: apply the post-filter for note frames only
        mgc_end_dim = static_stream_sizes[0]
        acoustic_features[:, :mgc_end_dim] = variance_scaling(
            acoustic_out_static_scaler.var_.reshape(-1)[:mgc_end_dim],
            acoustic_features[:, :mgc_end_dim],
            offset=offset,
            note_frame_indices=note_frame_indices,
        )
    # Learned post-filter using nnsvs
    if post_filter_type == "nnsvs" and postfilter_model is not None:
        # (1) Raw spectrogram or (2) mgc
        rawsp_output = postfilter_config.stream_sizes[0] >= 128
        # If the post-filter output is raw spectrogrma, convert mgc to log spectrogram
        if rawsp_output:
            outs = split_streams(acoustic_features, static_stream_sizes)
            assert len(outs) == 4
            mgc, lf0, vuv, bap = outs
            fft_size = pyworld.get_cheaptrick_fft_size(sample_rate)
            sp = pyworld.decode_spectral_envelope(
                mgc.astype(np.float64), sample_rate, fft_size
            ).astype(np.float32)
            sp = np.log(sp)
            acoustic_features = np.concatenate([sp, lf0, vuv, bap], axis=-1)
        # Normalize, run the post-filter and denormalize
        in_feats = torch.from_numpy(acoustic_features).float().unsqueeze(0)
        in_feats = postfilter_out_scaler.transform(in_feats).float().to(device)
        # Run inference
        out_feats = postfilter_model.inference(in_feats, [in_feats.shape[1]])
        acoustic_features = (
            postfilter_out_scaler.inverse_transform(out_feats.cpu()).squeeze(0).numpy()
        )
        # Convert log spectrogram to mgc
        # NOTE: mgc is used to reduce possible artifacts
        # Ref: https://bit.ly/3AHjstU
        if rawsp_output:
            sp, lf0, vuv, bap = split_streams(
                acoustic_features, postfilter_config.stream_sizes
            )
            sp = np.exp(sp)
            mgc = pyworld.code_spectral_envelope(
                sp.astype(np.float64), sample_rate, 60
            ).astype(np.float32)
            acoustic_features = np.concatenate([mgc, lf0, vuv, bap], axis=-1)
    # Generate WORLD parameters
    if feature_type == "world":
        mgc, lf0, vuv, bap = gen_spsvs_static_features(
            labels=duration_modified_labels,
            acoustic_features=acoustic_features,
            binary_dict=binary_dict,
            numeric_dict=numeric_dict,
            stream_sizes=acoustic_config.stream_sizes,
            has_dynamic_features=acoustic_config.has_dynamic_features,
            pitch_idx=pitch_idx,
            num_windows=acoustic_config.num_windows,
            frame_period=frame_period,
            relative_f0=relative_f0,
            vibrato_scale=vibrato_scale,
            vuv_threshold=vuv_threshold,
            force_fix_vuv=force_fix_vuv,
        )
    elif feature_type == "melf0":
        mel, lf0, vuv = split_streams(acoustic_features, [80, 1, 1])
    else:
        raise ValueError(f"Unknown feature type: {feature_type}")
    # Blend rough "silence" templates into long rest segments
    if fill_silence_to_rest:
        mask = _get_nonrest_frame_soft_mask(
            binary_dict, numeric_dict, linguistic_features
        )
        if feature_type == "world":
            mgc, lf0, vuv, bap = _fill_silence_to_world_params(mgc, lf0, vuv, bap, mask)
        elif feature_type == "melf0":
            mel, lf0, vuv = _fill_silence_to_mel_params(mel, lf0, vuv, mask)
    # Pitch-shift the output log-F0 (cents -> log-scale offset)
    if f0_shift_in_cent != 0:
        lf0_offset = f0_shift_in_cent * np.log(2) / 1200
        lf0 = lf0 + lf0_offset
    # NOTE: spectral enhancement based on the Merlin's post-filter implementation
    if feature_type == "world" and post_filter_type == "merlin":
        alpha = pysptk.util.mcepalpha(sample_rate)
        mgc = merlin_post_filter(mgc, alpha)
    # Remove high-frequency components of lf0/mgc/bap
    # NOTE: Useful to reduce high-frequency artifacts
    if trajectory_smoothing:
        # modulation-spectrum sampling frequency (frames per second)
        modfs = int(1 / (frame_period * 0.001))
        lf0[:, 0] = lowpass_filter(
            lf0[:, 0], modfs, cutoff=trajectory_smoothing_cutoff_f0
        )
        if feature_type == "world":
            for d in range(mgc.shape[1]):
                mgc[:, d] = lowpass_filter(
                    mgc[:, d], modfs, cutoff=trajectory_smoothing_cutoff
                )
            for d in range(bap.shape[1]):
                bap[:, d] = lowpass_filter(
                    bap[:, d], modfs, cutoff=trajectory_smoothing_cutoff
                )
        elif feature_type == "melf0":
            for d in range(mel.shape[1]):
                mel[:, d] = lowpass_filter(
                    mel[:, d], modfs, cutoff=trajectory_smoothing_cutoff
                )
    # Heuristic: mel-cepstrum-based aperiodicity has more than 5 coefficients
    if feature_type == "world":
        use_mcep_aperiodicity = bap.shape[-1] > 5
    # Keep WORLD-coded band aperiodicity in a valid range
    if feature_type == "world" and not use_mcep_aperiodicity:
        bap = np.clip(bap, a_min=-60, a_max=0)
    if feature_type == "world":
        return mgc, lf0, vuv, bap
    elif feature_type == "melf0":
        return mel, lf0, vuv
@torch.no_grad()
def predict_waveform(
    device,
    multistream_features,
    vocoder=None,
    vocoder_config=None,
    vocoder_in_scaler=None,
    sample_rate=48000,
    frame_period=5,
    use_world_codec=True,
    feature_type="world",
    vocoder_type="world",
    vuv_threshold=0.5,
):
    """Predict waveform from multi-stream acoustic features

    Vocoders can be 1) WORLD, 2) PWG or 3) uSFGAN.

    Args:
        device (torch.device): Device to run inference
        multistream_features (tuple): Acoustic features
        vocoder (nn.Module): Vocoder model
        vocoder_config (dict): Vocoder config
        vocoder_in_scaler (StandardScaler): Vocoder input scaler
        sample_rate (int,): Sampling rate.
        frame_period (float): Frame period in msec.
        use_world_codec (bool): Whether to use WORLD codec for decoding.
        feature_type (str): Feature type.
            ``world`` ``world_org``, ``melf0`` or ``neutrino``.
        vocoder_type (str): Vocoder type. ``world`` or ``pwg`` or ``usfgan``
        vuv_threshold (float): VUV threshold.

    Returns:
        np.ndarray: Predicted waveform
    """
    # Unpack the multi-stream features depending on the feature type
    if feature_type == "world":
        mgc, lf0, vuv, bap = multistream_features
    elif feature_type == "world_org":
        # Raw WORLD parameters (no coded spectral envelope/aperiodicity)
        f0, spectrogram, aperiodicity = multistream_features
    elif feature_type == "neutrino":
        mgc, f0, bap = multistream_features
        # prepare (mgc, lf0, vuv, bap) to be compatible with NNSVS
        lf0 = f0.copy()
        lf0[np.nonzero(f0)] = np.log(f0[np.nonzero(f0)])
        vuv = (f0 > 0).astype(np.float32)
    elif feature_type == "melf0":
        mel, lf0, vuv = multistream_features
    else:
        raise ValueError(f"Unknown feature type: {feature_type}")
    # NOTE: `use_mcep_aperiodicity` was used for experimental purpose but didn't make
    # significant difference. Please just ignore or ping @r9y9 for details.
    if feature_type in ["world", "neutrino"]:
        use_mcep_aperiodicity = bap.shape[-1] > 5
    if feature_type == "neutrino" and not use_world_codec:
        raise ValueError("use_world_codec must be True when feature_type is neutrino")
    # Waveform generation by WORLD
    if vocoder_type == "world":
        if feature_type not in ["world", "world_org", "neutrino"]:
            raise ValueError(f"Invalid feature type for WORLD vocoder: {feature_type}")
        if feature_type == "world_org":
            # NOTE: WORLD-based features are already converted to raw WORLD parameters
            pass
        else:
            # Decode (mgc, lf0, vuv, bap) to raw WORLD parameters
            f0, spectrogram, aperiodicity = gen_world_params(
                mgc,
                lf0,
                vuv,
                bap,
                sample_rate,
                vuv_threshold=vuv_threshold,
                use_world_codec=use_world_codec,
            )
        # make sure to have float64 typed parameters
        wav = pyworld.synthesize(
            f0.astype(np.float64),
            spectrogram.astype(np.float64),
            aperiodicity.astype(np.float64),
            sample_rate,
            frame_period,
        )
    elif vocoder_type == "pwg":
        # NOTE: So far vocoder models are trained on binary V/UV features
        vuv = (vuv > vuv_threshold).astype(np.float32)
        # NOTE(review): only ``world`` and ``melf0`` are handled below;
        # other feature types would leave ``voc_inp`` undefined — confirm
        # that callers never reach here with ``world_org``/``neutrino``.
        if feature_type == "world":
            voc_inp = (
                torch.from_numpy(
                    vocoder_in_scaler.transform(
                        np.concatenate([mgc, lf0, vuv, bap], axis=-1)
                    )
                )
                .float()
                .to(device)
            )
        elif feature_type == "melf0":
            voc_inp = (
                torch.from_numpy(
                    vocoder_in_scaler.transform(
                        np.concatenate([mel, lf0, vuv], axis=-1)
                    )
                )
                .float()
                .to(device)
            )
        wav = vocoder.inference(voc_inp).view(-1).to("cpu").numpy()
    elif vocoder_type == "usfgan":
        if feature_type in ["world", "neutrino"]:
            fftlen = pyworld.get_cheaptrick_fft_size(sample_rate)
            if use_mcep_aperiodicity:
                # Convert mel-cepstrum-based aperiodicity to WORLD's aperiodicity
                aperiodicity_order = bap.shape[-1] - 1
                alpha = pysptk.util.mcepalpha(sample_rate)
                aperiodicity = pysptk.mc2sp(
                    np.ascontiguousarray(bap).astype(np.float64),
                    fftlen=fftlen,
                    alpha=alpha,
                )
            else:
                aperiodicity = pyworld.decode_aperiodicity(
                    np.ascontiguousarray(bap).astype(np.float64),
                    sample_rate,
                    fftlen,
                )
            # fill aperiodicity with ones for unvoiced regions
            aperiodicity[vuv.reshape(-1) < vuv_threshold, 0] = 1.0
            # WORLD fails catastrophically for out of range aperiodicity
            aperiodicity = np.clip(aperiodicity, 0.0, 1.0)
            # Convert aperiodicity back to BAP
            if use_mcep_aperiodicity:
                bap = pysptk.sp2mc(
                    aperiodicity,
                    order=aperiodicity_order,
                    alpha=alpha,
                )
            else:
                bap = pyworld.code_aperiodicity(aperiodicity, sample_rate).astype(
                    np.float32
                )
            aux_feats = [mgc, bap]
        elif feature_type == "melf0":
            aux_feats = [mel]
        elif feature_type == "world_org":
            # it is possible to implement here but I suppose nobody wants to use
            raise NotImplementedError()
        aux_feats = (
            torch.from_numpy(
                vocoder_in_scaler.transform(np.concatenate(aux_feats, axis=-1))
            )
            .float()
            .to(device)
        )
        # Continuous F0 from (interpolated) log-F0
        contf0 = np.exp(lf0)
        if vocoder_config.data.sine_f0_type in ["contf0", "cf0"]:
            f0_inp = contf0
        elif vocoder_config.data.sine_f0_type == "f0":
            # Zero out unvoiced frames (NOTE: mutates contf0 in place)
            f0_inp = contf0
            f0_inp[vuv < vuv_threshold] = 0
        # NOTE: uSFGAN internally performs normalization
        # so we don't need to normalize inputs here
        wav = vocoder.inference(f0_inp, aux_feats).view(-1).to("cpu").numpy()
    return wav
def postprocess_waveform(
    wav,
    sample_rate,
    dtype=np.int16,
    peak_norm=False,
    loudness_norm=False,
    target_loudness=-20.0,
):
    """Perform post-processing for synthesized waveform

    Args:
        wav (ndarray): The input waveform
        sample_rate (int): The sampling rate
        dtype (np.dtype): The dtype of output waveform. Default is np.int16.
        peak_norm (bool): Whether to perform peak normalization
        loudness_norm (bool): Whether to perform loudness normalization
        target_loudness (float): Target loudness in dB

    Returns:
        ndarray: The post-processed waveform
    """
    wav = bandpass_filter(wav, sample_rate)
    # Peak normalize audio to 0 dB
    if peak_norm:
        wav = pyln.normalize.peak(wav, 0.0)
    # Normalize loudnes
    # NOTE: -20 dB is roughly the same as the NEURINO (NSF ver.)
    if loudness_norm:
        meter = pyln.Meter(sample_rate)
        measured = meter.integrated_loudness(wav)
        wav = pyln.normalize.loudness(wav, measured, target_loudness)
    # NOTE: use np.int16 to save disk space
    if dtype in [np.int16, "int16"]:
        # NOTE: NNSVS (>=0.0.3) uses waveforms normalized in [-1, 1] for training.
        # so the following code shouldn't be used but in case for models trained
        # with earlier NNSVS
        peak = np.max(np.abs(wav))
        if peak > 10:
            # data is likely already in [-32768, 32767]
            wav = wav.astype(np.int16)
        elif peak <= 1:
            wav = (wav * 32767.0).astype(np.int16)
        else:
            # may need to handle int32 data (if any)
            warn("Unexpected waveform range: {} - {}".format(np.min(wav), np.max(wav)))
            warn("Failed to convert to int16. Returning waveform with floating point.")
    elif dtype is None:
        pass
    else:
        wav = wav.astype(dtype)
    return wav
def _get_nonrest_frame_soft_mask(
binary_dict,
numeric_dict,
linguistic_features,
win_length=200,
duration_threshold=1.0,
):
"""Get mask for non-rest frames
Args:
binary_dict (dict): Dictionary for binary features
numeric_dict (dict): Dictionary for numeric features
linguistic_features (ndarray): Linguistic features
win_length (int): Window length
Returns:
ndarray: Soft mask for non-rest frames.
1 for non-rest frames and 0 for otherwise.
"""
mask = np.ones(len(linguistic_features))
in_sil_indices = []
for k, v in binary_dict.items():
name, _ = v
if "C-Phone_sil" in name or "C-Phone_pau" in name:
in_sil_indices.append(k)
if len(in_sil_indices) == 0:
return mask
in_note_dur_idx = None
for k, v in numeric_dict.items():
name, _ = v
if "e7" in name:
in_note_dur_idx = k
break
dur = linguistic_features[:, len(binary_dict) + in_note_dur_idx]
dur_in_sec = dur * 0.01
for in_sil_idx in in_sil_indices:
# Only mask out sil/pau segments over ${silence_threshold} sec. such as long pause
mask[
(linguistic_features[:, in_sil_idx] > 0) & (dur_in_sec > duration_threshold)
] = 0
# make a smoothed mask with ${win_length} * 5ms window length
mask = scipy.signal.convolve(mask, np.ones(win_length) / win_length, mode="same")
# make sure that we don't mask out frames where notes are assigned
pitch_idx = get_pitch_index(binary_dict, numeric_dict)
score_f0 = linguistic_features[:, pitch_idx]
mask[score_f0 > 0] = 1.0
return mask.reshape(-1, 1)
def _fill_silence_to_world_params(mgc, lf0, vuv, bap, mask):
mgc_sil = np.zeros((1, mgc.shape[1]))
# NOTE: mgc_sil is a VERY ROUGH estimate of mgc for silence regions
# the speech signal is assumed to be in [-1, 1].
# sr = 48000
# noise = np.random.randn(sr * 10) * 1e-5
# f0, timeaxis = pyworld.harvest(noise, sr, frame_period=5)
# f0[:] = 0
# spectrogram = pyworld.cheaptrick(noise, f0, timeaxis, sr)
# mgc = pyworld.code_spectral_envelope(spectrogram, sr, 60)
# print(mgc.mean(0))
mgc_sil[0, 0] = -23.3
mgc_sil[0, 1] = 0.0679
mgc_sil[0, 2] = 0.00640
mgc_sil[0, 3:] = 1e-3
bap_sil = np.zeros_like(bap) + 1e-11
mgc = mgc * mask + (1 - mask) * mgc_sil
bap = bap * mask + (1 - mask) * bap_sil
return mgc, lf0, vuv, bap
def _fill_silence_to_mel_params(mel, lf0, vuv, mask):
# NOTE: -5.5 is also a very rough estimate of log-melspectrogram
# for silence regions
mel_sil = np.zeros((1, mel.shape[1])) - 5.5
mel = mel * mask + (1 - mask) * mel_sil
return mel, lf0, vuv
def correct_vuv_by_phone(vuv, binary_dict, linguistic_features):
    """Correct V/UV by phone-related flags in a hed file

    This function allows us to control V/UV explicitly by ``C-VUV_Voiced``
    and ``C-VUV_Unvoied`` flags in a hed file. This is useful when you see
    your trained acoustic model have lots of V/UV errors.
    Note that manually controlling V/UV means we are ignoring the
    acoustic model's prediction. It would have negative impact in some
    cases, but most cases it would help workaround V/UV errors.

    Args:
        vuv (ndarray): V/UV flags
        binary_dict (dict): binary feature dictionary
        linguistic_features (ndarray): linguistic features

    Returns:
        ndarray: corrected V/UV flags (the input array is not modified)
    """
    # Work on a copy so the caller's array is untouched
    vuv = vuv.copy()
    # Set V/UV to 1 based on the C-VUV_Voiced flag
    in_voiced_idx = -1
    for k, v in binary_dict.items():
        name, _ = v
        if "C-VUV_Voiced" in name:
            in_voiced_idx = k
            break
    # NOTE: -1 is the "not found" sentinel; use >= 0 so that a voiced flag
    # located at feature index 0 is not silently ignored.
    if in_voiced_idx >= 0:
        indices = linguistic_features[:, in_voiced_idx : in_voiced_idx + 1] > 0
        vuv[indices] = 1.0
    # Set V/UV to 0 based on the C-VUV_Unvoiced flag
    in_unvoiced_indices = []
    for k, v in binary_dict.items():
        name, _ = v
        if "C-VUV_Unvoiced" in name:
            in_unvoiced_indices.append(k)
    if len(in_unvoiced_indices) > 0:
        for in_unvoiced_idx in in_unvoiced_indices:
            indices = linguistic_features[:, in_unvoiced_idx : in_unvoiced_idx + 1] > 0
            vuv[indices] = 0.0
    # Set V/UV to 0 for sil/pau/br
    in_sil_indices = []
    for k, v in binary_dict.items():
        name, _ = v
        if "C-Phone_sil" in name or "C-Phone_pau" in name or "C-Phone_br" in name:
            in_sil_indices.append(k)
    if len(in_sil_indices) > 0:
        for in_sil_idx in in_sil_indices:
            indices = linguistic_features[:, in_sil_idx : in_sil_idx + 1] > 0
            vuv[indices] = 0.0
    return vuv
def gen_spsvs_static_features(
    labels,
    acoustic_features,
    binary_dict,
    numeric_dict,
    stream_sizes,
    has_dynamic_features,
    pitch_idx=None,
    num_windows=3,
    frame_period=5,
    relative_f0=True,
    vibrato_scale=1.0,
    vuv_threshold=0.3,
    force_fix_vuv=True,
):
    """Generate static features from predicted acoustic features

    Args:
        labels (HTSLabelFile): HTS labels
        acoustic_features (ndarray): predicted acoustic features
        binary_dict (dict): binary feature dictionary
        numeric_dict (dict): numeric feature dictionary
        stream_sizes (list): stream sizes
        has_dynamic_features (list): whether each stream has dynamic features
        pitch_idx (int): index of pitch features
        num_windows (int): number of windows
        frame_period (float): frame period
        relative_f0 (bool): whether to use relative f0
        vibrato_scale (float): vibrato scale
        vuv_threshold (float): vuv threshold
        force_fix_vuv (bool): whether to use post-processing to fix VUV.

    Returns:
        tuple: tuple of mgc, lf0, vuv and bap.
    """
    # HTS labels use 100 ns as the time unit
    hts_frame_shift = int(frame_period * 1e4)
    if pitch_idx is None:
        pitch_idx = get_pitch_index(binary_dict, numeric_dict)
    if np.any(has_dynamic_features):
        static_stream_sizes = get_static_stream_sizes(
            stream_sizes, has_dynamic_features, num_windows
        )
    else:
        static_stream_sizes = stream_sizes
    # Copy here to avoid inplace operations on input acoustic features
    acoustic_features = acoustic_features.copy()
    # Split multi-stream features
    # Supported layouts: 4 streams (mgc, f0, vuv, bap), optionally followed
    # by vibrato parameters (and vibrato flags for the sine-based variant)
    streams = split_streams(acoustic_features, static_stream_sizes)
    if len(streams) == 4:
        mgc, target_f0, vuv, bap = streams
        vib, vib_flags = None, None
    elif len(streams) == 5:
        # Assuming diff-based vibrato parameters
        mgc, target_f0, vuv, bap, vib = streams
        vib_flags = None
    elif len(streams) == 6:
        # Assuming sine-based vibrato parameters
        mgc, target_f0, vuv, bap, vib, vib_flags = streams
    else:
        raise RuntimeError("Not supported streams")
    linguistic_features = fe.linguistic_features(
        labels,
        binary_dict,
        numeric_dict,
        add_frame_features=True,
        frame_shift=hts_frame_shift,
    )
    # Correct V/UV based on special phone flags
    if force_fix_vuv:
        vuv = correct_vuv_by_phone(vuv, binary_dict, linguistic_features)
    # F0
    if relative_f0:
        # The model predicts log-F0 differences from the musical score;
        # reconstruct absolute F0 from the interpolated score log-F0
        diff_lf0 = target_f0
        f0_score = _midi_to_hz(linguistic_features, pitch_idx, False)[:, None]
        lf0_score = f0_score.copy()
        nonzero_indices = np.nonzero(lf0_score)
        lf0_score[nonzero_indices] = np.log(f0_score[nonzero_indices])
        lf0_score = interp1d(lf0_score, kind="slinear")
        f0 = diff_lf0 + lf0_score
        f0[vuv < vuv_threshold] = 0
        f0[np.nonzero(f0)] = np.exp(f0[np.nonzero(f0)])
    else:
        # The model predicts log-F0 directly; convert to linear F0 and
        # zero out unvoiced frames
        f0 = target_f0
        f0[vuv < vuv_threshold] = 0
        f0[np.nonzero(f0)] = np.exp(f0[np.nonzero(f0)])
    if vib is not None:
        if vib_flags is not None:
            # Generate sine-based vibrato
            vib_flags = vib_flags.flatten()
            m_a, m_f = vib[:, 0], vib[:, 1]
            # Fill zeros for non-vibrato frames
            m_a[vib_flags < 0.5] = 0
            m_f[vib_flags < 0.5] = 0
            # Gen vibrato
            sr_f0 = int(1 / (frame_period * 0.001))
            f0 = gen_sine_vibrato(f0.flatten(), sr_f0, m_a, m_f, vibrato_scale)
        else:
            # Generate diff-based vibrato
            f0 = f0.flatten() + vibrato_scale * vib.flatten()
    # NOTE: Back to log-domain for convenience
    lf0 = f0.copy()
    lf0[np.nonzero(lf0)] = np.log(f0[np.nonzero(lf0)])
    # NOTE: interpolation is necessary
    lf0 = interp1d(lf0, kind="slinear")
    # Ensure 2-D column vectors for downstream processing
    lf0 = lf0[:, None] if len(lf0.shape) == 1 else lf0
    vuv = vuv[:, None] if len(vuv.shape) == 1 else vuv
    return mgc, lf0, vuv, bap
def gen_world_params(
    mgc,
    lf0,
    vuv,
    bap,
    sample_rate,
    vuv_threshold=0.3,
    use_world_codec=False,
):
    """Generate WORLD parameters from mgc, lf0, vuv and bap.

    Decodes the compact acoustic features back to the raw WORLD
    parameters: the spectral envelope is recovered from mgc (either via
    mel-cepstrum inversion or the WORLD codec), aperiodicity is decoded
    from bap, and F0 is recovered from log-F0 with V/UV gating.

    Args:
        mgc (ndarray): mel-generalized cepstrum (or WORLD-coded spectral
            envelope when ``use_world_codec`` is True)
        lf0 (ndarray): log-F0
        vuv (ndarray): voiced/unvoiced flags (continuous values; thresholded
            by ``vuv_threshold``)
        bap (ndarray): band aperiodicity (or mel-cepstral aperiodicity)
        sample_rate (int): sample rate
        vuv_threshold (float): threshold for VUV
        use_world_codec (bool): whether to use WORLD codec for spectral envelope

    Returns:
        tuple: tuple of f0, spectrogram and aperiodicity, all ``float64``
            (the dtype WORLD's synthesis API expects)
    """
    fftlen = pyworld.get_cheaptrick_fft_size(sample_rate)
    alpha = pysptk.util.mcepalpha(sample_rate)
    # Heuristic: WORLD band aperiodicity has few bands; more than 5 dims
    # implies the mel-cepstral aperiodicity representation was used.
    use_mcep_aperiodicity = bap.shape[-1] > 5
    if use_world_codec:
        spectrogram = pyworld.decode_spectral_envelope(
            np.ascontiguousarray(mgc).astype(np.float64), sample_rate, fftlen
        )
    else:
        spectrogram = pysptk.mc2sp(
            np.ascontiguousarray(mgc), fftlen=fftlen, alpha=alpha
        )
    if use_mcep_aperiodicity:
        aperiodicity = pysptk.mc2sp(
            np.ascontiguousarray(bap), fftlen=fftlen, alpha=alpha
        )
    else:
        aperiodicity = pyworld.decode_aperiodicity(
            np.ascontiguousarray(bap).astype(np.float64), sample_rate, fftlen
        )
    # fill aperiodicity with ones for unvoiced regions
    aperiodicity[vuv.reshape(-1) < vuv_threshold, 0] = 1.0
    # WORLD fails catastrophically for out of range aperiodicity
    aperiodicity = np.clip(aperiodicity, 0.0, 1.0)
    # F0: exponentiate nonzero log-F0 values, then zero out unvoiced frames
    f0 = lf0.copy()
    f0[np.nonzero(f0)] = np.exp(f0[np.nonzero(f0)])
    f0[vuv < vuv_threshold] = 0
    f0 = f0.flatten().astype(np.float64)
    spectrogram = spectrogram.astype(np.float64)
    aperiodicity = aperiodicity.astype(np.float64)
    return f0, spectrogram, aperiodicity
| 49,738 | 34.275887 | 96 | py |
nnsvs | nnsvs-master/nnsvs/postfilters.py | import numpy as np
import torch
from nnsvs.base import BaseModel
from nnsvs.multistream import split_streams
from nnsvs.util import init_weights
from torch import nn
def variance_scaling(gv, feats, offset=2, note_frame_indices=None):
    """Variance scaling method to enhance synthetic speech quality

    Method proposed in :cite:t:`silen2012ways`. Scales the utterance-level
    variance of ``feats`` (from dimension ``offset`` onward) to match the
    global variance computed over training data.

    Args:
        gv (np.ndarray): global variance computed over training data
        feats (np.ndarray): input features
        offset (int): number of leading dimensions to leave unscaled
        note_frame_indices (np.ndarray): indices of note frames; when given,
            statistics are computed and scaling is applied only on those frames

    Returns:
        np.ndarray: scaled features
    """
    use_subset = note_frame_indices is not None
    if use_subset and len(note_frame_indices) == 0:
        # Nothing to scale; return the input untouched.
        return feats

    # Utterance-level statistics over either the selected frames or all frames
    region = feats[note_frame_indices] if use_subset else feats
    utt_gv = region.var(0)
    utt_mu = region.mean(0)

    scale = np.sqrt(gv[offset:] / utt_gv[offset:])
    out = feats.copy()
    if use_subset:
        out[note_frame_indices, offset:] = (
            scale * (feats[note_frame_indices, offset:] - utt_mu[offset:])
            + utt_mu[offset:]
        )
    else:
        out[:, offset:] = (
            scale * (feats[:, offset:] - utt_mu[offset:]) + utt_mu[offset:]
        )
    return out
class MovingAverage1d(nn.Conv1d):
    """Non-trainable moving-average (box) filter for 1-D signals.

    Implemented as a depthwise ``Conv1d`` with a fixed ``1/kernel_size``
    kernel and "same" padding, so the output length equals the input length.

    Args:
        in_channels (int): number of input channels
        out_channels (int): number of output channels
        kernel_size (int): kernel size (averaging window width)
        padding_mode (str): padding mode
    """

    def __init__(self, in_channels, out_channels, kernel_size, padding_mode="reflect"):
        # groups=in_channels makes this a depthwise conv: each channel is
        # smoothed independently, never mixed with the others.
        super().__init__(
            in_channels,
            out_channels,
            kernel_size,
            groups=in_channels,
            bias=False,
            padding="same",
            padding_mode=padding_mode,
        )
        # Box kernel, frozen so optimizers never touch the filter.
        nn.init.constant_(self.weight, 1.0 / kernel_size)
        self.weight.requires_grad_(False)
class Conv2dPostFilter(BaseModel):
    """A post-filter based on Conv2d

    A model proposed in :cite:t:`kaneko2017generative`. The network predicts
    a residual that is added to the input features; a random noise input is
    concatenated with the features at every conv layer, making the mapping
    stochastic (GAN-style).

    Args:
        in_dim (int): dimension of the input features
        channels (int): number of channels
        kernel_size (tuple): kernel sizes for Conv2d
        init_type (str): type of initialization
        noise_scale (float): scale of noise
        noise_type (str): type of noise. "frame_wise" or "bin_wise"
        padding_mode (str): padding mode
        smoothing_width (int): Width of smoothing window.
            The larger the smoother. Only used at inference time.
    """

    def __init__(
        self,
        in_dim=None,
        channels=128,
        kernel_size=(5, 5),
        init_type="kaiming_normal",
        noise_scale=1.0,
        noise_type="bin_wise",
        padding_mode="zeros",
        smoothing_width=-1,
    ):
        super().__init__()
        self.in_dim = in_dim
        self.noise_type = noise_type
        self.noise_scale = noise_scale
        C = channels
        self.smoothing_width = smoothing_width
        assert len(kernel_size) == 2
        ks = np.asarray(list(kernel_size))
        # "same"-style padding for stride-1 convs with odd kernel sizes
        padding = (ks - 1) // 2
        self.conv1 = nn.Sequential(
            nn.Conv2d(
                2,
                C,
                kernel_size=ks,
                padding=padding,
                padding_mode=padding_mode,
            ),
            nn.ReLU(),
        )
        # Each subsequent layer re-concatenates the input features (the +1
        # input channel) with the previous layer's output.
        self.conv2 = nn.Sequential(
            nn.Conv2d(
                C + 1, C * 2, kernel_size=ks, padding=padding, padding_mode=padding_mode
            ),
            nn.ReLU(),
        )
        self.conv3 = nn.Sequential(
            nn.Conv2d(
                C * 2 + 1, C, kernel_size=ks, padding=padding, padding_mode=padding_mode
            ),
            nn.ReLU(),
        )
        self.conv4 = nn.Conv2d(
            C + 1, 1, kernel_size=ks, padding=padding, padding_mode=padding_mode
        )
        if self.noise_type == "frame_wise":
            # noise: (B, T, 1); fc expands it to the feature dimension
            self.fc = nn.Linear(1, in_dim)
        elif self.noise_type == "bin_wise":
            # noise: (B, T, C)
            self.fc = None
        else:
            raise ValueError("Unknown noise type: {}".format(self.noise_type))
        init_weights(self, init_type)

    def forward(self, x, lengths=None, y=None, is_inference=False):
        """Forward step

        Args:
            x (torch.Tensor): input tensor of shape (B, T, C)
            lengths (torch.Tensor): lengths of shape (B,)
            y (torch.Tensor): unused; kept for interface compatibility
            is_inference (bool): if True, the noise is optionally smoothed
                along the time axis with a moving-average filter

        Returns:
            torch.Tensor: output tensor of shape (B, T, C)
        """
        # (B, T, C) -> (B, 1, T, C):
        x = x.unsqueeze(1)
        if self.noise_type == "bin_wise":
            # (B, C, T)
            z = torch.randn_like(x).squeeze(1).transpose(1, 2) * self.noise_scale
            # Apply moving average filter at inference time only
            if is_inference and self.smoothing_width > 0:
                ave_filt = MovingAverage1d(
                    self.in_dim, self.in_dim, self.smoothing_width
                ).to(x.device)
                z = ave_filt(z)
            # (B, 1, T, C)
            z = z.transpose(1, 2).unsqueeze(1)
        elif self.noise_type == "frame_wise":
            # (B, 1, T)
            z = torch.randn(x.shape[0], 1, x.shape[2]).to(x.device) * self.noise_scale
            # Apply moving average filter at inference time only
            if is_inference and self.smoothing_width > 0:
                ave_filt = MovingAverage1d(1, 1, self.smoothing_width).to(x.device)
                z = ave_filt(z)
            # (B, 1, T, 1)
            z = z.unsqueeze(-1)
            # (B, 1, T, C)
            z = self.fc(z)
        x_syn = x
        y = self.conv1(torch.cat([x_syn, z], dim=1))
        y = self.conv2(torch.cat([x_syn, y], dim=1))
        y = self.conv3(torch.cat([x_syn, y], dim=1))
        residual = self.conv4(torch.cat([x_syn, y], dim=1))
        # Residual connection: the network only learns the correction term
        out = x_syn + residual
        # (B, 1, T, C) -> (B, T, C)
        out = out.squeeze(1)
        return out

    def inference(self, x, lengths=None):
        return self(x, lengths, is_inference=True)
class MultistreamPostFilter(BaseModel):
    """A multi-stream post-filter that applies post-filtering for each feature stream

    Currently, post-filtering for MGC, BAP and log-F0 are supported.
    Note that it doesn't make much sense to apply post-filtering for other features.

    Args:
        mgc_postfilter (nn.Module): post-filter for MGC
        bap_postfilter (nn.Module): post-filter for BAP
        lf0_postfilter (nn.Module): post-filter for log-F0
        stream_sizes (list): sizes of each feature stream
        mgc_offset (int): offset for MGC. Defaults to 2.
        bap_offset (int): offset for BAP. Defaults to 0.
    """

    def __init__(
        self,
        mgc_postfilter: nn.Module,
        bap_postfilter: nn.Module,
        lf0_postfilter: nn.Module,
        stream_sizes: list,
        mgc_offset: int = 2,
        bap_offset: int = 0,
    ):
        super().__init__()
        self.mgc_postfilter = mgc_postfilter
        self.bap_postfilter = bap_postfilter
        self.lf0_postfilter = lf0_postfilter
        self.stream_sizes = stream_sizes
        self.mgc_offset = mgc_offset
        self.bap_offset = bap_offset

    @staticmethod
    def _run_postfilter(postfilter, stream, offset, lengths, is_inference):
        """Apply ``postfilter`` to ``stream``, keeping the first ``offset``
        feature dimensions unchanged."""
        if offset > 0:
            # keep unchanged for the 0-to-${offset}-th dims of the stream
            head = stream[:, :, :offset]
            tail = stream[:, :, offset:]
            if is_inference:
                tail = postfilter.inference(tail, lengths)
            else:
                tail = postfilter(tail, lengths)
            return torch.cat([head, tail], dim=-1)
        if is_inference:
            return postfilter.inference(stream, lengths)
        return postfilter(stream, lengths)

    def forward(self, x, lengths=None, y=None, is_inference=False):
        """Forward step

        Each feature stream is processed independently.

        Args:
            x (torch.Tensor): input tensor of shape (B, T, C)
            lengths (torch.Tensor): lengths of shape (B,)
            y (torch.Tensor): unused; kept for interface compatibility
            is_inference (bool): whether to use the sub-filters'
                ``inference`` paths

        Returns:
            torch.Tensor: output tensor of shape (B, T, C)
        """
        streams = split_streams(x, self.stream_sizes)
        vib, vib_flags = None, None
        if len(streams) == 4:
            mgc, lf0, vuv, bap = streams
        elif len(streams) == 5:
            # BUGFIX: the 5th (vibrato) stream used to be unpacked into
            # ``vuv``, clobbering the V/UV stream and leaving ``vib``
            # undefined for the concatenation below.
            mgc, lf0, vuv, bap, vib = streams
        elif len(streams) == 6:
            mgc, lf0, vuv, bap, vib, vib_flags = streams
        else:
            raise ValueError("Invalid number of streams")

        if self.mgc_postfilter is not None:
            mgc = self._run_postfilter(
                self.mgc_postfilter, mgc, self.mgc_offset, lengths, is_inference
            )
        if self.bap_postfilter is not None:
            bap = self._run_postfilter(
                self.bap_postfilter, bap, self.bap_offset, lengths, is_inference
            )
        if self.lf0_postfilter is not None:
            if is_inference:
                lf0 = self.lf0_postfilter.inference(lf0, lengths)
            else:
                lf0 = self.lf0_postfilter(lf0, lengths)

        if len(streams) == 4:
            out = torch.cat([mgc, lf0, vuv, bap], dim=-1)
        elif len(streams) == 5:
            out = torch.cat([mgc, lf0, vuv, bap, vib], dim=-1)
        else:
            out = torch.cat([mgc, lf0, vuv, bap, vib, vib_flags], dim=-1)
        return out

    def inference(self, x, lengths):
        return self(x, lengths, is_inference=True)
class MelF0MultistreamPostFilter(BaseModel):
    """Multi-stream post-filter for (mel, lf0, vuv) features.

    Mel-spectrogram and log-F0 streams are post-filtered independently;
    the V/UV stream is passed through untouched.

    Args:
        mel_postfilter (nn.Module): post-filter for the mel stream
        lf0_postfilter (nn.Module): post-filter for the log-F0 stream
        stream_sizes (list): sizes of each feature stream (must be 3 streams)
        mel_offset (int): number of leading mel dims to leave unfiltered
    """

    def __init__(
        self,
        mel_postfilter: nn.Module,
        lf0_postfilter: nn.Module,
        stream_sizes: list,
        mel_offset: int = 0,
    ):
        super().__init__()
        self.mel_postfilter = mel_postfilter
        self.lf0_postfilter = lf0_postfilter
        self.stream_sizes = stream_sizes
        self.mel_offset = mel_offset

    def forward(self, x, lengths=None, y=None, is_inference=False):
        """Forward step

        Each feature stream is processed independently.

        Args:
            x (torch.Tensor): input tensor of shape (B, T, C)
            lengths (torch.Tensor): lengths of shape (B,)
            y (torch.Tensor): unused; kept for interface compatibility
            is_inference (bool): whether to use the sub-filters'
                ``inference`` paths

        Returns:
            torch.Tensor: output tensor of shape (B, T, C)
        """
        streams = split_streams(x, self.stream_sizes)
        assert len(streams) == 3
        mel, lf0, vuv = streams

        def run(postfilter, feat):
            # Dispatch to the sub-filter's inference/training path.
            if is_inference:
                return postfilter.inference(feat, lengths)
            return postfilter(feat, lengths)

        if self.mel_postfilter is not None:
            if self.mel_offset > 0:
                # The first ``mel_offset`` dims bypass the post-filter.
                kept = mel[:, :, : self.mel_offset]
                filtered = run(self.mel_postfilter, mel[:, :, self.mel_offset :])
                mel = torch.cat([kept, filtered], dim=-1)
            else:
                mel = run(self.mel_postfilter, mel)

        if self.lf0_postfilter is not None:
            lf0 = run(self.lf0_postfilter, lf0)

        return torch.cat([mel, lf0, vuv], dim=-1)

    def inference(self, x, lengths):
        return self(x, lengths, is_inference=True)
class _PadConv2dPostFilter(BaseModel):
    """Conv2d post-filter with one-sided padding on the feature axis.

    Helper for :class:`MultistreamConv2dPostFilter`: each sub-band filter
    pads (and later crops) only the side of the feature axis that borders
    a neighboring band, so overlapping band slices can be processed
    independently and concatenated back.

    Args:
        in_dim (int): dimension of the (padded) input features; also the
            output dimension of the noise projection layer
        channels (int): number of channels
        kernel_size (int): kernel size (scalar; used for both axes in conv1)
        init_type (str): type of initialization
        padding_side (str): which side of the feature axis to reflect-pad:
            "left", "right" or "none"
    """

    def __init__(
        self,
        in_dim=None,
        channels=128,
        kernel_size=5,
        init_type="kaiming_normal",
        padding_side="left",
    ):
        super().__init__()
        assert not isinstance(kernel_size, list)
        C = channels
        ks = kernel_size
        padding = (ks - 1) // 2
        self.padding = padding
        # Treat padding for the feature-axis carefully
        # use normal padding for the time-axis (i.e., (padding, padding))
        self.padding_side = padding_side
        # ReflectionPad2d takes (left, right, top, bottom); the feature axis
        # is the last (width) axis here.
        if padding_side == "left":
            self.pad = nn.ReflectionPad2d((padding, 0, padding, padding))
        elif padding_side == "none":
            self.pad = nn.ReflectionPad2d((0, 0, padding, padding))
        elif padding_side == "right":
            self.pad = nn.ReflectionPad2d((0, padding, padding, padding))
        else:
            raise ValueError("Invalid padding side")
        self.conv1 = nn.Sequential(
            nn.Conv2d(2, C, kernel_size=(ks, ks)),
            nn.ReLU(),
        )
        # NOTE: for the subsequent layers, use fixed kernel_size 3 for feature-axis
        self.conv2 = nn.Sequential(
            nn.Conv2d(
                C + 1,
                C * 2,
                kernel_size=(ks, 3),
                padding=(padding, 1),
                padding_mode="reflect",
            ),
            nn.ReLU(),
        )
        self.conv3 = nn.Sequential(
            nn.Conv2d(
                C * 2 + 1,
                C,
                kernel_size=(ks, 3),
                padding=(padding, 1),
                padding_mode="reflect",
            ),
            nn.ReLU(),
        )
        self.conv4 = nn.Conv2d(
            C + 1, 1, kernel_size=(ks, 1), padding=(padding, 0), padding_mode="reflect"
        )
        # Projects frame-wise scalar noise to the feature dimension
        self.fc = nn.Linear(1, in_dim)
        init_weights(self, init_type)

    def forward(self, x, z, lengths=None):
        """Forward step.

        Args:
            x (torch.Tensor): input features of shape (B, T, C)
            z (torch.Tensor): frame-wise noise of shape (B, T, 1)
            lengths (torch.Tensor): unused

        Returns:
            torch.Tensor: filtered features; the feature axis is cropped by
                ``padding`` on the padded side(s) relative to the input.
        """
        # (B, T, C) -> (B, 1, T, C):
        x = x.unsqueeze(1)
        z = z.unsqueeze(1)
        z = self.fc(z)
        x_syn = x
        y = self.conv1(torch.cat([self.pad(x_syn), self.pad(z)], dim=1))
        # conv1 has no padding of its own, so its output is narrower than x
        # on the unpadded side(s); crop x_syn to match before concatenating.
        if self.padding_side == "left":
            x_syn = x[:, :, :, : -self.padding]
        elif self.padding_side == "none":
            x_syn = x[:, :, :, self.padding : -self.padding]
        elif self.padding_side == "right":
            x_syn = x[:, :, :, self.padding :]
        y = self.conv2(torch.cat([x_syn, y], dim=1))
        y = self.conv3(torch.cat([x_syn, y], dim=1))
        residual = self.conv4(torch.cat([x_syn, y], dim=1))
        # Residual connection on the cropped input
        out = x_syn + residual
        # (B, 1, T, C) -> (B, T, C)
        out = out.squeeze(1)
        return out
class MultistreamConv2dPostFilter(nn.Module):
    """Conv2d-based multi-stream post-filter designed for MGC

    Divide the MGC transformation into low/mid/high dim transfomations
    with small overlaps. Overlap is determined by the kernel size.

    Args:
        in_dim (int): unused; kept for interface consistency
        channels (int): number of channels of each sub-band filter
        kernel_size (int): kernel size; also determines the overlap width
        init_type (str): type of weight initialization
        noise_scale (float): scale of the frame-wise noise
        stream_sizes (tuple): sizes of the low/mid/high feature bands
            (must have exactly 3 entries)
    """

    def __init__(
        self,
        in_dim=None,
        channels=128,
        kernel_size=5,
        init_type="kaiming_normal",
        noise_scale=1.0,
        stream_sizes=(8, 20, 30),
    ):
        super().__init__()
        assert len(stream_sizes) == 3
        self.padding = (kernel_size - 1) // 2
        self.noise_scale = noise_scale
        self.stream_sizes = stream_sizes
        # Each sub-band filter sees its band plus ``padding`` extra dims per
        # neighboring band; the one-sided crop inside _PadConv2dPostFilter
        # brings the output back to exactly the band size.
        self.low_postfilter = _PadConv2dPostFilter(
            stream_sizes[0] + self.padding,
            channels=channels,
            kernel_size=kernel_size,
            init_type=init_type,
            padding_side="left",
        )
        self.mid_postfilter = _PadConv2dPostFilter(
            stream_sizes[1] + 2 * self.padding,
            channels=channels,
            kernel_size=kernel_size,
            init_type=init_type,
            padding_side="none",
        )
        self.high_postfilter = _PadConv2dPostFilter(
            stream_sizes[2] + self.padding,
            channels=channels,
            kernel_size=kernel_size,
            init_type=init_type,
            padding_side="right",
        )

    def forward(self, x, lengths=None, y=None):
        """Forward step.

        Args:
            x (torch.Tensor): input tensor of shape (B, T, C) where
                C == sum(stream_sizes)
            lengths (torch.Tensor): unused
            y (torch.Tensor): unused; kept for interface compatibility

        Returns:
            torch.Tensor: output tensor of shape (B, T, C)
        """
        assert x.shape[-1] == sum(self.stream_sizes)
        # Frame-wise scalar noise shared by the three sub-band filters: (B, T, 1)
        z = torch.randn(x.shape[0], x.shape[1], 1).to(x.device) * self.noise_scale
        # Process three streams separately with a overlap width of padding
        out1 = self.low_postfilter(x[:, :, : self.stream_sizes[0] + self.padding], z)
        out2 = self.mid_postfilter(
            x[
                :,
                :,
                self.stream_sizes[0]
                - self.padding : sum(self.stream_sizes[:2])
                + self.padding,
            ],
            z,
        )
        out3 = self.high_postfilter(
            x[:, :, sum(self.stream_sizes[:2]) - self.padding :], z
        )
        # Merge the three outputs
        out = torch.cat([out1, out2, out3], dim=-1)
        return out
| 17,384 | 31.801887 | 88 | py |
nnsvs | nnsvs-master/nnsvs/util.py | import importlib
import random
from os.path import join
from pathlib import Path
from typing import Any
import numpy as np
import pkg_resources
import pyworld
import torch
from hydra.utils import instantiate
from nnsvs.multistream import get_static_features, get_static_stream_sizes
from nnsvs.usfgan import USFGANWrapper
from omegaconf import OmegaConf
from torch import nn
try:
from parallel_wavegan.utils import load_model
_pwg_available = True
except ImportError:
_pwg_available = False
# mask-related functions were adapted from https://github.com/espnet/espnet
EXAMPLE_DIR = "_example_data"
# Adapted from https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix
def init_weights(net, init_type="normal", init_gain=0.02):
    """Initialize network weights in place.

    Conv*/Linear layers get their weights drawn according to ``init_type``
    and their biases zeroed; BatchNorm2d layers are normally initialized
    around 1. With ``init_type="none"`` the network is left untouched.

    Args:
        net (torch.nn.Module): network to initialize
        init_type (str): the name of an initialization method:
            normal | xavier_normal | kaiming_normal | orthogonal | none.
        init_gain (float): scaling factor for normal, xavier and orthogonal.
    """
    if init_type == "none":
        return

    def _init(module):
        name = module.__class__.__name__
        has_weight = hasattr(module, "weight")
        if has_weight and ("Conv" in name or "Linear" in name):
            weight = module.weight.data
            if init_type == "normal":
                nn.init.normal_(weight, 0.0, init_gain)
            elif init_type == "xavier_normal":
                nn.init.xavier_normal_(weight, gain=init_gain)
            elif init_type == "kaiming_normal":
                nn.init.kaiming_normal_(weight, a=0, mode="fan_in")
            elif init_type == "orthogonal":
                nn.init.orthogonal_(weight, gain=init_gain)
            else:
                raise NotImplementedError(
                    "initialization method [%s] is not implemented" % init_type
                )
            if getattr(module, "bias", None) is not None:
                nn.init.constant_(module.bias.data, 0.0)
        elif "BatchNorm2d" in name:
            # BatchNorm Layer's weight is not a matrix; only normal distribution applies.
            nn.init.normal_(module.weight.data, 1.0, init_gain)
            nn.init.constant_(module.bias.data, 0.0)

    net.apply(_init)
def get_world_stream_info(
    sr: int,
    mgc_order: int,
    num_windows: int = 3,
    vibrato_mode: str = "none",
    use_mcep_aperiodicity: bool = False,
    mcep_aperiodicity_order: int = 24,
):
    """Get stream sizes for WORLD-based acoustic features.

    Streams are ordered as [mgc, lf0, vuv, bap], optionally followed by
    vibrato streams depending on ``vibrato_mode``.

    Args:
        sr (int): sampling rate
        mgc_order (int): order of mel-generalized cepstrum
        num_windows (int): number of windows (static + dynamic features)
        vibrato_mode (str): vibrato analysis mode: "diff", "sine" or "none"
        use_mcep_aperiodicity (bool): use mel-cepstral aperiodicity instead
            of WORLD band aperiodicity
        mcep_aperiodicity_order (int): order of mel-cepstral aperiodicity

    Returns:
        list: stream sizes

    Raises:
        RuntimeError: if ``vibrato_mode`` is unknown
    """
    if use_mcep_aperiodicity:
        bap_size = mcep_aperiodicity_order + 1
    else:
        bap_size = pyworld.get_num_aperiodicities(sr) * num_windows

    # [mgc, lf0, vuv, bap]
    stream_sizes = [(mgc_order + 1) * num_windows, num_windows, 1, bap_size]

    if vibrato_mode == "diff":
        # vib
        stream_sizes.append(num_windows)
    elif vibrato_mode == "sine":
        # vib + vib_flags
        stream_sizes.extend([3 * num_windows, 1])
    elif vibrato_mode != "none":
        raise RuntimeError("Unknown vibrato mode: {}".format(vibrato_mode))
    return stream_sizes
def load_utt_list(utt_list):
    """Load a list of utterances.

    Surrounding whitespace is stripped and blank lines are dropped.

    Args:
        utt_list (str): path to a file containing a list of utterances,
            one per line

    Returns:
        List[str]: list of utterances
    """
    with open(utt_list) as f:
        stripped = [line.strip() for line in f]
    return [utt_id for utt_id in stripped if utt_id]
def example_xml_file(key="haruga_kita"):
    """Get the path to an xml file bundled with the package.

    The file is looked up in the package's ``_example_data`` directory.

    Args:
        key (str): base name of the file (without the ``.xml`` extension)

    Returns:
        str: path to an example xml file

    NOTE(review): the previous docstring claimed ``FileNotFoundError`` is
    raised when the file is missing, but ``resource_filename`` does not
    obviously check existence here -- confirm before relying on that.
    """
    return pkg_resources.resource_filename(__name__, join(EXAMPLE_DIR, f"{key}.xml"))
def init_seed(seed):
    """Seed all random number generators for reproducibility.

    Seeds Python's ``random``, NumPy and PyTorch; when CUDA is available,
    all CUDA devices are seeded as well.

    Args:
        seed (int): random seed
    """
    for seeder in (random.seed, np.random.seed, torch.manual_seed):
        seeder(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(seed)
def dynamic_import(name: str) -> Any:
    """Dynamically import an attribute by its qualified name.

    Args:
        name (str): "module_name" + ":" + "class_name"

    Returns:
        Any: the resolved attribute (usually a class)
    """
    module_path, attr_name = name.split(":")
    module = importlib.import_module(module_path)
    return getattr(module, attr_name)
def pad_2d(x, max_len, constant_values=0):
    """Pad a 2d array along the time axis (axis 0).

    Args:
        x (np.ndarray): array of shape (T, D) to pad
        max_len (int): target length (must be >= len(x))
        constant_values (int, optional): value to pad with. Default: 0

    Returns:
        np.ndarray: padded array of shape (max_len, D)
    """
    pad_width = [(0, max_len - len(x)), (0, 0)]
    return np.pad(x, pad_width, mode="constant", constant_values=constant_values)
def make_pad_mask(lengths, xs=None, length_dim=-1, maxlen=None):
    """Make mask tensor containing indices of padded part.

    Adapted from espnet. Entries are True where the frame index is at or
    beyond the sequence length (i.e., padding).

    Args:
        lengths (LongTensor or List): Batch of lengths (B,).
        xs (Tensor, optional): The reference tensor. If set, masks will be
            the same shape as this tensor.
        length_dim (int, optional): Dimension indicator of the above tensor.
        maxlen (int, optional): maximum sequence length; inferred from
            ``lengths`` (or ``xs``) when not given.

    Returns:
        Tensor: Mask tensor containing indices of padded part.
            dtype=torch.uint8 in PyTorch 1.2-
            dtype=torch.bool in PyTorch 1.2+ (including 1.2)
    """
    if length_dim == 0:
        raise ValueError("length_dim cannot be 0: {}".format(length_dim))
    if not isinstance(lengths, list):
        lengths = lengths.tolist()
    bs = int(len(lengths))
    if maxlen is None:
        if xs is None:
            maxlen = int(max(lengths))
        else:
            maxlen = xs.size(length_dim)
    # Broadcast a [0, maxlen) ramp against per-sequence lengths:
    # mask[b, t] = (t >= lengths[b])
    seq_range = torch.arange(0, maxlen, dtype=torch.int64)
    seq_range_expand = seq_range.unsqueeze(0).expand(bs, maxlen)
    seq_length_expand = seq_range_expand.new(lengths).unsqueeze(-1)
    mask = seq_range_expand >= seq_length_expand
    if xs is not None:
        assert xs.size(0) == bs, (xs.size(0), bs)
        if length_dim < 0:
            length_dim = xs.dim() + length_dim
        # Insert singleton axes everywhere except batch and length dims so
        # the (B, maxlen) mask can be expanded to xs's full shape.
        # ind = (:, None, ..., None, :, , None, ..., None)
        ind = tuple(
            slice(None) if i in (0, length_dim) else None for i in range(xs.dim())
        )
        mask = mask[ind].expand_as(xs).to(xs.device)
    return mask
def make_non_pad_mask(lengths, xs=None, length_dim=-1, maxlen=None):
    """Make mask tensor containing indices of non-padded part.

    This is simply the logical inverse of :func:`make_pad_mask`: entries
    are True for valid (non-padded) positions.

    Args:
        lengths (LongTensor or List): Batch of lengths (B,).
        xs (Tensor, optional): The reference tensor.
            If set, masks will be the same shape as this tensor.
        length_dim (int, optional): Dimension indicator of the above tensor.
        maxlen (int, optional): maximum sequence length.

    Returns:
        ByteTensor: mask tensor containing indices of non-padded part.
            dtype=torch.uint8 in PyTorch 1.2-
            dtype=torch.bool in PyTorch 1.2+ (including 1.2)
    """
    pad_mask = make_pad_mask(lengths, xs, length_dim, maxlen)
    return ~pad_mask
class PyTorchStandardScaler(nn.Module):
    """PyTorch module for feature standardization.

    The statistics are stored as frozen (non-trainable) parameters so they
    follow the module across devices and are saved in state dicts.

    Args:
        mean (torch.Tensor): per-dimension mean
        scale (torch.Tensor): per-dimension scale
    """

    def __init__(self, mean, scale):
        super().__init__()
        self.mean_ = nn.Parameter(mean, requires_grad=False)
        self.scale_ = nn.Parameter(scale, requires_grad=False)

    def transform(self, x):
        """Standardize ``x``: subtract mean, divide by scale."""
        centered = x - self.mean_
        return centered / self.scale_

    def inverse_transform(self, x):
        """Undo standardization: multiply by scale, add mean."""
        rescaled = x * self.scale_
        return rescaled + self.mean_
class StandardScaler:
    """sklearn.preprocessing.StandardScaler-like class with only
    transform functionality.

    Statistics are supplied at construction time instead of being fitted.

    Args:
        mean (np.ndarray): mean
        var (np.ndarray): variance
        scale (np.ndarray): scale
    """

    def __init__(self, mean, var, scale):
        self.mean_ = mean
        self.var_ = var
        # NOTE: scale may not exactly equal np.sqrt(var), so keep both.
        self.scale_ = scale

    def transform(self, x):
        """Standardize ``x``."""
        centered = x - self.mean_
        return centered / self.scale_

    def inverse_transform(self, x):
        """De-standardize ``x``."""
        return x * self.scale_ + self.mean_
class MinMaxScaler:
    """sklearn.preprocessing.MinMaxScaler-like class with only
    transform functionality.

    Statistics are supplied at construction time instead of being fitted.

    Args:
        min (np.ndarray): minimum (the additive term of the transform)
        scale (np.ndarray): scale (the multiplicative term)
        data_min (np.ndarray): minimum of input data
        data_max (np.ndarray): maximum of input data
        feature_range (tuple): (min, max)
    """

    def __init__(self, min, scale, data_min=None, data_max=None, feature_range=(0, 1)):
        self.min_ = min
        self.scale_ = scale
        self.data_min_ = data_min
        self.data_max_ = data_max
        self.feature_range = feature_range

    def transform(self, x):
        """Scale ``x`` into the configured feature range."""
        return x * self.scale_ + self.min_

    def inverse_transform(self, x):
        """Map ``x`` back to the original data range."""
        return (x - self.min_) / self.scale_
def extract_static_scaler(out_scaler, model_config):
    """Extract scaler for static features.

    Slices the statistics of a scaler fitted on static+dynamic features
    down to the static dimensions of each stream.

    Args:
        out_scaler (StandardScaler or MinMaxScaler): target scaler
        model_config (dict): model config that contain stream information

    Returns:
        StandardScaler or MinMaxScaler: scaler for static features
    """

    def _static_part(stat):
        # Keep only the static slice of each stream, then flatten the
        # per-stream pieces back into a single (1, D) array.
        streams = get_static_features(
            stat.reshape(1, 1, stat.shape[-1]),
            model_config.num_windows,
            model_config.stream_sizes,
            model_config.has_dynamic_features,
        )
        return np.concatenate(streams, -1).reshape(1, -1)

    static_scaler = StandardScaler(
        _static_part(out_scaler.mean_),
        _static_part(out_scaler.var_),
        _static_part(out_scaler.scale_),
    )
    return static_scaler
def load_vocoder(path, device, acoustic_config):
    """Load vocoder model from a given checkpoint path

    Note that the path needs to be a checkpoint of PWG or USFGAN.
    The vocoder type is detected from the config file found next to the
    checkpoint, and the matching input-feature scaler is loaded from the
    same directory.

    Args:
        path (str or Path): Path to the vocoder model
        device (str): Device to load the model
        acoustic_config (dict): Acoustic model config

    Returns:
        tuple: (vocoder, vocoder_in_scaler, vocoder_config)

    Raises:
        RuntimeError: if parallel_wavegan is not installed
    """
    if not _pwg_available:
        raise RuntimeError(
            "parallel_wavegan is required to load pre-trained checkpoint."
        )
    path = Path(path) if isinstance(path, str) else path
    model_dir = path.parent
    # Locate the vocoder config; the filename tells us how it was packaged.
    if (model_dir / "vocoder_model.yaml").exists():
        # packed model
        vocoder_config = OmegaConf.load(model_dir / "vocoder_model.yaml")
    elif (model_dir / "config.yml").exists():
        # PWG checkpoint
        vocoder_config = OmegaConf.load(model_dir / "config.yml")
    else:
        # usfgan
        vocoder_config = OmegaConf.load(model_dir / "config.yaml")

    # uSFGAN configs contain both "generator" and "discriminator" sections;
    # PWG configs do not.
    if "generator" in vocoder_config and "discriminator" in vocoder_config:
        # usfgan
        checkpoint = torch.load(
            path,
            map_location=lambda storage, loc: storage,
        )
        vocoder = instantiate(vocoder_config.generator).to(device)
        vocoder.load_state_dict(checkpoint["model"]["generator"])
        vocoder.remove_weight_norm()
        vocoder = USFGANWrapper(vocoder_config, vocoder)

        # Static-only stream sizes of the acoustic features
        stream_sizes = get_static_stream_sizes(
            acoustic_config.stream_sizes,
            acoustic_config.has_dynamic_features,
            acoustic_config.num_windows,
        )

        # Extract scaler params for [mgc, bap]
        if vocoder_config.data.aux_feats == ["mcep", "codeap"]:
            # streams: (mgc, lf0, vuv, bap)
            mean_ = np.load(model_dir / "in_vocoder_scaler_mean.npy")
            var_ = np.load(model_dir / "in_vocoder_scaler_var.npy")
            scale_ = np.load(model_dir / "in_vocoder_scaler_scale.npy")
            mgc_end_dim = stream_sizes[0]
            bap_start_dim = sum(stream_sizes[:3])
            bap_end_dim = sum(stream_sizes[:4])
            vocoder_in_scaler = StandardScaler(
                np.concatenate([mean_[:mgc_end_dim], mean_[bap_start_dim:bap_end_dim]]),
                np.concatenate([var_[:mgc_end_dim], var_[bap_start_dim:bap_end_dim]]),
                np.concatenate(
                    [scale_[:mgc_end_dim], scale_[bap_start_dim:bap_end_dim]]
                ),
            )
        else:
            # streams: (mel, lf0, vuv); the scaler only covers the mel part
            mel_dim = stream_sizes[0]
            vocoder_in_scaler = StandardScaler(
                np.load(model_dir / "in_vocoder_scaler_mean.npy")[:mel_dim],
                np.load(model_dir / "in_vocoder_scaler_var.npy")[:mel_dim],
                np.load(model_dir / "in_vocoder_scaler_scale.npy")[:mel_dim],
            )
    else:
        # PWG
        vocoder = load_model(path, config=vocoder_config).to(device)
        vocoder.remove_weight_norm()
        vocoder_in_scaler = StandardScaler(
            np.load(model_dir / "in_vocoder_scaler_mean.npy"),
            np.load(model_dir / "in_vocoder_scaler_var.npy"),
            np.load(model_dir / "in_vocoder_scaler_scale.npy"),
        )
    vocoder.eval()

    return vocoder, vocoder_in_scaler, vocoder_config
| 13,862 | 30.578588 | 89 | py |
nnsvs | nnsvs-master/nnsvs/pitch.py | """This module provides functionality for pitch analysis.
References:
Nakano et al, "An Automatic Singing Skill Evaluation Method
for Unknown Melodies Using Pitch Interval Accuracy and Vibrato Features"
Proc. Interspeech 2006.
山田 et al, "HMM に基づく歌声合成のためのビブラートモデル化"
IPSJ SIG Tech. Report 2009.
Note that vibrato extraction method in this module is exerimental.
Because details of the vibrato extraction method are not described
in the above papers and not trivial to implement (in my opinion),
my implementation may not work well compared to the original author's one.
Also note that there are a lot of tunable parameters (threshold,
window size, min/max extent, cut-off frequency, etc.).
If you want to get maximum performance, you might want to tune these
parameters with your dataset.
I tested this code with kiritan_singing and nit-song070 database.
"""
import librosa
import numpy as np
import torch
from nnsvs.dsp import lowpass_filter
from scipy.signal import argrelmax, argrelmin
_c4_hz = 440 * 2 ** (3 / 12 - 1)
_c4_cent = 4800
def hz_to_cent_based_c4(hz):
    """Convert Hz to cent based on C4 (C4 = 4800 cents).

    Zero (unvoiced) entries are left untouched.

    Args:
        hz (np.ndarray): array of Hz

    Returns:
        np.ndarray: array of cent
    """
    # C4 reference: 440 Hz (A4) is 9 semitones above C4
    c4_hz = 440 * 2 ** (3 / 12 - 1)
    c4_cent = 4800
    out = hz.copy()
    voiced = np.where(hz > 0)[0]
    out[voiced] = 1200 * np.log(hz[voiced] / c4_hz) / np.log(2) + c4_cent
    return out
def cent_to_hz_based_c4(cent):
    """Convert cent to Hz based on C4 (C4 = 4800 cents).

    Zero (unvoiced) entries are left untouched.

    Args:
        cent (np.ndarray): array of cent

    Returns:
        np.ndarray: array of Hz
    """
    # C4 reference: 440 Hz (A4) is 9 semitones above C4
    c4_hz = 440 * 2 ** (3 / 12 - 1)
    c4_cent = 4800
    out = cent.copy()
    voiced = np.where(cent > 0)[0]
    out[voiced] = np.exp((cent[voiced] - c4_cent) * np.log(2) / 1200) * c4_hz
    return out
def nonzero_segments(f0):
    """Find contiguous nonzero (voiced) segments of an F0 contour.

    Args:
        f0 (np.ndarray): array of f0 (0 = unvoiced)

    Returns:
        list: list of (start, end) index pairs. For a segment that ends
            before the last frame, ``end`` is the first unvoiced index
            after the segment; a segment that reaches the final frame
            ends at ``len(f0) - 1``.
    """
    segments = []
    start = None
    for idx, voiced in enumerate(f0 > 0):
        if voiced and start is None:
            # Entering a voiced run
            start = idx
        elif not voiced and start is not None:
            # Leaving a voiced run at the first unvoiced frame
            segments.append((start, idx))
            start = None
    if start is not None:
        # The contour ended while still voiced
        segments.append((start, len(f0) - 1))
    return segments
def note_segments(lf0_score_denorm):
    """Compute note segments (start and end indices) from log-F0

    Within each voiced region, a new note starts wherever the (flat,
    score-derived) log-F0 value changes.

    Note that unvoiced frames must be set to 0 in advance.

    Args:
        lf0_score_denorm (Tensor): (B, T)

    Returns:
        list: list of note (start, end) indices
    """
    segments = []
    for s, e in nonzero_segments(lf0_score_denorm):
        # 1 where consecutive frames differ (note boundary), 0 where flat
        out = torch.sign(torch.abs(torch.diff(lf0_score_denorm[s : e + 1])))
        transitions = torch.where(out > 0)[0]
        note_start, note_end = s, -1
        for pos in transitions:
            # pos is relative to the segment start s
            note_end = int(s + pos)
            segments.append((note_start, note_end))
            note_start = note_end + 1

        # Handle last note
        while (
            note_start < len(lf0_score_denorm) - 1 and lf0_score_denorm[note_start] <= 0
        ):
            note_start += 1
        note_end = note_start + 1
        while note_end < len(lf0_score_denorm) - 1 and lf0_score_denorm[note_end] > 0:
            note_end += 1
        if note_end != note_start + 1:
            segments.append((note_start, note_end))
    return segments
def compute_f0_correction_ratio(
    f0,
    f0_score,
    edges_to_be_excluded=50,
    out_of_tune_threshold=200,
    correction_threshold=100,
):
    """Compute f0 correction ratio

    Compares extracted F0 against the score-derived F0 over the stable
    centers of notes, and returns the average ratio needed to bring the
    extracted F0 in tune with the score.

    Args:
        f0 (np.ndarray): array of f0
        f0_score (np.ndarray): array of f0 score
        edges_to_be_excluded (int): number of frames to drop at both note
            edges (to skip overshoot/preparation)
        out_of_tune_threshold (float): frames whose ratio deviates by more
            than this many cents are excluded from the average
        correction_threshold (float): the returned ratio is clipped to at
            most this many cents of correction

    Returns:
        float: correction ratio to multiplied to F0 (i.e. f0 * ratio)
    """
    segments = note_segments(torch.from_numpy(f0_score))

    center_f0s = []
    center_score_f0s = []
    # edges_to_be_excluded = 50 # 0.25 sec for excluding overshoot/preparation
    for s, e in segments:
        L = e - s
        if L > edges_to_be_excluded * 2:
            center_f0s.append(f0[s + edges_to_be_excluded : e - edges_to_be_excluded])
            center_score_f0s.append(
                f0_score[s + edges_to_be_excluded : e - edges_to_be_excluded]
            )
    center_f0s = np.concatenate(center_f0s)
    center_score_f0s = np.concatenate(center_score_f0s)

    # Compute pitch ratio to be multiplied
    nonzero_indices = (center_f0s > 0) & (center_score_f0s > 0)
    ratio = center_score_f0s[nonzero_indices] / center_f0s[nonzero_indices]
    # Exclude too out-of-tune frames (over 2 semitone)
    up_threshold = np.exp(out_of_tune_threshold * np.log(2) / 1200)
    low_threshold = np.exp(-out_of_tune_threshold * np.log(2) / 1200)
    ratio = ratio[(ratio < up_threshold) & (ratio > low_threshold)]
    global_offset = ratio.mean()

    # Avoid corrections over semi-tone
    # If more than semi-tone pitch correction is needed, it is better to correct
    # data by hand or fix musicxml or UST instead.
    up_threshold = np.exp(correction_threshold * np.log(2) / 1200)
    low_threshold = np.exp(-correction_threshold * np.log(2) / 1200)
    if global_offset > up_threshold or global_offset < low_threshold:
        print(
            f"""warn: more than 1 semitone pitch correction is needed.
global_offset: {global_offset} cent.
It is likely that manual pitch corrections are preferable."""
        )
    global_offset = np.clip(global_offset, low_threshold, up_threshold)

    return global_offset
def extract_vibrato_parameters_impl(pitch_seg, sr):
    """Extract vibrato parameters for a single pitch segment

    Nakano et al, "An Automatic Singing Skill Evaluation Method
    for Unknown Melodies Using Pitch Interval Accuracy and Vibrato Features"
    Proc. Interspeech 2006.

    山田 et al, "HMM に基づく歌声合成のためのビブラートモデル化"
    IPSJ SIG Tech. Report 2009.

    Expects an interleaving high/low/high/... peak pattern: exactly one
    more local maximum than local minima. Otherwise the extraction is
    aborted and all-None is returned.

    Args:
        pitch_seg (np.ndarray): array of pitch
        sr (int): sampling rate

    Returns:
        tuple: (R, E, m_a, m_f) where R holds the peak-to-peak periods in
            frames, E the peak-to-peak amplitude differences, and m_a / m_f
            are frame-aligned sparse arrays of half-extent and rate
            (``1/R * sr``); or (None, None, None, None) on failure.
    """
    peak_high_pos = argrelmax(pitch_seg)[0]
    peak_low_pos = argrelmin(pitch_seg)[0]

    m_a = np.zeros(len(pitch_seg))
    m_f = np.zeros(len(pitch_seg))

    if len(peak_high_pos) != len(peak_low_pos) + 1:
        print("Warning! Probably a bug...T.T")
        print(peak_high_pos, peak_low_pos)
        return None, None, None, None

    peak_high_pos_diff = np.diff(peak_high_pos)
    peak_low_pos_diff = np.diff(peak_low_pos)

    # Interleave high-peak and low-peak periods: R[0::2] from highs,
    # R[1::2] from lows.
    R = np.zeros(len(peak_high_pos_diff) + len(peak_low_pos_diff))
    R[0::2] = peak_high_pos_diff
    R[1::2] = peak_low_pos_diff
    m_f_ind = np.zeros(len(R), dtype=int)
    m_f_ind[0::2] = peak_high_pos[:-1]
    m_f_ind[1::2] = peak_low_pos[:-1]
    # Rate in Hz at each peak position
    m_f[m_f_ind] = (1 / R) * sr

    peak_high_pitch = pitch_seg[peak_high_pos]
    peak_low_pitch = pitch_seg[peak_low_pos]

    # Amplitude: distance of each valley (peak) from the mean of its two
    # neighboring peaks (valleys).
    E = np.zeros(len(R))
    E[0::2] = (peak_high_pitch[1:] + peak_high_pitch[:-1]) / 2 - peak_low_pitch
    E[1::2] = peak_high_pitch[1:-1] - (peak_low_pitch[1:] + peak_low_pitch[:-1]) / 2
    m_a_ind = np.zeros(len(R), dtype=int)
    m_a_ind[0::2] = peak_low_pos
    m_a_ind[1::2] = peak_high_pos[1:-1]
    # Half the peak-to-peak difference = vibrato extent
    m_a[m_a_ind] = 0.5 * E

    rate = 1 / R.mean() * sr
    extent = 0.5 * E.mean()
    print(f"Rate: {rate}, Extent: {extent}")

    return R, E, m_a, m_f
def compute_extent(pitch_seg):
    """Compute the vibrato extent sequence of a pitch segment.

    Args:
        pitch_seg (np.ndarray): array of pitch

    Returns:
        np.ndarray: interleaved peak-to-peak amplitudes, or ``[-1]`` when the
            peak structure is too degenerate to analyze.
    """
    highs = argrelmax(pitch_seg)[0]
    lows = argrelmin(pitch_seg)[0]

    # Need at least two maxima and two minima to form an oscillation
    if len(highs) == 1 or len(lows) == 1:
        return np.array([-1])

    # Trim minima so that the segment starts and ends with a maximum
    if len(highs) < len(lows):
        lows = lows[:-2]
    elif len(highs) == len(lows):
        lows = lows[:-1]

    high_pitch = pitch_seg[highs]
    low_pitch = pitch_seg[lows]
    num_intervals = len(np.diff(highs)) + len(np.diff(lows))

    # TODO: would probably be a bug...
    if len(high_pitch) != len(low_pitch) + 1:
        return np.array([-1])

    extent = np.zeros(num_intervals)
    extent[0::2] = 0.5 * (high_pitch[1:] + high_pitch[:-1]) - low_pitch
    extent[1::2] = high_pitch[1:-1] - 0.5 * (low_pitch[1:] + low_pitch[:-1])
    return extent
def extract_smoothed_f0(f0, sr, cutoff=8):
    """Extract smoothed f0 by low-pass filtering

    Note that the low-pass filter is only applied to voiced segments.

    Args:
        f0 (np.ndarray): array of f0
        sr (int): sampling rate
        cutoff (float): cutoff frequency

    Returns:
        np.ndarray: array of smoothed f0
    """
    smoothed = f0.copy()
    # Filter each voiced region independently; unvoiced frames stay untouched.
    for start, end in nonzero_segments(f0):
        smoothed[start:end] = lowpass_filter(f0[start:end], sr, cutoff=cutoff)
    return smoothed
def extract_smoothed_continuous_f0(f0, sr, cutoff=20):
    """Extract smoothed continuous f0 by low-pass filtering

    Note that the input must be continuous F0 or log-F0.

    Args:
        f0 (np.ndarray): array of continuous f0
        sr (int): sampling rate
        cutoff (float): initial cutoff frequency

    Returns:
        np.ndarray: array of smoothed continuous f0
    """
    needs_reshape = len(f0.shape) == 2
    flat = f0.reshape(-1) if needs_reshape else f0

    # Ref: https://bit.ly/3SOePFw
    smoothed = lowpass_filter(flat, sr, cutoff=cutoff)

    # Fallback case: shouldn't happen I believe.
    # Retry with progressively higher cutoffs until no negative values remain.
    # NOTE: hard-coded for now
    fallback_cutoff = 50
    while (smoothed < 0).any():
        smoothed = lowpass_filter(flat, sr, cutoff=fallback_cutoff)
        fallback_cutoff *= 2

    return smoothed.reshape(len(flat), 1) if needs_reshape else smoothed
def extract_vibrato_likelihood(
    f0_smooth, sr, win_length=32, n_fft=128, min_freq=3, max_freq=8
):
    """Extract vibrato likelihood

    Args:
        f0_smooth (np.ndarray): array of smoothed f0
        sr (int): sampling rate
        win_length (int): window length
        n_fft (int): FFT size
        min_freq (float): minimum frequency of the vibrato
        max_freq (float): maximum frequency of the vibrato

    Returns:
        np.ndarray: array of vibrato likelihood
    """
    # STFT on the 1st-order difference of F0
    spec = np.abs(
        librosa.stft(
            np.diff(f0_smooth),
            hop_length=1,
            win_length=win_length,
            n_fft=n_fft,
            window="hann",
        )
    )
    spec_norm = spec / (spec.sum(0) + 1e-7)

    # Frequency bins corresponding to the expected vibrato rate range
    bin_hz = sr / n_fft
    lo_bin = int(min_freq / bin_hz)
    hi_bin = int(max_freq / bin_hz)

    # Compute vibrato likelihood: spectral "peakiness" times the energy ratio
    # inside the vibrato band.
    St = np.abs(np.diff(spec_norm, axis=0)).sum(0)
    Ft = spec_norm[lo_bin:hi_bin, :].sum(0)
    return St * Ft
def interp_vibrato(m_f):
    """Interpolate a sequence of vibrato parameter by linear interpolation

    Args:
        m_f (np.ndarray): array of vibrato parameter

    Returns:
        np.ndarray: array of vibrato parameter
    """
    # Anchor on positive entries; also pin both ends so interpolation covers
    # the whole sequence.
    anchors = np.where(m_f > 0)[0]
    anchors = [0] + list(anchors) + [len(m_f) - 1]
    return np.interp(np.arange(len(m_f)), anchors, m_f[anchors])
def extract_vibrato_parameters(
    pitch,
    vibrato_likelihood,
    sr=100,
    threshold=0.12,
    min_cross_count=5,
    min_extent=30,
    max_extent=150,
    interp_params=True,
    smooth_params=False,
    smooth_width=15,
    clip_extent=True,
):
    """Extract vibrato parameters
    Args:
        pitch (np.ndarray): array of pitch (smoothed f0)
        vibrato_likelihood (np.ndarray): array of vibrato likelihood
        sr (int): sampling rate
        threshold (float): threshold of vibrato likelihood
        min_cross_count (int): minimum number of cross points
        min_extent (int): minimum extent of vibrato (cent)
        max_extent (int): maximum extent of vibrato (cent)
        interp_params (bool): whether to interpolate vibrato parameters
        smooth_params (bool): whether to smooth vibrato parameters
        smooth_width (int): width of smoothing window
        clip_extent (bool): whether to clip extent
    Returns:
        tuple: (vibrato_flags, m_a, m_f) — per-frame binary vibrato flags,
            vibrato extent, and vibrato rate (all of length
            ``len(vibrato_likelihood)``)
    """
    T = len(vibrato_likelihood)
    vibrato_flags = np.zeros(T, dtype=int)
    m_a = np.zeros(T)
    m_f = np.zeros(T)
    peak_high_pos = argrelmax(pitch)[0]
    peak_low_pos = argrelmin(pitch)[0]
    # iterate over every peak position
    peak_high_idx = 0
    while peak_high_idx < len(peak_high_pos):
        peak_frame_idx = peak_high_pos[peak_high_idx]
        found = False
        if vibrato_likelihood[peak_frame_idx] > threshold:
            # Initial positions for vibrato section
            start_index = peak_frame_idx
            # End at the first local minimum after the current maximum
            peaks = peak_low_pos[peak_low_pos > peak_frame_idx]
            if len(peaks) > 0:
                end_index = peaks[0]
            else:
                peak_high_idx += 1
                continue
            next_start_peak_high_idx = -1
            # Find a peak position that is close to the next non-speech segment
            # assuming that there's a non-speech segment right after vibrato
            # NOTE: we may want to remove this constraint
            peak_high_pos_rest = peak_high_pos[peak_high_pos > peak_frame_idx]
            for frame_idx in range(end_index, T):
                if pitch[frame_idx] <= 0:
                    peaks = peak_high_pos_rest[peak_high_pos_rest < frame_idx]
                    if len(peaks) > 0:
                        end_index = peaks[-1]
                        next_start_peak_high_idx = (
                            len(peak_high_pos[peak_high_pos < end_index]) + 1
                        )
                    break
            # Set the search width (backward): number of maxima between the
            # previous non-speech frame and the current maximum
            search_width_backward = 0
            for frame_idx in range(start_index, 0, -1):
                if pitch[frame_idx] <= 0:
                    peaks_backward = peak_high_pos[
                        (peak_high_pos < peak_frame_idx) & (peak_high_pos > frame_idx)
                    ]
                    if len(peaks_backward) > 0:
                        backward = peaks_backward[0]
                        search_width_backward = len(
                            peak_high_pos[
                                (peak_high_pos > backward)
                                & (peak_high_pos <= peak_frame_idx)
                            ]
                        )
                    break
            # Find a peak position that satisfies the following vibrato constraints
            # 1) more than 5 times crossing
            # 2) 30 ~ 150 cent oscillation
            estimate_start_index = start_index
            rate = 0
            for peak_idx in range(
                max(peak_high_idx - search_width_backward, 0), peak_high_idx
            ):
                if peak_high_pos[peak_idx] >= T:
                    break
                f0_seg = pitch[peak_high_pos[peak_idx] : end_index]
                # Check if the segment satisfies vibrato constraints
                m = f0_seg.mean()
                # Number of times the segment crosses its own mean
                cross_count = len(np.where(np.diff(np.sign(f0_seg - m)))[0])
                # Find the start_index so that the vibrato section has more than 5 crossing
                E = compute_extent(f0_seg)
                extent = 0.5 * E.mean()
                # Reject segments containing implausibly large oscillations
                having_large_deviation = ((0.5 * E) > max_extent * 2).any()
                if (
                    cross_count >= min_cross_count
                    and cross_count >= rate
                    and extent >= min_extent
                    and extent <= max_extent
                    and not having_large_deviation
                    and (E > 0).all()
                ):
                    rate = cross_count
                    estimate_start_index = peak_high_pos[peak_idx]
            start_index = estimate_start_index
            if rate >= min_cross_count:
                # Include one extra frame on each side so that peak detection
                # inside the impl sees the boundary extrema
                R, E, m_a_seg, m_f_seg = extract_vibrato_parameters_impl(
                    pitch[start_index - 1 : end_index + 2], sr
                )
                if m_a_seg is None:
                    found = False
                    break
                found = True
                vibrato_flags[start_index:end_index] = 1
                if interp_params:
                    m_a_seg = interp_vibrato(m_a_seg)
                    # Vibrato rate is constrained to the 3-8 Hz range
                    m_f_seg = np.clip(interp_vibrato(m_f_seg), 3, 8)
                if smooth_params:
                    m_a_seg = np.convolve(
                        m_a_seg, np.ones(smooth_width) / smooth_width, mode="same"
                    )
                    m_f_seg = np.convolve(
                        m_f_seg, np.ones(smooth_width) / smooth_width, mode="same"
                    )
                if clip_extent:
                    m_a_seg = np.clip(m_a_seg, min_extent, max_extent)
                # Drop the extra boundary frames added above
                m_a[start_index:end_index] = m_a_seg[1:-2]
                m_f[start_index:end_index] = m_f_seg[1:-2]
                assert next_start_peak_high_idx > peak_high_idx
                peak_high_idx = next_start_peak_high_idx
        if not found:
            peak_high_idx += 1
    return vibrato_flags, m_a, m_f
def gen_sine_vibrato(f0, sr, m_a, m_f, scale=1.0):
    """Generate F0 with sine-based vibrato

    Args:
        f0 (ndarray): fundamental frequency
        sr (int): sampling rate
        m_a (ndarray): amplitude of vibrato
        m_f (ndarray): frequency of vibrato
        scale (float): scale factor

    Returns:
        ndarray: F0 with sine-based vibrato
    """
    out = f0.copy()
    voiced_ends = np.asarray([end for _, end in nonzero_segments(f0)])

    for start, end in nonzero_segments(m_a):
        # limit vibrato rate to [3, 8] Hz
        rate = np.clip(m_f[start:end], 3, 8)
        # limit vibrato extent to [30, 150] cent
        extent = np.clip(m_a[start:end], 30, 150)
        # Sinusoidal modulation in cents, converted to a multiplicative factor
        cent = scale * extent * np.sin(2 * np.pi / sr * rate * np.arange(0, end - start))
        out[start:end] = f0[start:end] * np.exp(cent * np.log(2) / 1200)

        # NOTE: this is a hack to avoid discontinuity at the end of vibrato
        later_ends = voiced_ends[voiced_ends > end]
        if len(later_ends) > 0:
            voiced_end = later_ends[0]
            out[start:voiced_end] = lowpass_filter(out[start:voiced_end], sr, cutoff=12)

    return out
| 18,639 | 30.863248 | 91 | py |
nnsvs | nnsvs-master/nnsvs/train_util.py | import os
import random
import shutil
import sys
import types
from glob import glob
from multiprocessing import Manager
from os.path import join
from pathlib import Path
import hydra
import joblib
import librosa
import librosa.display
import matplotlib.pyplot as plt
import mlflow
import numpy as np
import pysptk
import pyworld
import torch
import torch.distributed as dist
from hydra.utils import get_original_cwd, to_absolute_path
from nnmnkwii import metrics
from nnsvs.base import PredictionType
from nnsvs.gen import gen_world_params
from nnsvs.logger import getLogger
from nnsvs.mdn import mdn_get_most_probable_sigma_and_mu
from nnsvs.multistream import (
get_static_features,
get_static_stream_sizes,
get_windows,
multi_stream_mlpg,
select_streams,
split_streams,
)
from nnsvs.pitch import lowpass_filter, note_segments
from nnsvs.util import MinMaxScaler, StandardScaler, init_seed, pad_2d
from omegaconf import DictConfig, ListConfig, OmegaConf
from sklearn.preprocessing import MinMaxScaler as SKMinMaxScaler
from torch import nn, optim
from torch.cuda.amp import GradScaler
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.utils import data as data_utils
from torch.utils.data.sampler import BatchSampler
from torch.utils.tensorboard import SummaryWriter
plt.style.use("seaborn-whitegrid")
class ShuffleBatchSampler(BatchSampler):
    """Batch sampler that yields pre-computed batches, optionally shuffled.

    Args:
        batches (list): list of batches (each batch is a list of indices)
        drop_last (bool): kept for API compatibility; not used here
        shuffle (bool): whether to shuffle the batch order on each iteration
    """

    def __init__(self, batches, drop_last=False, shuffle=True):
        self.shuffle = shuffle
        self.batches = batches
        self.drop_last = drop_last

    def __iter__(self):
        if self.shuffle:
            # NOTE: shuffles the stored batch list in place
            random.shuffle(self.batches)
        return iter(self.batches)

    def __len__(self):
        return len(self.batches)
def log_params_from_omegaconf_dict(params):
    """Log every parameter of an omegaconf config to mlflow (recursively)."""
    for name, value in params.items():
        _explore_recursive(name, value)
def _explore_recursive(parent_name, element):
    """Walk a DictConfig/ListConfig tree and log each leaf to mlflow.

    Keys are flattened with dots, e.g. ``train.optim.lr``.
    """
    if isinstance(element, DictConfig):
        for key, value in element.items():
            if isinstance(value, (DictConfig, ListConfig)):
                _explore_recursive(f"{parent_name}.{key}", value)
            else:
                mlflow.log_param(f"{parent_name}.{key}", value)
    elif isinstance(element, ListConfig):
        for idx, value in enumerate(element):
            mlflow.log_param(f"{parent_name}.{idx}", value)
def num_trainable_params(model):
    """Count the number of trainable parameters in the model.

    Args:
        model (torch.nn.Module): Model to count the number of trainable parameters.

    Returns:
        int: Number of trainable parameters.
    """
    # NOTE: p.numel() returns a plain Python int, so the sum is a plain int
    # as well (the previous np.prod(p.size())-based version returned a numpy
    # scalar).
    return sum(p.numel() for p in model.parameters() if p.requires_grad)
def get_filtered_files(
    data_root,
    logger,
    filter_long_segments=False,
    filter_num_frames=6000,
    filter_min_num_frames=0,
):
    """Collect ``*-feats.npy`` files, optionally filtering by segment length.

    Args:
        data_root (str): directory containing ``*-feats.npy`` files
        logger (logging.Logger): logger (may be None to disable logging)
        filter_long_segments (bool): whether to drop too-long/short segments
        filter_num_frames (int): keep only segments strictly shorter than this
        filter_min_num_frames (int): keep only segments strictly longer than this

    Returns:
        tuple: (list of kept file paths, list of their lengths in frames)
    """

    def _log_stats(prefix, files_, lengths_):
        # Debug-level statistics of segment lengths; no-op without a logger.
        if logger is None or len(files_) == 0:
            return
        logger.debug(f"[{prefix}] Size of dataset: {len(files_)}")
        logger.debug(f"[{prefix}] maximum length: {max(lengths_)}")
        logger.debug(f"[{prefix}] minimum length: {min(lengths_)}")
        logger.debug(f"[{prefix}] mean length: {np.mean(lengths_)}")
        logger.debug(f"[{prefix}] std length: {np.std(lengths_)}")
        logger.debug(f"[{prefix}] median length: {np.median(lengths_)}")

    files = sorted(glob(join(data_root, "*-feats.npy")))
    # NOTE: load each file exactly once to get its length; the previous
    # implementation re-loaded every file up to three times.
    lengths = [len(np.load(f)) for f in files]

    if filter_long_segments:
        _log_stats("before", files, lengths)
        keep = [
            filter_min_num_frames < length < filter_num_frames for length in lengths
        ]
        num_filtered = keep.count(False)
        if logger is not None:
            for path, length, ok in zip(files, lengths, keep):
                if not ok:
                    logger.info(f"Filtered: {path} is too long or short: {length}")
            if num_filtered > 0:
                logger.info(f"Filtered {num_filtered} files")
        files = [f for f, ok in zip(files, keep) if ok]
        lengths = [length for length, ok in zip(lengths, keep) if ok]
        _log_stats("after", files, lengths)

    return files, lengths
def _is_batch_full(batch, num_tokens, max_tokens, max_sentences):
if len(batch) == 0:
return 0
if len(batch) == max_sentences:
return 1
if num_tokens > max_tokens:
return 1
return 0
def batch_by_size(
    indices,
    num_tokens_fn,
    max_tokens=None,
    max_sentences=None,
    required_batch_size_multiple=1,
):
    """
    Yield mini-batches of indices bucketed by size. Batches may contain
    sequences of different lengths.

    Args:
        indices (List[int]): ordered list of dataset indices
        num_tokens_fn (callable): function that returns the number of tokens at
            a given index
        max_tokens (int, optional): max number of tokens in each batch
            (default: None).
        max_sentences (int, optional): max number of sentences in each
            batch (default: None).
        required_batch_size_multiple (int, optional): require batch size to
            be a multiple of N (default: 1).
    """
    max_tokens = sys.maxsize if max_tokens is None else max_tokens
    max_sentences = sys.maxsize if max_sentences is None else max_sentences
    bsz_mult = required_batch_size_multiple

    def _overflows(cur_batch, projected_tokens):
        # A non-empty batch overflows once it hits the sentence limit or its
        # padded token count exceeds the token budget.
        if not cur_batch:
            return False
        return len(cur_batch) == max_sentences or projected_tokens > max_tokens

    if isinstance(indices, types.GeneratorType):
        indices = np.fromiter(indices, dtype=np.int64, count=-1)

    longest = 0
    pending_lens = []
    pending = []
    batches = []
    for idx in indices:
        n_tok = num_tokens_fn(idx)
        pending_lens.append(n_tok)
        longest = max(longest, n_tok)
        assert (
            longest <= max_tokens
        ), "sentence at index {} of size {} exceeds max_tokens " "limit of {}!".format(
            idx, longest, max_tokens
        )
        # Padded size of the batch if idx were appended now
        projected = (len(pending) + 1) * longest
        if _overflows(pending, projected):
            # Cut at the largest multiple of bsz_mult (fall back to remainder)
            cut = max(
                bsz_mult * (len(pending) // bsz_mult),
                len(pending) % bsz_mult,
            )
            batches.append(pending[:cut])
            pending = pending[cut:]
            pending_lens = pending_lens[cut:]
            longest = max(pending_lens) if pending_lens else 0
        pending.append(idx)
    if pending:
        batches.append(pending)
    return batches
class Dataset(data_utils.Dataset):  # type: ignore
    """Dataset for numpy files

    Args:
        in_paths (list): List of paths to input files
        out_paths (list): List of paths to output files
        lengths (list): Number of frames for each file
        shuffle (bool): Whether ``ordered_indices`` returns a shuffled order
        allow_cache (bool): Cache loaded arrays in a shared-memory list
    """

    def __init__(self, in_paths, out_paths, lengths, shuffle=False, allow_cache=True):
        self.in_paths = in_paths
        self.out_paths = out_paths
        self.lengths = lengths
        self.sort_by_len = True
        self.shuffle = shuffle
        self.allow_cache = allow_cache
        if allow_cache:
            # Manager-backed list so the cache is shared across DataLoader workers
            self.manager = Manager()
            self.caches = self.manager.list()
            self.caches += [() for _ in range(len(in_paths))]

    def __getitem__(self, idx):
        """Get a pair of input and target

        Args:
            idx (int): index of the pair

        Returns:
            tuple: input and target in numpy format
        """
        if self.allow_cache and len(self.caches[idx]) != 0:
            return self.caches[idx]
        pair = (np.load(self.in_paths[idx]), np.load(self.out_paths[idx]))
        if self.allow_cache:
            self.caches[idx] = pair
        return pair

    def num_tokens(self, index):
        """Return the number of frames of the ``index``-th item."""
        return self.lengths[index]

    def ordered_indices(self):
        """Return an ordered list of indices. Batches will be constructed based
        on this order.
        """
        if not self.shuffle:
            return np.arange(len(self))
        order = np.random.permutation(len(self))
        if self.sort_by_len:
            # Stable sort by length so equal-length items keep the shuffled order
            order = order[np.argsort(np.array(self.lengths)[order], kind="mergesort")]
        return order

    def __len__(self):
        """Returns the size of the dataset

        Returns:
            int: size of the dataset
        """
        return len(self.in_paths)
def ensure_divisible_by(feats, N):
    """Truncate features so that the number of frames is divisible by N.

    Args:
        feats (np.ndarray): Input features of shape (T, ...).
        N (int): Divisor (e.g. a model's reduction factor).

    Returns:
        np.ndarray: Features whose frame count is a multiple of N. The input
            is returned unchanged when N == 1 or it is already divisible.
    """
    if N == 1:
        return feats
    remainder = len(feats) % N
    return feats[: len(feats) - remainder] if remainder != 0 else feats
def collate_fn_default(batch, reduction_factor=1, stream_sizes=None, streams=None):
    """Create batch

    Args:
        batch(tuple): List of tuples
            - x[0] (ndarray,int) : list of (T, D_in)
            - x[1] (ndarray,int) : list of (T, D_out)
        reduction_factor (int): Reduction factor.
        stream_sizes (list, optional): sizes of feature streams in the target
        streams (list, optional): binary flags selecting which streams to keep

    Returns:
        tuple: Tuple of batch
            - x (FloatTensor) : Network inputs (B, max(T), D_in)
            - y (FloatTensor) : Network targets (B, max(T), D_out)
            - lengths (LongTensor): Input lengths
    """
    lengths = [len(ensure_divisible_by(ex[0], reduction_factor)) for ex in batch]
    max_len = max(lengths)

    def _prep(feats):
        # Trim to a multiple of the reduction factor, then zero-pad to max_len
        return torch.from_numpy(
            pad_2d(ensure_divisible_by(feats, reduction_factor), max_len)
        )

    x_batch = torch.stack([_prep(ex[0]) for ex in batch])

    if stream_sizes is not None:
        assert streams is not None
        # Keep only the requested feature streams before trimming/padding
        y_batch = torch.stack(
            [_prep(select_streams(ex[1], stream_sizes, streams)) for ex in batch]
        )
    else:
        y_batch = torch.stack([_prep(ex[1]) for ex in batch])

    l_batch = torch.tensor(lengths, dtype=torch.long)
    return x_batch, y_batch, l_batch
def collate_fn_random_segments(batch, max_time_frames=256):
    """Collate function with random segments

    Use segmented frames instead of padded entire frames. No padding is performed.

    .. warning::

        Every sequence in the training data must be at least
        ``max_time_frames`` frames long.

    Args:
        batch (tuple): tuple of lists
            - x[0] (ndarray,int) : list of (T, D_in)
            - x[1] (ndarray,int) : list of (T, D_out)
        max_time_frames (int, optional): Number of time frames. Defaults to 256.

    Returns:
        tuple: Tuple of batch
            - x (FloatTensor) : Network inputs (B, max_time_frames, D_in)
            - y (FloatTensor) : Network targets (B, max_time_frames, D_out)
            - lengths (LongTensor): Input lengths
    """
    xs, ys = [b[0] for b in batch], [b[1] for b in batch]
    lengths = [len(x) for x in xs]
    # NOTE: np.random.randint(0, 0) raises ValueError, so a sequence whose
    # length equals max_time_frames must use start frame 0 explicitly.
    start_frames = np.array(
        [
            np.random.randint(0, xl - max_time_frames) if xl > max_time_frames else 0
            for xl in lengths
        ]
    )
    starts = start_frames
    ends = starts + max_time_frames
    x_cut = [torch.from_numpy(x[s:e]) for x, s, e in zip(xs, starts, ends)]
    y_cut = [torch.from_numpy(y[s:e]) for y, s, e in zip(ys, starts, ends)]
    x_batch = torch.stack(x_cut).float()
    y_batch = torch.stack(y_cut).float()
    # NOTE: we don't actually need lengths since we don't perform padding
    # but just for consistency with collate_fn_default
    l_batch = torch.tensor([max_time_frames] * len(lengths), dtype=torch.long)
    return x_batch, y_batch, l_batch
def get_data_loaders(data_config, collate_fn, logger):
    """Get data loaders for training and validation.
    Args:
        data_config (dict): Data configuration.
        collate_fn (callable): Collate function.
        logger (logging.Logger): Logger.
    Returns:
        tuple: (dict of data loaders keyed by "train_no_dev"/"dev",
            dict of samplers with the same keys; sampler values may be None).
    """
    # Fall back to safe defaults when the filtering options are missing
    if "filter_long_segments" not in data_config:
        logger.warning(
            "filter_long_segments is not found in the data config. Consider set it explicitly."
        )
        logger.info("Disable filtering for long segments.")
        filter_long_segments = False
    else:
        filter_long_segments = data_config.filter_long_segments
    if "filter_num_frames" not in data_config:
        logger.warning(
            "filter_num_frames is not found in the data config. Consider set it explicitly."
        )
        filter_num_frames = 6000
        filter_min_num_frames = 0
    else:
        filter_num_frames = data_config.filter_num_frames
        filter_min_num_frames = data_config.filter_min_num_frames
    data_loaders = {}
    samplers = {}
    for phase in ["train_no_dev", "dev"]:
        in_dir = to_absolute_path(data_config[phase].in_dir)
        out_dir = to_absolute_path(data_config[phase].out_dir)
        train = phase.startswith("train")
        # NOTE: inputs and outputs are filtered with the same criteria, so the
        # two file lists stay aligned (assumes matching file naming).
        in_files, lengths = get_filtered_files(
            in_dir,
            logger,
            filter_long_segments=filter_long_segments,
            filter_num_frames=filter_num_frames,
            filter_min_num_frames=filter_min_num_frames,
        )
        out_files, _ = get_filtered_files(
            out_dir,
            None,
            filter_long_segments=filter_long_segments,
            filter_num_frames=filter_num_frames,
            filter_min_num_frames=filter_min_num_frames,
        )
        # Dynamic batch size
        if data_config.batch_max_frames > 0:
            logger.debug(
                f"Dynamic batch size with batch_max_frames={data_config.batch_max_frames}"
            )
            dataset = Dataset(
                in_files,
                out_files,
                lengths,
                shuffle=train,
                allow_cache=data_config.get("allow_cache", False),
            )
            if dist.is_initialized():
                required_batch_size_multiple = dist.get_world_size()
            else:
                required_batch_size_multiple = 1
            # Bucket indices into variable-size batches under the frame budget
            indices = dataset.ordered_indices()
            batches = batch_by_size(
                indices,
                dataset.num_tokens,
                max_tokens=data_config.batch_max_frames,
                required_batch_size_multiple=required_batch_size_multiple,
            )
            # Split mini-batches for each rank manually
            # NOTE: batches not divisible by the world size are dropped entirely
            if dist.is_initialized():
                num_replicas = dist.get_world_size()
                rank = dist.get_rank()
                logger.debug(f"Splitting mini-batches for rank {rank}")
                batches = [
                    x[rank::num_replicas] for x in batches if len(x) % num_replicas == 0
                ]
            logger.info(f"Num mini-batches: {len(batches)}")
            for batch in batches:
                sizes = [dataset.num_tokens(i) for i in batch]
                logger.debug(f"Batch-size: {len(batch)}, Lens: {sizes}")
            logger.info(f"Average batch size: {np.mean([len(b) for b in batches])}")
            data_loader_extra_kwargs = {
                "batch_sampler": ShuffleBatchSampler(batches) if train else batches,
            }
            sampler = None
        else:
            logger.debug(f"Fixed batch size: {data_config.batch_size}")
            dataset = Dataset(in_files, out_files, lengths)
            if dist.is_initialized():
                sampler = torch.utils.data.distributed.DistributedSampler(
                    dataset, shuffle=train
                )
                # DistributedSampler handles shuffling itself
                shuffle = False
            else:
                sampler = None
                shuffle = train
            data_loader_extra_kwargs = {
                "batch_size": data_config.batch_size,
                "sampler": sampler,
                "shuffle": shuffle,
            }
        data_loaders[phase] = data_utils.DataLoader(
            dataset,
            collate_fn=collate_fn,
            pin_memory=data_config.pin_memory,
            num_workers=data_config.num_workers,
            **data_loader_extra_kwargs,
        )
        samplers[phase] = sampler
    return data_loaders, samplers
def set_epochs_based_on_max_steps_(train_config, steps_per_epoch, logger):
    """Make ``nepochs`` and ``max_train_steps`` consistent, in place.

    Args:
        train_config (TrainConfig): Train config.
        steps_per_epoch (int): Number of steps per epoch.
        logger (logging.Logger): Logger.
    """
    if "max_train_steps" not in train_config:
        logger.warning("max_train_steps is not found in the train config.")
        return
    logger.info(f"Number of iterations per epoch: {steps_per_epoch}")

    if train_config.max_train_steps < 0:
        # Derive max_train_steps from the requested number of epochs
        total_steps = train_config.nepochs * steps_per_epoch
        train_config.max_train_steps = total_steps
        logger.info(
            "Number of max_train_steps is set based on nepochs: {}".format(total_steps)
        )
    else:
        # Derive nepochs from the requested number of steps (round up)
        total_steps = train_config.max_train_steps
        n_epochs = int(np.ceil(total_steps / steps_per_epoch))
        train_config.nepochs = n_epochs
        logger.info(
            "Number of epochs is set based on max_train_steps: {}".format(n_epochs)
        )

    logger.info(f"Number of epochs: {train_config.nepochs}")
    logger.info(f"Number of iterations: {train_config.max_train_steps}")
def save_checkpoint(
    logger,
    out_dir,
    model,
    optimizer,
    lr_scheduler,
    epoch,
    is_best=False,
    postfix="",
):
    """Save a checkpoint.

    Args:
        logger (logging.Logger): Logger.
        out_dir (pathlib.Path): Output directory.
        model (nn.Module): Model.
        optimizer (Optimizer): Optimizer.
        lr_scheduler (LRScheduler): Learning rate scheduler.
        epoch (int): Current epoch.
        is_best (bool, optional): Whether or not the current model is the best.
            Defaults to False.
        postfix (str, optional): Postfix. Defaults to "".
    """
    # Only rank 0 writes checkpoints in distributed training
    if dist.is_initialized() and dist.get_rank() != 0:
        return
    # Unwrap parallel wrappers so the state dict keys are stable
    if isinstance(model, (nn.DataParallel, DDP)):
        model = model.module

    out_dir.mkdir(parents=True, exist_ok=True)
    if is_best:
        path = out_dir / f"best_loss{postfix}.pth"
    else:
        path = out_dir / "epoch{:04d}{}.pth".format(epoch, postfix)
    torch.save(
        {
            "state_dict": model.state_dict(),
            "optimizer_state": optimizer.state_dict(),
            "lr_scheduler_state": lr_scheduler.state_dict(),
        },
        path,
    )
    logger.info(f"Saved checkpoint at {path}")

    # Keep a "latest" alias for easy resuming
    if not is_best:
        shutil.copyfile(path, out_dir / f"latest{postfix}.pth")
def get_stream_weight(stream_weights, stream_sizes):
    """Return per-stream loss weights.

    When explicit weights are given they are used as-is; otherwise each
    stream is weighted by its size relative to the total dimensionality.
    """
    if stream_weights is not None:
        assert len(stream_weights) == len(stream_sizes)
        return torch.tensor(stream_weights)
    sizes = torch.tensor(stream_sizes).float()
    return sizes / sum(stream_sizes)
def _instantiate_optim(optim_config, model):
# Optimizer
optimizer_class = getattr(optim, optim_config.optimizer.name)
optimizer = optimizer_class(model.parameters(), **optim_config.optimizer.params)
# Scheduler
lr_scheduler_class = getattr(optim.lr_scheduler, optim_config.lr_scheduler.name)
lr_scheduler = lr_scheduler_class(optimizer, **optim_config.lr_scheduler.params)
return optimizer, lr_scheduler
def _resume(logger, resume_config, model, optimizer, lr_scheduler):
    """Resume model (and optionally optimizer) state from a checkpoint.

    Parameters whose name or shape does not match the current model are
    skipped with a warning, which allows partial loading for fine-tuning.
    """
    ckpt_path = resume_config.checkpoint
    if ckpt_path is None or len(ckpt_path) == 0:
        return
    logger.info("Load weights from %s", ckpt_path)
    checkpoint = torch.load(to_absolute_path(ckpt_path))
    state_dict = checkpoint["state_dict"]
    model_dict = model.state_dict()

    valid_state_dict = {}
    for key, value in state_dict.items():
        if key in model_dict and value.shape == model_dict[key].shape:
            valid_state_dict[key] = value
        else:
            logger.warning(f"Skip loading {key} from checkpoint")

    model_dict.update(valid_state_dict)
    model.load_state_dict(model_dict)

    if resume_config.load_optimizer:
        logger.info("Load optimizer state")
        optimizer.load_state_dict(checkpoint["optimizer_state"])
        lr_scheduler.load_state_dict(checkpoint["lr_scheduler_state"])
def setup(config, device, collate_fn=collate_fn_default):
    """Setup for training
    Args:
        config (dict): configuration for training
        device (torch.device): device to use for training
        collate_fn (callable, optional): collate function. Defaults to collate_fn_default.
    Returns:
        (tuple): tuple containing model, optimizer, learning rate scheduler,
            grad scaler (or None), data loaders, samplers, tensorboard writer
            (or None), logger, and input/output scalers (or None).
    """
    # Silence all but rank 0 in distributed training (logger and stdout)
    if dist.is_initialized():
        rank = dist.get_rank()
        logger = getLogger(config.verbose) if rank == 0 else getLogger(0)
        sys.stdout = open(os.devnull, "w") if rank != 0 else sys.stdout
    else:
        logger = getLogger(config.verbose)
        rank = 0
    logger.info(OmegaConf.to_yaml(config))
    logger.info(f"PyTorch version: {torch.__version__}")
    if torch.cuda.is_available():
        from torch.backends import cudnn
        cudnn.benchmark = config.train.cudnn.benchmark
        cudnn.deterministic = config.train.cudnn.deterministic
        logger.info(f"cudnn.deterministic: {cudnn.deterministic}")
        logger.info(f"cudnn.benchmark: {cudnn.benchmark}")
        if torch.backends.cudnn.version() is not None:
            logger.info(f"cuDNN version: {torch.backends.cudnn.version()}")
    logger.info(f"Random seed: {config.seed}")
    init_seed(config.seed)
    if config.train.use_detect_anomaly:
        torch.autograd.set_detect_anomaly(True)
        logger.info("Set to use torch.autograd.detect_anomaly")
    # Mixed precision (AMP) is opt-in via config.train.use_amp
    if "use_amp" in config.train and config.train.use_amp:
        logger.info("Use mixed precision training")
        grad_scaler = GradScaler()
    else:
        grad_scaler = None
    # Model
    model = hydra.utils.instantiate(config.model.netG).to(device)
    logger.info(model)
    logger.info(
        "Number of trainable params: {:.3f} million".format(
            num_trainable_params(model) / 1000000.0
        )
    )
    # Distributed training
    if dist.is_initialized():
        device_id = rank % torch.cuda.device_count()
        model = DDP(model, device_ids=[device_id])
    # Optimizer
    optimizer_class = getattr(optim, config.train.optim.optimizer.name)
    optimizer = optimizer_class(
        model.parameters(), **config.train.optim.optimizer.params
    )
    # Scheduler
    lr_scheduler_class = getattr(
        optim.lr_scheduler, config.train.optim.lr_scheduler.name
    )
    lr_scheduler = lr_scheduler_class(
        optimizer, **config.train.optim.lr_scheduler.params
    )
    # DataLoader
    data_loaders, samplers = get_data_loaders(config.data, collate_fn, logger)
    set_epochs_based_on_max_steps_(
        config.train, len(data_loaders["train_no_dev"]), logger
    )
    # Resume
    _resume(logger, config.train.resume, model, optimizer, lr_scheduler)
    if config.data_parallel:
        model = nn.DataParallel(model)
    # Mlflow
    if config.mlflow.enabled:
        mlflow.set_tracking_uri("file://" + get_original_cwd() + "/mlruns")
        mlflow.set_experiment(config.mlflow.experiment)
        # NOTE: disable tensorboard if mlflow is enabled
        writer = None
        logger.info("Using mlflow instead of tensorboard")
    else:
        # Tensorboard
        if rank == 0:
            writer = SummaryWriter(to_absolute_path(config.train.log_dir))
        else:
            writer = None
    # Scalers
    # NOTE: sklearn scalers loaded from disk are re-wrapped into nnsvs's
    # torch-friendly MinMaxScaler / StandardScaler implementations.
    if "in_scaler_path" in config.data and config.data.in_scaler_path is not None:
        in_scaler = joblib.load(to_absolute_path(config.data.in_scaler_path))
        in_scaler = MinMaxScaler(
            in_scaler.min_, in_scaler.scale_, in_scaler.data_min_, in_scaler.data_max_
        )
    else:
        in_scaler = None
    if "out_scaler_path" in config.data and config.data.out_scaler_path is not None:
        out_scaler = joblib.load(to_absolute_path(config.data.out_scaler_path))
        out_scaler = StandardScaler(
            out_scaler.mean_, out_scaler.var_, out_scaler.scale_
        )
    else:
        out_scaler = None
    return (
        model,
        optimizer,
        lr_scheduler,
        grad_scaler,
        data_loaders,
        samplers,
        writer,
        logger,
        in_scaler,
        out_scaler,
    )
def setup_gan(config, device, collate_fn=collate_fn_default):
    """Setup for training GAN
    Args:
        config (dict): configuration for training
        device (torch.device): device to use for training
        collate_fn (callable, optional): collate function. Defaults to collate_fn_default.
    Returns:
        (tuple): tuple containing (netG, optG, schedulerG), (netD, optD,
            schedulerD), grad scaler (or None), data loaders, samplers,
            tensorboard writer (or None), logger, and input/output scalers.
    """
    # Silence all but rank 0 in distributed training (logger and stdout)
    if dist.is_initialized():
        rank = dist.get_rank()
        logger = getLogger(config.verbose) if rank == 0 else getLogger(0)
        sys.stdout = open(os.devnull, "w") if rank != 0 else sys.stdout
    else:
        logger = getLogger(config.verbose)
        rank = 0
    logger.info(OmegaConf.to_yaml(config))
    logger.info(f"PyTorch version: {torch.__version__}")
    if torch.cuda.is_available():
        from torch.backends import cudnn
        cudnn.benchmark = config.train.cudnn.benchmark
        cudnn.deterministic = config.train.cudnn.deterministic
        logger.info(f"cudnn.deterministic: {cudnn.deterministic}")
        logger.info(f"cudnn.benchmark: {cudnn.benchmark}")
        if torch.backends.cudnn.version() is not None:
            logger.info(f"cuDNN version: {torch.backends.cudnn.version()}")
    logger.info(f"Random seed: {config.seed}")
    init_seed(config.seed)
    if config.train.use_detect_anomaly:
        torch.autograd.set_detect_anomaly(True)
        logger.info("Set to use torch.autograd.detect_anomaly")
    if "use_amp" in config.train and config.train.use_amp:
        logger.info("Use mixed precision training")
        grad_scaler = GradScaler()
    else:
        grad_scaler = None
    # Model G
    netG = hydra.utils.instantiate(config.model.netG).to(device)
    logger.info(netG)
    logger.info(
        "[Generator] Number of trainable params: {:.3f} million".format(
            num_trainable_params(netG) / 1000000.0
        )
    )
    if dist.is_initialized():
        device_id = rank % torch.cuda.device_count()
        netG = DDP(netG, device_ids=[device_id])
    # Optimizer and LR scheduler for G
    optG, schedulerG = _instantiate_optim(config.train.optim.netG, netG)
    # Model D
    netD = hydra.utils.instantiate(config.model.netD).to(device)
    logger.info(netD)
    logger.info(
        "[Discriminator] Number of trainable params: {:.3f} million".format(
            num_trainable_params(netD) / 1000000.0
        )
    )
    if dist.is_initialized():
        device_id = rank % torch.cuda.device_count()
        netD = DDP(netD, device_ids=[device_id])
    # Optimizer and LR scheduler for D
    optD, schedulerD = _instantiate_optim(config.train.optim.netD, netD)
    # DataLoader
    data_loaders, samplers = get_data_loaders(config.data, collate_fn, logger)
    set_epochs_based_on_max_steps_(
        config.train, len(data_loaders["train_no_dev"]), logger
    )
    # Resume (G and D are resumed independently)
    _resume(logger, config.train.resume.netG, netG, optG, schedulerG)
    _resume(logger, config.train.resume.netD, netD, optD, schedulerD)
    if config.data_parallel:
        netG = nn.DataParallel(netG)
        netD = nn.DataParallel(netD)
    # Mlflow
    if config.mlflow.enabled:
        mlflow.set_tracking_uri("file://" + get_original_cwd() + "/mlruns")
        mlflow.set_experiment(config.mlflow.experiment)
        # NOTE: disable tensorboard if mlflow is enabled
        writer = None
        logger.info("Using mlflow instead of tensorboard")
    else:
        # Tensorboard
        # NOTE(review): unlike setup(), non-zero ranks also create a writer
        # here — confirm whether that is intentional.
        writer = SummaryWriter(to_absolute_path(config.train.log_dir))
    # Scalers
    if "in_scaler_path" in config.data and config.data.in_scaler_path is not None:
        in_scaler = joblib.load(to_absolute_path(config.data.in_scaler_path))
        # Re-wrap sklearn scalers into nnsvs's torch-friendly implementation
        if isinstance(in_scaler, SKMinMaxScaler):
            in_scaler = MinMaxScaler(
                in_scaler.min_,
                in_scaler.scale_,
                in_scaler.data_min_,
                in_scaler.data_max_,
            )
    else:
        in_scaler = None
    if "out_scaler_path" in config.data and config.data.out_scaler_path is not None:
        out_scaler = joblib.load(to_absolute_path(config.data.out_scaler_path))
        out_scaler = StandardScaler(
            out_scaler.mean_, out_scaler.var_, out_scaler.scale_
        )
    else:
        out_scaler = None
    return (
        (netG, optG, schedulerG),
        (netD, optD, schedulerD),
        grad_scaler,
        data_loaders,
        samplers,
        writer,
        logger,
        in_scaler,
        out_scaler,
    )
def save_configs(config):
    """Save the model sub-config and the full training config as YAML.

    Both files are written under ``config.train.out_dir`` (created if
    missing) as ``model.yaml`` and ``config.yaml`` respectively.

    Args:
        config: full training configuration (OmegaConf)
    """
    output_dir = Path(to_absolute_path(config.train.out_dir))
    output_dir.mkdir(parents=True, exist_ok=True)
    # Dump the model-only config first, then the complete config
    for filename, conf in (("model.yaml", config.model), ("config.yaml", config)):
        with open(output_dir / filename, "w") as f:
            OmegaConf.save(conf, f)
def check_resf0_config(logger, model, config, in_scaler, out_scaler):
    """Validate and complete residual-F0 settings of a model.

    Checks that the model's F0-related attributes (``in_lf0_idx``,
    ``out_lf0_idx``, ``in_lf0_min``/``in_lf0_max``,
    ``out_lf0_mean``/``out_lf0_scale``) are consistent with the data config
    and the fitted feature scalers. Missing min/max/mean/scale values are
    filled in from the scalers, and the final values are written back to
    ``config.model.netG``.

    Args:
        logger: logger instance
        model (nn.Module): model (possibly DataParallel/DDP wrapped)
        config: full training configuration
        in_scaler: fitted input min-max scaler (``data_min_``/``data_max_``)
        out_scaler: fitted output standard scaler (``mean_``/``scale_``)

    Raises:
        ValueError: if scalers or indices are missing, or if the model's
            F0 configuration is inconsistent with the data statistics.
    """
    logger.info("Checking model configs for residual F0 prediction")
    if in_scaler is None or out_scaler is None:
        raise ValueError("in_scaler and out_scaler must be specified")

    if isinstance(model, nn.DataParallel) or isinstance(model, DDP):
        model = model.module

    in_lf0_idx = config.data.in_lf0_idx
    in_rest_idx = config.data.in_rest_idx
    out_lf0_idx = config.data.out_lf0_idx
    if in_lf0_idx is None or in_rest_idx is None or out_lf0_idx is None:
        raise ValueError("in_lf0_idx, in_rest_idx and out_lf0_idx must be specified")

    logger.info("in_lf0_idx: %s", in_lf0_idx)
    logger.info("in_rest_idx: %s", in_rest_idx)
    logger.info("out_lf0_idx: %s", out_lf0_idx)

    ok = True
    if hasattr(model, "in_lf0_idx"):
        if model.in_lf0_idx != in_lf0_idx:
            # BUG FIX: the original message had no %s placeholders, so the
            # extra arguments raised a logging format error and the actual
            # values were never shown.
            logger.warning(
                "in_lf0_idx in model (%s) and data config (%s) must be same",
                model.in_lf0_idx,
                in_lf0_idx,
            )
            ok = False
    if hasattr(model, "out_lf0_idx"):
        if model.out_lf0_idx != out_lf0_idx:
            logger.warning(
                "out_lf0_idx in model (%s) and data config (%s) must be same",
                model.out_lf0_idx,
                out_lf0_idx,
            )
            ok = False

    if hasattr(model, "in_lf0_min") and hasattr(model, "in_lf0_max"):
        # Inject values from the input scaler
        if model.in_lf0_min is None or model.in_lf0_max is None:
            model.in_lf0_min = in_scaler.data_min_[in_lf0_idx]
            model.in_lf0_max = in_scaler.data_max_[in_lf0_idx]
            logger.info("in_lf0_min: %s", model.in_lf0_min)
            logger.info("in_lf0_max: %s", model.in_lf0_max)
        if not np.allclose(model.in_lf0_min, in_scaler.data_min_[model.in_lf0_idx]):
            logger.warning(
                f"in_lf0_min is set to {model.in_lf0_min}, "
                f"but should be {in_scaler.data_min_[model.in_lf0_idx]}"
            )
            ok = False
        if not np.allclose(model.in_lf0_max, in_scaler.data_max_[model.in_lf0_idx]):
            logger.warning(
                f"in_lf0_max is set to {model.in_lf0_max}, "
                f"but should be {in_scaler.data_max_[model.in_lf0_idx]}"
            )
            ok = False

    if hasattr(model, "out_lf0_mean") and hasattr(model, "out_lf0_scale"):
        # Inject values from the output scaler
        if model.out_lf0_mean is None or model.out_lf0_scale is None:
            model.out_lf0_mean = float(out_scaler.mean_[out_lf0_idx])
            model.out_lf0_scale = float(out_scaler.scale_[out_lf0_idx])
            logger.info("model.out_lf0_mean: %s", model.out_lf0_mean)
            logger.info("model.out_lf0_scale: %s", model.out_lf0_scale)
        if not np.allclose(model.out_lf0_mean, out_scaler.mean_[model.out_lf0_idx]):
            logger.warning(
                f"out_lf0_mean is set to {model.out_lf0_mean}, "
                f"but should be {out_scaler.mean_[model.out_lf0_idx]}"
            )
            ok = False
        if not np.allclose(model.out_lf0_scale, out_scaler.scale_[model.out_lf0_idx]):
            logger.warning(
                f"out_lf0_scale is set to {model.out_lf0_scale}, "
                f"but should be {out_scaler.scale_[model.out_lf0_idx]}"
            )
            ok = False

    if not ok:
        # Give an actionable hint before failing
        if (
            model.in_lf0_idx == in_lf0_idx
            and hasattr(model, "in_lf0_min")
            and hasattr(model, "out_lf0_mean")
        ):
            logger.info(
                f"""
If you are 100% sure that you set model.in_lf0_idx and model.out_lf0_idx correctly,
Please consider the following parameters in your model config:
    in_lf0_idx: {model.in_lf0_idx}
    out_lf0_idx: {model.out_lf0_idx}
    in_lf0_min: {in_scaler.data_min_[model.in_lf0_idx]}
    in_lf0_max: {in_scaler.data_max_[model.in_lf0_idx]}
    out_lf0_mean: {out_scaler.mean_[model.out_lf0_idx]}
    out_lf0_scale: {out_scaler.scale_[model.out_lf0_idx]}
"""
            )
        raise ValueError("The model config has wrong configurations.")

    # Overwrite the parameters to the config
    for key in ["in_lf0_min", "in_lf0_max", "out_lf0_mean", "out_lf0_scale"]:
        if hasattr(model, key):
            config.model.netG[key] = float(getattr(model, key))
def compute_pitch_regularization_weight(segments, N, decay_size=25, max_w=0.5):
    """Compute pitch regularization weight given note segments

    The weight is ``max_w`` inside each note, with a linear fade-in/out of
    ``decay_size`` frames at the note boundaries. Notes that are not longer
    than ``2 * decay_size`` frames get zero weight.

    Args:
        segments (list): list of note (start, end) indices
        N (int): number of frames
        decay_size (int): size of the decay window
        max_w (float): maximum weight

    Returns:
        Tensor: weights of shape (N,)
    """
    weights = torch.zeros(N)
    # Linear ramps shared by all segments: 0..(decay_size-1)/decay_size up,
    # and the reverse going down.
    ramp_up = torch.arange(decay_size) / decay_size
    ramp_down = torch.arange(decay_size - 1, -1, -1) / decay_size
    for start, end in segments:
        length = end - start
        weights[start:end] = max_w
        if length > decay_size * 2:
            weights[start : start + decay_size] *= ramp_up
            weights[end - decay_size : end] *= ramp_down
        else:
            # For short notes we don't use pitch regularization at all
            weights[start:end] = 0.0
    return weights
def compute_batch_pitch_regularization_weight(lf0_score_denorm, decay_size):
    """Batch version of computing pitch regularization weight

    Args:
        lf0_score_denorm (Tensor): denormalized note log-F0 scores of shape (B, T)
        decay_size (int): size of the decay window

    Returns:
        Tensor: weights of shape (B, T, 1)
    """
    _, num_frames = lf0_score_denorm.shape
    weights = torch.zeros_like(lf0_score_denorm)
    for batch_idx, utt_scores in enumerate(lf0_score_denorm):
        # Derive note (start, end) segments from the per-utterance score
        segments = note_segments(utt_scores)
        weights[batch_idx, :] = compute_pitch_regularization_weight(
            segments, N=num_frames, decay_size=decay_size
        ).to(weights.device)
    return weights.unsqueeze(-1)
@torch.no_grad()
def compute_distortions(pred_out_feats, out_feats, lengths, out_scaler, model_config):
    """Compute distortion measures between predicted and ground-truth acoustic features

    Args:
        pred_out_feats (nn.Tensor): predicted acoustic features
        out_feats (nn.Tensor): ground-truth acoustic features
        lengths (nn.Tensor): lengths of the sequences
        out_scaler (nn.Module): scaler to denormalize features
        model_config (dict): model configuration

    Returns:
        dict: a dict that includes MCD for mgc/bap, V/UV error and F0 RMSE
    """
    # All metrics are computed in the denormalized feature domain
    out_feats = out_scaler.inverse_transform(out_feats)
    pred_out_feats = out_scaler.inverse_transform(pred_out_feats)

    # Extract static feature streams for both target and prediction
    out_streams, pred_out_streams = (
        get_static_features(
            feats,
            model_config.num_windows,
            model_config.stream_sizes,
            model_config.has_dynamic_features,
        )
        for feats in (out_feats, pred_out_feats)
    )

    if len(out_streams) >= 4:
        mgc, lf0, vuv, bap = out_streams[:4]
        pred_mgc, pred_lf0, pred_vuv, pred_bap = pred_out_streams[:4]
    elif len(out_streams) == 3:
        # No aperiodicity stream (e.g. mel-based models)
        mgc, lf0, vuv = out_streams[:3]
        pred_mgc, pred_lf0, pred_vuv = pred_out_streams[:3]
        bap = None
        pred_bap = None

    # binarize vuv
    vuv = (vuv > 0.5).float()
    pred_vuv = (pred_vuv > 0.5).float()

    # NOTE: mgc[..., 0] (energy) is excluded from the MCD computation
    dist = {
        "ObjEval_MGC_MCD": metrics.melcd(
            mgc[:, :, 1:], pred_mgc[:, :, 1:], lengths=lengths
        ),
        "ObjEval_VUV_ERR": metrics.vuv_error(vuv, pred_vuv, lengths=lengths),
    }
    if bap is not None:
        dist["ObjEval_BAP_MCD"] = metrics.melcd(bap, pred_bap, lengths=lengths) / 10.0

    try:
        f0_mse = metrics.lf0_mean_squared_error(
            lf0, vuv, pred_lf0, pred_vuv, lengths=lengths, linear_domain=True
        )
        dist["ObjEval_F0_RMSE"] = np.sqrt(f0_mse)
    except ZeroDivisionError:
        # No commonly-voiced frames; skip the F0 metric
        pass

    return dist
@torch.no_grad()
def eval_pitch_model(
    phase,
    step,
    netG,
    in_feats,
    out_feats,
    lengths,
    model_config,
    out_scaler,
    writer,
    sr,
    lf0_score_denorm,
    max_num_eval_utts=10,
):
    """Evaluate a pitch (log-F0) model and write F0 plots to tensorboard.

    For up to ``max_num_eval_utts`` utterances, runs both a teacher-forced
    forward pass and free-running inference, then logs continuous log-F0 and
    F0 plots (target vs. predicted vs. note score) for each.

    Args:
        phase (str): phase name (e.g. "dev"); used in plot group names
        step (int): current training step
        netG (nn.Module): pitch model; may be DataParallel/DDP wrapped
        in_feats (Tensor): input features (B, T, D_in)
        out_feats (Tensor): target features; assumed to contain log-F0 only
        lengths (Tensor): per-utterance frame lengths
        model_config (dict): model configuration
        out_scaler: scaler to denormalize output features
        writer (SummaryWriter): tensorboard writer; function is a no-op if None
        sr (int): sampling rate
        lf0_score_denorm (Tensor): denormalized note log-F0 scores (B, T)
        max_num_eval_utts (int): max number of utterances to evaluate
    """
    # Only rank 0 writes logs in the distributed setting
    if dist.is_initialized() and dist.get_rank() != 0:
        return
    if writer is None:
        return

    # make sure to be in eval mode
    netG.eval()
    prediction_type = (
        netG.module.prediction_type()
        if isinstance(netG, nn.DataParallel) or isinstance(netG, DDP)
        else netG.prediction_type()
    )
    utt_indices = list(range(max_num_eval_utts))
    utt_indices = utt_indices[: min(len(utt_indices), len(in_feats))]

    # This path does not support dynamic (delta) features
    assert not np.any(model_config.has_dynamic_features)

    for utt_idx in utt_indices:
        out_feats_denorm_ = out_scaler.inverse_transform(
            out_feats[utt_idx, : lengths[utt_idx]].unsqueeze(0)
        )
        lf0 = out_feats_denorm_.squeeze(0).cpu().numpy().reshape(-1)
        lf0_score_denorm_ = (
            lf0_score_denorm[utt_idx, : lengths[utt_idx]].cpu().numpy().reshape(-1)
        )

        # Run forward
        outs = netG(
            in_feats[utt_idx, : lengths[utt_idx]].unsqueeze(0),
            [lengths[utt_idx]],
            out_feats[utt_idx, : lengths[utt_idx]].unsqueeze(0),
        )
        # ResF0 case
        if netG.has_residual_lf0_prediction():
            outs, _ = outs

        if prediction_type == PredictionType.PROBABILISTIC:
            pi, sigma, mu = outs
            # Use the mean of the most probable mixture component
            pred_out_feats = mdn_get_most_probable_sigma_and_mu(pi, sigma, mu)[1]
        else:
            pred_out_feats = outs
        # NOTE: multiple outputs
        if isinstance(pred_out_feats, list):
            pred_out_feats = pred_out_feats[-1]
        if isinstance(pred_out_feats, tuple):
            pred_out_feats = pred_out_feats[0]
        if not isinstance(pred_out_feats, list):
            pred_out_feats = [pred_out_feats]

        # Run inference
        if prediction_type == PredictionType.PROBABILISTIC:
            # MDN-style inference returns (mu, sigma); keep only the mean
            if isinstance(netG, nn.DataParallel) or isinstance(netG, DDP):
                inference_out_feats, _ = netG.module.inference(
                    in_feats[utt_idx, : lengths[utt_idx]].unsqueeze(0),
                    [lengths[utt_idx]],
                )
            else:
                inference_out_feats, _ = netG.inference(
                    in_feats[utt_idx, : lengths[utt_idx]].unsqueeze(0),
                    [lengths[utt_idx]],
                )
        else:
            if isinstance(netG, nn.DataParallel) or isinstance(netG, DDP):
                inference_out_feats = netG.module.inference(
                    in_feats[utt_idx, : lengths[utt_idx]].unsqueeze(0),
                    [lengths[utt_idx]],
                )
            else:
                inference_out_feats = netG.inference(
                    in_feats[utt_idx, : lengths[utt_idx]].unsqueeze(0),
                    [lengths[utt_idx]],
                )
        pred_out_feats.append(inference_out_feats)

        # pred_out_feats[0]: forward output, pred_out_feats[1]: inference output
        assert len(pred_out_feats) == 2
        for idx, pred_out_feats_ in enumerate(pred_out_feats):
            pred_out_feats_ = pred_out_feats_.squeeze(0).cpu().numpy()
            pred_lf0 = (
                out_scaler.inverse_transform(
                    torch.from_numpy(pred_out_feats_).to(in_feats.device)
                )
                .cpu()
                .numpy()
            ).reshape(-1)

            if idx == 1:
                group = f"{phase}_utt{np.abs(utt_idx)}_inference"
            else:
                group = f"{phase}_utt{np.abs(utt_idx)}_forward"

            # Continuous log-F0
            fig, ax = plt.subplots(1, 1, figsize=(8, 3))
            # NOTE(review): assumes a 5 ms frame shift — confirm against features
            timeaxis = np.arange(len(lf0)) * 0.005
            ax.plot(
                timeaxis, lf0, linewidth=1.5, color="tab:blue", label="Target log-F0"
            )
            ax.plot(
                timeaxis,
                pred_lf0,
                linewidth=1.5,
                color="tab:orange",
                label="Predicted log-F0",
            )
            ax.plot(
                timeaxis,
                lf0_score_denorm_,
                "--",
                color="gray",
                linewidth=1.3,
                label="Note log-F0",
            )
            ax.set_xlabel("Time [sec]")
            ax.set_ylabel("Log-frequency [Hz]")
            ax.set_xlim(timeaxis[0], timeaxis[-1])
            ax.set_ylim(
                min(
                    min(lf0_score_denorm_[lf0_score_denorm_ > 0]),
                    min(lf0),
                    min(pred_lf0),
                )
                - 0.1,
                max(max(lf0_score_denorm_), max(lf0), max(pred_lf0)) + 0.1,
            )
            plt.legend(loc="upper right", borderaxespad=0, ncol=3)
            plt.tight_layout()
            writer.add_figure(f"{group}/ContinuousLogF0", fig, step)
            plt.close()

            # F0
            # Convert the note score to linear frequency (only where a note exists)
            lf0_score = lf0_score_denorm_.copy()
            note_indices = lf0_score > 0
            lf0_score[note_indices] = np.exp(lf0_score[note_indices])
            fig, ax = plt.subplots(1, 1, figsize=(8, 3))
            timeaxis = np.arange(len(lf0)) * 0.005
            # np.sign(lf0_score) masks out rest (no-note) regions
            ax.plot(
                timeaxis,
                np.exp(lf0) * np.sign(lf0_score),
                linewidth=1.5,
                color="tab:blue",
                label="Target F0",
            )
            ax.plot(
                timeaxis,
                np.exp(pred_lf0) * np.sign(lf0_score),
                linewidth=1.5,
                color="tab:orange",
                label="Predicted F0",
            )
            ax.plot(
                timeaxis, lf0_score, "--", linewidth=1.3, color="gray", label="Note F0"
            )
            ax.set_xlabel("Time [sec]")
            ax.set_ylabel("Frequency [Hz]")
            ax.set_xlim(timeaxis[0], timeaxis[-1])
            ax.set_ylim(
                min(
                    min(lf0_score[lf0_score > 0]),
                    min(np.exp(lf0)),
                    min(np.exp(pred_lf0)),
                )
                - 10,
                max(max(lf0_score), max(np.exp(lf0)), max(np.exp(pred_lf0))) + 10,
            )
            plt.legend(loc="upper right", borderaxespad=0, ncol=3)
            plt.tight_layout()
            writer.add_figure(f"{group}/F0", fig, step)
            plt.close()
def synthesize(
    device,
    mgc,
    lf0,
    vuv,
    bap,
    sr,
    use_world_codec=False,
    vuv_threshold=0.3,
    vocoder=None,
    vocoder_in_scaler=None,
    vocoder_config=None,
):
    """Synthesize a waveform from WORLD-style acoustic features.

    Uses a neural vocoder when ``vocoder`` is given, otherwise falls back
    to WORLD synthesis.

    Args:
        device: torch device to place the vocoder input on
        mgc (np.ndarray): mel-generalized cepstrum
        lf0 (np.ndarray): log-F0
        vuv (np.ndarray): voiced/unvoiced flags
        bap (np.ndarray): band aperiodicity
        sr (int): sampling rate
        use_world_codec (bool): use WORLD codec for the spectral envelope
        vuv_threshold (float): threshold to binarize the V/UV flags
        vocoder (nn.Module): neural vocoder; None selects the WORLD fallback
        vocoder_in_scaler: scaler for the vocoder input features
        vocoder_config: vocoder configuration

    Returns:
        np.ndarray: synthesized waveform
    """
    if vocoder is not None:
        # NOTE(review): a config containing both "generator" and
        # "discriminator" sections is treated as a uSFGAN-style vocoder —
        # confirm this convention against the vocoder training configs.
        is_usfgan = "generator" in vocoder_config and "discriminator" in vocoder_config
        assert vocoder_in_scaler is not None
        if not is_usfgan:
            # NOTE: So far vocoder models are trained on binary V/UV features
            vuv = (vuv > vuv_threshold).astype(np.float32)
            voc_inp = (
                torch.from_numpy(
                    vocoder_in_scaler.transform(
                        np.concatenate([mgc, lf0, vuv, bap], axis=-1)
                    )
                )
                .float()
                .to(device)
            )
            wav = vocoder.inference(voc_inp).view(-1).to("cpu").numpy()
        else:
            fftlen = pyworld.get_cheaptrick_fft_size(sr)
            # More than 5 aperiodicity dims implies mel-cepstral aperiodicity
            use_mcep_aperiodicity = bap.shape[-1] > 5
            if use_mcep_aperiodicity:
                mcep_aperiodicity_order = bap.shape[-1] - 1
                alpha = pysptk.util.mcepalpha(sr)
                aperiodicity = pysptk.mc2sp(
                    np.ascontiguousarray(bap).astype(np.float64),
                    fftlen=fftlen,
                    alpha=alpha,
                )
            else:
                aperiodicity = pyworld.decode_aperiodicity(
                    np.ascontiguousarray(bap).astype(np.float64), sr, fftlen
                )
            # fill aperiodicity with ones for unvoiced regions
            aperiodicity[vuv.reshape(-1) < vuv_threshold, 0] = 1.0
            # WORLD fails catastrophically for out of range aperiodicity
            aperiodicity = np.clip(aperiodicity, 0.0, 1.0)
            # back to bap
            if use_mcep_aperiodicity:
                bap = pysptk.sp2mc(
                    aperiodicity,
                    order=mcep_aperiodicity_order,
                    alpha=alpha,
                )
            else:
                bap = pyworld.code_aperiodicity(aperiodicity, sr).astype(np.float32)

            aux_feats = (
                torch.from_numpy(
                    vocoder_in_scaler.transform(np.concatenate([mgc, bap], axis=-1))
                )
                .float()
                .to(device)
            )
            contf0 = np.exp(lf0)
            if vocoder_config.data.sine_f0_type in ["contf0", "cf0"]:
                f0_inp = contf0
            elif vocoder_config.data.sine_f0_type == "f0":
                f0_inp = contf0
                # Zero out F0 in unvoiced regions
                f0_inp[vuv < vuv_threshold] = 0
            # NOTE(review): any other sine_f0_type leaves f0_inp undefined
            # (NameError) — presumably the config is validated upstream; verify.
            wav = vocoder.inference(f0_inp, aux_feats).view(-1).to("cpu").numpy()
    else:
        # Fallback to WORLD
        f0, spectrogram, aperiodicity = gen_world_params(
            mgc,
            lf0,
            vuv,
            bap,
            sr,
            use_world_codec=use_world_codec,
        )
        # 5 ms frame period
        wav = pyworld.synthesize(f0, spectrogram, aperiodicity, sr, 5)
    return wav
def synthesize_from_mel(
    device,
    logmel,
    lf0,
    vuv,
    sr,
    vuv_threshold=0.3,
    vocoder=None,
    vocoder_in_scaler=None,
    vocoder_config=None,
):
    """Synthesize a waveform from mel-spectrogram-based features.

    Unlike :func:`synthesize`, there is no WORLD fallback: a neural vocoder
    is required and a ``RuntimeError`` is raised otherwise.

    Args:
        device: torch device to place the vocoder input on
        logmel (np.ndarray): log mel-spectrogram
        lf0 (np.ndarray): log-F0
        vuv (np.ndarray): voiced/unvoiced flags
        sr (int): sampling rate
        vuv_threshold (float): threshold to binarize the V/UV flags
        vocoder (nn.Module): neural vocoder (required)
        vocoder_in_scaler: scaler for the vocoder input features
        vocoder_config: vocoder configuration

    Returns:
        np.ndarray: synthesized waveform

    Raises:
        RuntimeError: if no vocoder is given.
    """
    if vocoder is not None:
        # NOTE(review): both "generator" and "discriminator" sections in the
        # config indicate a uSFGAN-style vocoder — confirm this convention.
        is_usfgan = "generator" in vocoder_config and "discriminator" in vocoder_config
        assert vocoder_in_scaler is not None
        if not is_usfgan:
            # NOTE: So far vocoder models are trained on binary V/UV features
            vuv = (vuv > vuv_threshold).astype(np.float32)
            voc_inp = (
                torch.from_numpy(
                    vocoder_in_scaler.transform(
                        np.concatenate([logmel, lf0, vuv], axis=-1)
                    )
                )
                .float()
                .to(device)
            )
            wav = vocoder.inference(voc_inp).view(-1).to("cpu").numpy()
        else:
            # NOTE: So far vocoder models are trained on binary V/UV features
            vuv = (vuv > vuv_threshold).astype(np.float32)
            aux_feats = (
                torch.from_numpy(vocoder_in_scaler.transform(logmel)).float().to(device)
            )
            contf0 = np.exp(lf0)
            if vocoder_config.data.sine_f0_type in ["contf0", "cf0"]:
                f0_inp = contf0
            elif vocoder_config.data.sine_f0_type == "f0":
                f0_inp = contf0
                # Zero out F0 in unvoiced regions
                f0_inp[vuv < vuv_threshold] = 0
            # NOTE(review): other sine_f0_type values leave f0_inp undefined
            # (NameError) — presumably validated upstream; verify.
            wav = vocoder.inference(f0_inp, aux_feats).view(-1).to("cpu").numpy()
    else:
        raise RuntimeError("Not supported")
    return wav
@torch.no_grad()
def eval_model(
    phase,
    step,
    netG,
    in_feats,
    out_feats,
    lengths,
    model_config,
    out_scaler,
    writer,
    sr,
    lf0_score_denorm=None,
    trajectory_smoothing=True,
    trajectory_smoothing_cutoff=50,
    trajectory_smoothing_cutoff_f0=20,
    use_world_codec=False,
    vocoder=None,
    vocoder_in_scaler=None,
    vocoder_config=None,
    vuv_threshold=0.3,
    max_num_eval_utts=10,
):
    """Dispatch evaluation to the parametric (SPSS) or mel-spectrogram path.

    Four or more feature streams (mgc/lf0/vuv/bap) imply the parametric
    pipeline (:func:`eval_spss_model`); otherwise the mel-based pipeline
    (:func:`eval_mel_model`) is used. The trajectory-smoothing and WORLD
    codec options only apply to the parametric path.
    """
    # Arguments common to both evaluation pipelines
    common_kwargs = dict(
        phase=phase,
        step=step,
        netG=netG,
        in_feats=in_feats,
        out_feats=out_feats,
        lengths=lengths,
        model_config=model_config,
        out_scaler=out_scaler,
        writer=writer,
        sr=sr,
        lf0_score_denorm=lf0_score_denorm,
        vocoder=vocoder,
        vocoder_in_scaler=vocoder_in_scaler,
        vocoder_config=vocoder_config,
        vuv_threshold=vuv_threshold,
        max_num_eval_utts=max_num_eval_utts,
    )
    if len(model_config.stream_sizes) >= 4:
        return eval_spss_model(
            trajectory_smoothing=trajectory_smoothing,
            trajectory_smoothing_cutoff=trajectory_smoothing_cutoff,
            trajectory_smoothing_cutoff_f0=trajectory_smoothing_cutoff_f0,
            use_world_codec=use_world_codec,
            **common_kwargs,
        )
    return eval_mel_model(**common_kwargs)
@torch.no_grad()
def eval_spss_model(
    phase,
    step,
    netG,
    in_feats,
    out_feats,
    lengths,
    model_config,
    out_scaler,
    writer,
    sr,
    lf0_score_denorm=None,
    trajectory_smoothing=True,
    trajectory_smoothing_cutoff=50,
    trajectory_smoothing_cutoff_f0=20,
    use_world_codec=False,
    vocoder=None,
    vocoder_in_scaler=None,
    vocoder_config=None,
    vuv_threshold=0.3,
    max_num_eval_utts=10,
):
    """Evaluate a parametric (WORLD-feature) acoustic model.

    For up to ``max_num_eval_utts`` utterances: synthesizes and logs a
    reference waveform, runs teacher-forced forward and free-running
    inference, synthesizes waveforms from both predictions, and logs audio
    plus acoustic-parameter plots to the writer.

    Args:
        phase (str): phase name (e.g. "dev"); used in log group names
        step (int): current training step
        netG (nn.Module): acoustic model; may be DataParallel/DDP wrapped
        in_feats (Tensor): input features (B, T, D_in)
        out_feats (Tensor): target acoustic features
        lengths (Tensor): per-utterance frame lengths
        model_config (dict): model configuration (stream sizes etc.)
        out_scaler: scaler to denormalize output features
        writer (SummaryWriter): tensorboard writer; no-op if None
        sr (int): sampling rate
        lf0_score_denorm (Tensor): denormalized note log-F0 scores, optional
        trajectory_smoothing (bool): low-pass filter predicted trajectories
        trajectory_smoothing_cutoff (int): cutoff [Hz] for mgc/bap smoothing
        trajectory_smoothing_cutoff_f0 (int): cutoff [Hz] for lf0 smoothing
        use_world_codec (bool): use WORLD codec for the spectral envelope
        vocoder (nn.Module): optional neural vocoder (else WORLD synthesis)
        vocoder_in_scaler: scaler for vocoder input features
        vocoder_config: vocoder configuration
        vuv_threshold (float): V/UV binarization threshold
        max_num_eval_utts (int): max number of utterances to evaluate
    """
    # Only rank 0 writes logs in the distributed setting
    if dist.is_initialized() and dist.get_rank() != 0:
        return
    if writer is None:
        return

    # make sure to be in eval mode
    netG.eval()
    prediction_type = (
        netG.module.prediction_type()
        if isinstance(netG, nn.DataParallel) or isinstance(netG, DDP)
        else netG.prediction_type()
    )
    utt_indices = list(range(max_num_eval_utts))
    utt_indices = utt_indices[: min(len(utt_indices), len(in_feats))]

    if np.any(model_config.has_dynamic_features):
        # Static-only sizes are needed to split MLPG-ed features later
        static_stream_sizes = get_static_stream_sizes(
            model_config.stream_sizes,
            model_config.has_dynamic_features,
            model_config.num_windows,
        )
    else:
        static_stream_sizes = model_config.stream_sizes

    rawsp_output = False
    for utt_idx in utt_indices:
        out_feats_denorm_ = out_scaler.inverse_transform(
            out_feats[utt_idx, : lengths[utt_idx]].unsqueeze(0)
        )
        mgc, lf0, vuv, bap = get_static_features(
            out_feats_denorm_,
            model_config.num_windows,
            model_config.stream_sizes,
            model_config.has_dynamic_features,
        )[:4]
        mgc = mgc.squeeze(0).cpu().numpy()
        lf0 = lf0.squeeze(0).cpu().numpy()
        vuv = vuv.squeeze(0).cpu().numpy()
        bap = bap.squeeze(0).cpu().numpy()
        if lf0_score_denorm is not None:
            lf0_score_denorm_ = (
                lf0_score_denorm[utt_idx, : lengths[utt_idx]].cpu().numpy().reshape(-1)
            )
        else:
            lf0_score_denorm_ = None

        # log spectrogram case
        # NOTE(review): >= 128 dims is taken to mean a raw log spectrogram
        # rather than mgc — confirm against the feature extraction config.
        rawsp_output = mgc.shape[1] >= 128
        if rawsp_output:
            sp = np.exp(mgc)
            # NOTE: 60-dim mgc is assumed
            mgc = pyworld.code_spectral_envelope(sp, sr, 60)
            assert use_world_codec
        else:
            sp = None

        # Reference (re-synthesized ground truth)
        wav = synthesize(
            device=in_feats.device,
            mgc=mgc,
            lf0=lf0,
            vuv=vuv,
            bap=bap,
            sr=sr,
            use_world_codec=use_world_codec,
            vuv_threshold=vuv_threshold,
            vocoder=vocoder,
            vocoder_in_scaler=vocoder_in_scaler,
            vocoder_config=vocoder_config,
        )
        group = f"{phase}_utt{np.abs(utt_idx)}_reference"
        # Normalize only when clipping would occur
        wav = wav / np.abs(wav).max() if np.abs(wav).max() > 1.0 else wav
        writer.add_audio(group, wav, step, sr)

        # Run forward
        outs = netG(
            in_feats[utt_idx, : lengths[utt_idx]].unsqueeze(0),
            [lengths[utt_idx]],
            out_feats[utt_idx, : lengths[utt_idx]].unsqueeze(0),
        )
        # ResF0 case
        if netG.has_residual_lf0_prediction():
            outs, _ = outs

        # Hybrid
        if prediction_type == PredictionType.MULTISTREAM_HYBRID:
            # Each stream may be deterministic, (pi, sigma, mu) MDN output,
            # or a (sigma, mu)-style pair; reduce each to a point estimate.
            pred_mgc, pred_lf0, pred_vuv, pred_bap = outs
            if isinstance(pred_lf0, tuple) and len(pred_lf0) == 3:
                pred_lf0 = mdn_get_most_probable_sigma_and_mu(*pred_lf0)[1]
            elif isinstance(pred_lf0, tuple) and len(pred_lf0) == 2:
                pred_lf0 = pred_lf0[1]
            if isinstance(pred_mgc, tuple) and len(pred_mgc) == 3:
                pred_mgc = mdn_get_most_probable_sigma_and_mu(*pred_mgc)[1]
            elif isinstance(pred_mgc, tuple) and len(pred_mgc) == 2:
                pred_mgc = pred_mgc[1]
            if isinstance(pred_bap, tuple) and len(pred_bap) == 3:
                pred_bap = mdn_get_most_probable_sigma_and_mu(*pred_bap)[1]
            elif isinstance(pred_bap, tuple) and len(pred_bap) == 2:
                pred_bap = pred_bap[1]
            pred_out_feats = torch.cat([pred_mgc, pred_lf0, pred_vuv, pred_bap], dim=-1)
        elif prediction_type == PredictionType.PROBABILISTIC:
            pi, sigma, mu = outs
            pred_out_feats = mdn_get_most_probable_sigma_and_mu(pi, sigma, mu)[1]
        else:
            pred_out_feats = outs
        # NOTE: multiple outputs
        if isinstance(pred_out_feats, list):
            pred_out_feats = pred_out_feats[-1]
        if isinstance(pred_out_feats, tuple):
            pred_out_feats = pred_out_feats[0]
        if not isinstance(pred_out_feats, list):
            pred_out_feats = [pred_out_feats]

        # Run inference
        if prediction_type in [
            PredictionType.PROBABILISTIC,
            PredictionType.MULTISTREAM_HYBRID,
        ]:
            # These inference paths return (features, extra); keep features
            if isinstance(netG, nn.DataParallel) or isinstance(netG, DDP):
                inference_out_feats, _ = netG.module.inference(
                    in_feats[utt_idx, : lengths[utt_idx]].unsqueeze(0),
                    [lengths[utt_idx]],
                )
            else:
                inference_out_feats, _ = netG.inference(
                    in_feats[utt_idx, : lengths[utt_idx]].unsqueeze(0),
                    [lengths[utt_idx]],
                )
        else:
            if isinstance(netG, nn.DataParallel) or isinstance(netG, DDP):
                inference_out_feats = netG.module.inference(
                    in_feats[utt_idx, : lengths[utt_idx]].unsqueeze(0),
                    [lengths[utt_idx]],
                )
            else:
                inference_out_feats = netG.inference(
                    in_feats[utt_idx, : lengths[utt_idx]].unsqueeze(0),
                    [lengths[utt_idx]],
                )
        pred_out_feats.append(inference_out_feats)

        # pred_out_feats[0]: forward output, pred_out_feats[1]: inference output
        assert len(pred_out_feats) == 2
        for idx, pred_out_feats_ in enumerate(pred_out_feats):
            pred_out_feats_ = pred_out_feats_.squeeze(0).cpu().numpy()
            pred_out_feats_denorm = (
                out_scaler.inverse_transform(
                    torch.from_numpy(pred_out_feats_).to(in_feats.device)
                )
                .cpu()
                .numpy()
            )
            if np.any(model_config.has_dynamic_features):
                # (T, D_out) -> (T, static_dim)
                pred_out_feats_denorm = multi_stream_mlpg(
                    pred_out_feats_denorm,
                    (out_scaler.scale_ ** 2).cpu().numpy(),
                    get_windows(model_config.num_windows),
                    model_config.stream_sizes,
                    model_config.has_dynamic_features,
                )
            pred_mgc, pred_lf0, pred_vuv, pred_bap = split_streams(
                pred_out_feats_denorm, static_stream_sizes
            )[:4]

            # log spectrogram case
            if rawsp_output:
                pred_sp = np.exp(pred_mgc)
                # NOTE: 60-dim mgc is assumed
                pred_mgc = pyworld.code_spectral_envelope(pred_sp, sr, 60)
            else:
                pred_sp = None

            # Remove high-frequency components of lf0/mgc/bap
            # NOTE: Useful to reduce high-frequency artifacts
            if trajectory_smoothing:
                # Modulation frequency for a 5 ms frame shift
                modfs = int(1 / 0.005)
                pred_lf0[:, 0] = lowpass_filter(
                    pred_lf0[:, 0], modfs, cutoff=trajectory_smoothing_cutoff_f0
                )
                for d in range(pred_mgc.shape[1]):
                    pred_mgc[:, d] = lowpass_filter(
                        pred_mgc[:, d], modfs, cutoff=trajectory_smoothing_cutoff
                    )
                for d in range(pred_bap.shape[1]):
                    pred_bap[:, d] = lowpass_filter(
                        pred_bap[:, d], modfs, cutoff=trajectory_smoothing_cutoff
                    )

            # Generated sample
            wav = synthesize(
                device=in_feats.device,
                mgc=pred_mgc,
                lf0=pred_lf0,
                vuv=pred_vuv,
                bap=pred_bap,
                sr=sr,
                use_world_codec=use_world_codec,
                vuv_threshold=vuv_threshold,
                vocoder=vocoder,
                vocoder_in_scaler=vocoder_in_scaler,
                vocoder_config=vocoder_config,
            )
            wav = wav / np.abs(wav).max() if np.abs(wav).max() > 1.0 else wav
            if idx == 1:
                group = f"{phase}_utt{np.abs(utt_idx)}_inference"
            else:
                group = f"{phase}_utt{np.abs(utt_idx)}_forward"
            writer.add_audio(group, wav, step, sr)
            try:
                plot_spsvs_params(
                    step,
                    writer,
                    mgc,
                    lf0,
                    vuv,
                    bap,
                    pred_mgc,
                    pred_lf0,
                    pred_vuv,
                    pred_bap,
                    lf0_score=lf0_score_denorm_,
                    group=group,
                    sr=sr,
                    use_world_codec=use_world_codec,
                    sp=sp,
                    pred_sp=pred_sp,
                )
            except IndexError as e:
                # In _quantile_ureduce_func:
                # IndexError: index -1 is out of bounds for axis 0 with size 0
                print(str(e))
@torch.no_grad()
def eval_mel_model(
    phase,
    step,
    netG,
    in_feats,
    out_feats,
    lengths,
    model_config,
    out_scaler,
    writer,
    sr,
    lf0_score_denorm=None,
    vocoder=None,
    vocoder_in_scaler=None,
    vocoder_config=None,
    vuv_threshold=0.3,
    max_num_eval_utts=10,
):
    """Evaluate a mel-spectrogram-based acoustic model.

    Mel counterpart of :func:`eval_spss_model`: for up to
    ``max_num_eval_utts`` utterances, logs a re-synthesized reference (when
    a vocoder is available), runs teacher-forced forward and free-running
    inference, and logs audio and parameter plots for both predictions.

    Args:
        phase (str): phase name (e.g. "dev"); used in log group names
        step (int): current training step
        netG (nn.Module): acoustic model; may be DataParallel/DDP wrapped
        in_feats (Tensor): input features (B, T, D_in)
        out_feats (Tensor): target features (logmel, lf0, vuv streams)
        lengths (Tensor): per-utterance frame lengths
        model_config (dict): model configuration (stream sizes etc.)
        out_scaler: scaler to denormalize output features
        writer (SummaryWriter): tensorboard writer; no-op if None
        sr (int): sampling rate
        lf0_score_denorm (Tensor): denormalized note log-F0 scores, optional
        vocoder (nn.Module): neural vocoder; audio is only logged if given
        vocoder_in_scaler: scaler for vocoder input features
        vocoder_config: vocoder configuration
        vuv_threshold (float): V/UV binarization threshold
        max_num_eval_utts (int): max number of utterances to evaluate
    """
    # Only rank 0 writes logs in the distributed setting
    if dist.is_initialized() and dist.get_rank() != 0:
        return
    if writer is None:
        return

    # make sure to be in eval mode
    netG.eval()
    prediction_type = (
        netG.module.prediction_type()
        if isinstance(netG, nn.DataParallel) or isinstance(netG, DDP)
        else netG.prediction_type()
    )
    utt_indices = list(range(max_num_eval_utts))
    utt_indices = utt_indices[: min(len(utt_indices), len(in_feats))]

    # The mel pipeline does not support dynamic (delta) features
    assert not np.any(model_config.has_dynamic_features)
    static_stream_sizes = model_config.stream_sizes

    for utt_idx in utt_indices:
        out_feats_denorm_ = out_scaler.inverse_transform(
            out_feats[utt_idx, : lengths[utt_idx]].unsqueeze(0)
        )
        logmel, lf0, vuv = get_static_features(
            out_feats_denorm_,
            model_config.num_windows,
            model_config.stream_sizes,
            model_config.has_dynamic_features,
        )
        logmel = logmel.squeeze(0).cpu().numpy()
        lf0 = lf0.squeeze(0).cpu().numpy()
        vuv = vuv.squeeze(0).cpu().numpy()
        if lf0_score_denorm is not None:
            lf0_score_denorm_ = (
                lf0_score_denorm[utt_idx, : lengths[utt_idx]].cpu().numpy().reshape(-1)
            )
        else:
            lf0_score_denorm_ = None

        # Reference (re-synthesized ground truth); requires a vocoder
        group = f"{phase}_utt{np.abs(utt_idx)}_reference"
        if vocoder is not None:
            wav = synthesize_from_mel(
                device=in_feats.device,
                logmel=logmel,
                lf0=lf0,
                vuv=vuv,
                sr=sr,
                vuv_threshold=vuv_threshold,
                vocoder=vocoder,
                vocoder_in_scaler=vocoder_in_scaler,
                vocoder_config=vocoder_config,
            )
            # Normalize only when clipping would occur
            wav = wav / np.abs(wav).max() if np.abs(wav).max() > 1.0 else wav
            writer.add_audio(group, wav, step, sr)

        # Run forward
        outs = netG(
            in_feats[utt_idx, : lengths[utt_idx]].unsqueeze(0),
            [lengths[utt_idx]],
            out_feats[utt_idx, : lengths[utt_idx]].unsqueeze(0),
        )
        # ResF0 case
        if netG.has_residual_lf0_prediction():
            outs, _ = outs

        # Hybrid
        if prediction_type == PredictionType.MULTISTREAM_HYBRID:
            # Each stream may be deterministic, (pi, sigma, mu) MDN output,
            # or a (sigma, mu)-style pair; reduce each to a point estimate.
            pred_logmel, pred_lf0, pred_vuv = outs
            if isinstance(pred_lf0, tuple) and len(pred_lf0) == 3:
                pred_lf0 = mdn_get_most_probable_sigma_and_mu(*pred_lf0)[1]
            elif isinstance(pred_lf0, tuple) and len(pred_lf0) == 2:
                pred_lf0 = pred_lf0[1]
            if isinstance(pred_logmel, tuple) and len(pred_logmel) == 3:
                pred_logmel = mdn_get_most_probable_sigma_and_mu(*pred_logmel)[1]
            elif isinstance(pred_logmel, tuple) and len(pred_logmel) == 2:
                pred_logmel = pred_logmel[1]
            pred_out_feats = torch.cat([pred_logmel, pred_lf0, pred_vuv], dim=-1)
        elif prediction_type == PredictionType.PROBABILISTIC:
            pi, sigma, mu = outs
            pred_out_feats = mdn_get_most_probable_sigma_and_mu(pi, sigma, mu)[1]
        else:
            pred_out_feats = outs
        # NOTE: multiple outputs
        if isinstance(pred_out_feats, list):
            pred_out_feats = pred_out_feats[-1]
        if isinstance(pred_out_feats, tuple):
            pred_out_feats = pred_out_feats[0]
        if not isinstance(pred_out_feats, list):
            pred_out_feats = [pred_out_feats]

        # Run inference
        if prediction_type in [
            PredictionType.PROBABILISTIC,
            PredictionType.MULTISTREAM_HYBRID,
        ]:
            # These inference paths return (features, extra); keep features
            if isinstance(netG, nn.DataParallel) or isinstance(netG, DDP):
                inference_out_feats, _ = netG.module.inference(
                    in_feats[utt_idx, : lengths[utt_idx]].unsqueeze(0),
                    [lengths[utt_idx]],
                )
            else:
                inference_out_feats, _ = netG.inference(
                    in_feats[utt_idx, : lengths[utt_idx]].unsqueeze(0),
                    [lengths[utt_idx]],
                )
        else:
            if isinstance(netG, nn.DataParallel) or isinstance(netG, DDP):
                inference_out_feats = netG.module.inference(
                    in_feats[utt_idx, : lengths[utt_idx]].unsqueeze(0),
                    [lengths[utt_idx]],
                )
            else:
                inference_out_feats = netG.inference(
                    in_feats[utt_idx, : lengths[utt_idx]].unsqueeze(0),
                    [lengths[utt_idx]],
                )
        pred_out_feats.append(inference_out_feats)

        # pred_out_feats[0]: forward output, pred_out_feats[1]: inference output
        assert len(pred_out_feats) == 2
        for idx, pred_out_feats_ in enumerate(pred_out_feats):
            pred_out_feats_ = pred_out_feats_.squeeze(0).cpu().numpy()
            pred_out_feats_denorm = (
                out_scaler.inverse_transform(
                    torch.from_numpy(pred_out_feats_).to(in_feats.device)
                )
                .cpu()
                .numpy()
            )
            pred_logmel, pred_lf0, pred_vuv = split_streams(
                pred_out_feats_denorm, static_stream_sizes
            )
            if idx == 1:
                group = f"{phase}_utt{np.abs(utt_idx)}_inference"
            else:
                group = f"{phase}_utt{np.abs(utt_idx)}_forward"

            # Generated sample
            if vocoder is not None:
                wav = synthesize_from_mel(
                    device=in_feats.device,
                    logmel=pred_logmel,
                    lf0=pred_lf0,
                    vuv=pred_vuv,
                    sr=sr,
                    vuv_threshold=vuv_threshold,
                    vocoder=vocoder,
                    vocoder_in_scaler=vocoder_in_scaler,
                    vocoder_config=vocoder_config,
                )
                wav = wav / np.abs(wav).max() if np.abs(wav).max() > 1.0 else wav
                writer.add_audio(group, wav, step, sr)
            try:
                plot_mel_params(
                    step,
                    writer,
                    logmel,
                    lf0,
                    vuv,
                    pred_logmel,
                    pred_lf0,
                    pred_vuv,
                    lf0_score=lf0_score_denorm_,
                    group=group,
                    sr=sr,
                )
            except IndexError as e:
                # In _quantile_ureduce_func:
                # IndexError: index -1 is out of bounds for axis 0 with size 0
                print(str(e))
def _colorbar_wrap(fig, mesh, ax, format="%+2.f dB"):
try:
fig.colorbar(mesh, ax=ax, format=format)
except IndexError as e:
# In _quantile_ureduce_func:
# IndexError: index -1 is out of bounds for axis 0 with size 0
print(str(e))
@torch.no_grad()
def plot_spsvs_params(
    step,
    writer,
    mgc,
    lf0,
    vuv,
    bap,
    pred_mgc,
    pred_lf0,
    pred_vuv,
    pred_bap,
    lf0_score,
    group,
    sr,
    use_world_codec=False,
    sp=None,
    pred_sp=None,
):
    """Plot acoustic parameters of parametric SVS

    Logs log-F0/F0 (when a note score is given), V/UV, spectrogram,
    aperiodicity, and global-variance figures to tensorboard.

    Args:
        step (int): step of the current iteration
        writer (tensorboard.SummaryWriter): tensorboard writer
        mgc (np.ndarray): mgc
        lf0 (np.ndarray): lf0
        vuv (np.ndarray): vuv
        bap (np.ndarray): bap
        pred_mgc (np.ndarray): predicted mgc
        pred_lf0 (np.ndarray): predicted lf0
        pred_vuv (np.ndarray): predicted vuv
        pred_bap (np.ndarray): predicted bap
        lf0_score (np.ndarray): note log-F0 score; None skips the F0 plots
        group (str): group name
        sr (int): sampling rate
        use_world_codec (bool): use world codec for spectral envelope or not
        sp (np.ndarray): precomputed reference spectrogram, optional
        pred_sp (np.ndarray): precomputed predicted spectrogram, optional
    """
    # Only rank 0 writes logs in the distributed setting
    if dist.is_initialized() and dist.get_rank() != 0:
        return
    assert writer is not None

    fftlen = pyworld.get_cheaptrick_fft_size(sr)
    alpha = pysptk.util.mcepalpha(sr)
    # 5 ms frame shift
    hop_length = int(sr * 0.005)
    # More than 5 aperiodicity dims implies mel-cepstral aperiodicity
    use_mcep_aperiodicity = bap.shape[-1] > 5

    # Log-F0
    if lf0_score is not None:
        fig, ax = plt.subplots(1, 1, figsize=(8, 3))
        timeaxis = np.arange(len(lf0)) * 0.005
        ax.plot(timeaxis, lf0, linewidth=1.5, color="tab:blue", label="Target log-F0")
        ax.plot(
            timeaxis,
            pred_lf0,
            linewidth=1.5,
            color="tab:orange",
            label="Predicted log-F0",
        )
        ax.plot(
            timeaxis,
            lf0_score,
            "--",
            color="gray",
            linewidth=1.3,
            label="Note log-F0",
        )
        ax.set_xlabel("Time [sec]")
        ax.set_ylabel("Log-frequency [Hz]")
        ax.set_xlim(timeaxis[0], timeaxis[-1])
        ax.set_ylim(
            min(min(lf0_score[lf0_score > 0]), min(lf0), min(pred_lf0)) - 0.1,
            max(max(lf0_score), max(lf0), max(pred_lf0)) + 0.1,
        )
        plt.legend(loc="upper right", borderaxespad=0, ncol=3)
        plt.tight_layout()
        writer.add_figure(f"{group}/ContinuousLogF0", fig, step)
        plt.close()

        # Convert the note score to linear frequency (only where a note exists)
        f0_score = lf0_score.copy()
        note_indices = f0_score > 0
        f0_score[note_indices] = np.exp(lf0_score[note_indices])

        # F0
        fig, ax = plt.subplots(1, 1, figsize=(8, 3))
        timeaxis = np.arange(len(lf0)) * 0.005
        # Zero out F0 in unvoiced regions for display
        f0 = np.exp(lf0)
        f0[vuv < 0.5] = 0
        pred_f0 = np.exp(pred_lf0)
        pred_f0[pred_vuv < 0.5] = 0
        ax.plot(
            timeaxis,
            f0,
            linewidth=1.5,
            color="tab:blue",
            label="Target F0",
        )
        ax.plot(
            timeaxis,
            pred_f0,
            linewidth=1.5,
            color="tab:orange",
            label="Predicted F0",
        )
        ax.plot(timeaxis, f0_score, "--", linewidth=1.3, color="gray", label="Note F0")
        ax.set_xlabel("Time [sec]")
        ax.set_ylabel("Frequency [Hz]")
        ax.set_xlim(timeaxis[0], timeaxis[-1])
        ax.set_ylim(
            min(min(f0_score[f0_score > 0]), min(np.exp(lf0)), min(np.exp(pred_lf0)))
            - 10,
            max(max(f0_score), max(np.exp(lf0)), max(np.exp(pred_lf0))) + 10,
        )
        plt.legend(loc="upper right", borderaxespad=0, ncol=3)
        plt.tight_layout()
        writer.add_figure(f"{group}/F0", fig, step)
        plt.close()

    # V/UV
    fig, ax = plt.subplots(1, 1, figsize=(8, 3))
    timeaxis = np.arange(len(lf0)) * 0.005
    ax.plot(timeaxis, vuv, linewidth=2, label="Target V/UV")
    ax.plot(timeaxis, pred_vuv, "--", linewidth=2, label="Predicted V/UV")
    ax.set_xlabel("Time [sec]")
    ax.set_ylabel("V/UV")
    ax.set_xlim(timeaxis[0], timeaxis[-1])
    plt.legend(loc="upper right", borderaxespad=0, ncol=2)
    plt.tight_layout()
    writer.add_figure(f"{group}/VUV", fig, step)
    plt.close()

    # Spectrogram
    fig, ax = plt.subplots(2, 1, figsize=(8, 6))
    ax[0].set_title("Reference spectrogram")
    ax[1].set_title("Predicted spectrogram")
    if use_world_codec:
        # Use the precomputed spectrogram when available (raw-sp models)
        if sp is not None:
            spectrogram = sp.T
        else:
            spectrogram = pyworld.decode_spectral_envelope(
                np.ascontiguousarray(mgc), sr, fftlen
            ).T
    else:
        spectrogram = pysptk.mc2sp(mgc, fftlen=fftlen, alpha=alpha).T
    mesh = librosa.display.specshow(
        librosa.power_to_db(np.abs(spectrogram), ref=np.max),
        sr=sr,
        hop_length=hop_length,
        x_axis="time",
        y_axis="hz",
        cmap="viridis",
        ax=ax[0],
    )
    _colorbar_wrap(fig, mesh, ax[0])
    if use_world_codec:
        if pred_sp is not None:
            pred_spectrogram = pred_sp.T
        else:
            pred_spectrogram = pyworld.decode_spectral_envelope(
                np.ascontiguousarray(pred_mgc), sr, fftlen
            ).T
    else:
        pred_spectrogram = pysptk.mc2sp(
            np.ascontiguousarray(pred_mgc), fftlen=fftlen, alpha=alpha
        ).T
    mesh = librosa.display.specshow(
        librosa.power_to_db(np.abs(pred_spectrogram), ref=np.max),
        sr=sr,
        hop_length=hop_length,
        x_axis="time",
        y_axis="hz",
        cmap="viridis",
        ax=ax[1],
    )
    _colorbar_wrap(fig, mesh, ax[1])
    for a in ax:
        a.set_ylim(0, sr // 2)
    plt.tight_layout()
    writer.add_figure(f"{group}/Spectrogram", fig, step)
    plt.close()

    # Aperiodicity
    fig, ax = plt.subplots(2, 1, figsize=(8, 6))
    ax[0].set_title("Reference aperiodicity")
    ax[1].set_title("Predicted aperiodicity")
    if use_mcep_aperiodicity:
        aperiodicity = pysptk.mc2sp(bap, fftlen=fftlen, alpha=alpha).T
    else:
        aperiodicity = pyworld.decode_aperiodicity(bap.astype(np.float64), sr, fftlen).T
    mesh = librosa.display.specshow(
        20 * np.log10(aperiodicity),
        sr=sr,
        hop_length=hop_length,
        x_axis="time",
        y_axis="linear",
        cmap="viridis",
        ax=ax[0],
    )
    _colorbar_wrap(fig, mesh, ax[0])
    if use_mcep_aperiodicity:
        pred_aperiodicity = pysptk.mc2sp(
            np.ascontiguousarray(pred_bap), fftlen=fftlen, alpha=alpha
        ).T
    else:
        pred_aperiodicity = pyworld.decode_aperiodicity(
            np.ascontiguousarray(pred_bap).astype(np.float64), sr, fftlen
        ).T
    mesh = librosa.display.specshow(
        20 * np.log10(pred_aperiodicity),
        sr=sr,
        hop_length=hop_length,
        x_axis="time",
        y_axis="linear",
        cmap="viridis",
        ax=ax[1],
    )
    _colorbar_wrap(fig, mesh, ax[1])
    for a in ax:
        a.set_ylim(0, sr // 2)
    plt.tight_layout()
    writer.add_figure(f"{group}/Aperiodicity", fig, step)
    plt.close()

    # GV for mgc
    fig, ax = plt.subplots(1, 1, figsize=(8, 3))
    ax.plot(np.var(mgc, axis=0), "--", linewidth=2, label="Natural: global variances")
    ax.plot(np.var(pred_mgc, axis=0), linewidth=2, label="Generated: global variances")
    ax.legend()
    ax.set_yscale("log")
    ax.set_xlabel("Dimension of mgc")
    min_ = min(np.var(mgc, axis=0).min(), np.var(pred_mgc, axis=0).min(), 1e-4)
    ax.set_ylim(min_)
    plt.tight_layout()
    writer.add_figure(f"{group}/GV_mgc", fig, step)
    plt.close()

    # GV for bap
    fig, ax = plt.subplots(1, 1, figsize=(8, 3))
    ax.plot(np.var(bap, axis=0), "--", linewidth=2, label="Natural: global variances")
    ax.plot(np.var(pred_bap, axis=0), linewidth=2, label="Generated: global variances")
    ax.legend()
    ax.set_yscale("log")
    ax.set_xlabel("Dimension of bap")
    min_ = min(np.var(bap, axis=0).min(), np.var(pred_bap, axis=0).min(), 10)
    ax.set_ylim(min_)
    plt.tight_layout()
    writer.add_figure(f"{group}/GV_bap", fig, step)
    plt.close()
@torch.no_grad()
def plot_mel_params(
    step,
    writer,
    logmel,
    lf0,
    vuv,
    pred_logmel,
    pred_lf0,
    pred_vuv,
    lf0_score,
    group,
    sr,
):
    """Log reference vs. predicted mel-based acoustic features to TensorBoard.

    Writes up to four figures for a single utterance under the ``{group}/``
    tag prefix: continuous log-F0, linear F0, V/UV, and mel-spectrograms.
    The F0-related figures are only produced when ``lf0_score`` is given.
    A 5-ms frame shift is assumed (``hop_length = sr * 0.005``).

    Args:
        step (int): Global step used as the TensorBoard figure index.
        writer: Summary writer with an ``add_figure`` method; must not be None.
        logmel (np.ndarray): Reference log mel-spectrogram
            (presumably (T, n_mels); transposed before plotting — TODO confirm).
        lf0 (np.ndarray): Reference continuous log-F0, one value per frame.
        vuv (np.ndarray): Reference V/UV flags (thresholded at 0.5).
        pred_logmel (np.ndarray): Predicted log mel-spectrogram.
        pred_lf0 (np.ndarray): Predicted continuous log-F0.
        pred_vuv (np.ndarray): Predicted V/UV flags.
        lf0_score (np.ndarray): Note-level log-F0 (0 at rests), or None to
            skip the F0 plots.
        group (str): Tag prefix for the logged figures.
        sr (int): Sampling rate in Hz.
    """
    # In distributed training, only rank 0 writes summaries.
    if dist.is_initialized() and dist.get_rank() != 0:
        return
    assert writer is not None
    hop_length = int(sr * 0.005)

    # Log-F0
    if lf0_score is not None:
        fig, ax = plt.subplots(1, 1, figsize=(8, 3))
        timeaxis = np.arange(len(lf0)) * 0.005
        ax.plot(timeaxis, lf0, linewidth=1.5, color="tab:blue", label="Target log-F0")
        ax.plot(
            timeaxis,
            pred_lf0,
            linewidth=1.5,
            color="tab:orange",
            label="Predicted log-F0",
        )
        ax.plot(
            timeaxis,
            lf0_score,
            "--",
            color="gray",
            linewidth=1.3,
            label="Note log-F0",
        )
        ax.set_xlabel("Time [sec]")
        ax.set_ylabel("Log-frequency [Hz]")
        ax.set_xlim(timeaxis[0], timeaxis[-1])
        # Fit y-range to the plotted curves; rest frames (lf0_score == 0)
        # are excluded so they don't stretch the axis down to zero.
        ax.set_ylim(
            min(min(lf0_score[lf0_score > 0]), min(lf0), min(pred_lf0)) - 0.1,
            max(max(lf0_score), max(lf0), max(pred_lf0)) + 0.1,
        )
        plt.legend(loc="upper right", borderaxespad=0, ncol=3)
        plt.tight_layout()
        writer.add_figure(f"{group}/ContinuousLogF0", fig, step)
        plt.close()

        # Convert the note log-F0 to linear frequency, keeping rests at 0.
        f0_score = lf0_score.copy()
        note_indices = f0_score > 0
        f0_score[note_indices] = np.exp(lf0_score[note_indices])

        # F0
        fig, ax = plt.subplots(1, 1, figsize=(8, 3))
        timeaxis = np.arange(len(lf0)) * 0.005
        # Zero-out unvoiced frames so they read as "no F0" in the plot.
        f0 = np.exp(lf0)
        f0[vuv < 0.5] = 0
        pred_f0 = np.exp(pred_lf0)
        pred_f0[pred_vuv < 0.5] = 0
        ax.plot(
            timeaxis,
            f0,
            linewidth=1.5,
            color="tab:blue",
            label="Target F0",
        )
        ax.plot(
            timeaxis,
            pred_f0,
            linewidth=1.5,
            color="tab:orange",
            label="Predicted F0",
        )
        ax.plot(timeaxis, f0_score, "--", linewidth=1.3, color="gray", label="Note F0")
        ax.set_xlabel("Time [sec]")
        ax.set_ylabel("Frequency [Hz]")
        ax.set_xlim(timeaxis[0], timeaxis[-1])
        # NOTE(review): the lower bound uses np.exp(lf0) (all frames voiced),
        # not the zeroed f0, so unvoiced frames don't pull the range to 0.
        ax.set_ylim(
            min(min(f0_score[f0_score > 0]), min(np.exp(lf0)), min(np.exp(pred_lf0)))
            - 10,
            max(max(f0_score), max(np.exp(lf0)), max(np.exp(pred_lf0))) + 10,
        )
        plt.legend(loc="upper right", borderaxespad=0, ncol=3)
        plt.tight_layout()
        writer.add_figure(f"{group}/F0", fig, step)
        plt.close()

    # V/UV
    fig, ax = plt.subplots(1, 1, figsize=(8, 3))
    timeaxis = np.arange(len(lf0)) * 0.005
    ax.plot(timeaxis, vuv, linewidth=2, label="Target V/UV")
    ax.plot(timeaxis, pred_vuv, "--", linewidth=2, label="Predicted V/UV")
    ax.set_xlabel("Time [sec]")
    ax.set_ylabel("V/UV")
    ax.set_xlim(timeaxis[0], timeaxis[-1])
    plt.legend(loc="upper right", borderaxespad=0, ncol=2)
    plt.tight_layout()
    writer.add_figure(f"{group}/VUV", fig, step)
    plt.close()

    # Mel-spectrogram
    fig, ax = plt.subplots(2, 1, figsize=(8, 6))
    ax[0].set_title("Reference spectrogram")
    ax[1].set_title("Predicted spectrogram")
    mesh = librosa.display.specshow(
        logmel.T,
        sr=sr,
        hop_length=hop_length,
        x_axis="time",
        y_axis="hz",
        cmap="viridis",
        ax=ax[0],
    )
    _colorbar_wrap(fig, mesh, ax[0])
    mesh = librosa.display.specshow(
        pred_logmel.T,
        sr=sr,
        hop_length=hop_length,
        x_axis="time",
        y_axis="hz",
        cmap="viridis",
        ax=ax[1],
    )
    _colorbar_wrap(fig, mesh, ax[1])
    for a in ax:
        a.set_ylim(0, sr // 2)
    plt.tight_layout()
    writer.add_figure(f"{group}/Spectrogram", fig, step)
    plt.close()
| 80,222 | 32.538043 | 95 | py |
nnsvs | nnsvs-master/nnsvs/acoustic_models/multistream.py | import torch
from nnsvs.acoustic_models.util import pad_inference
from nnsvs.base import BaseModel, PredictionType
from nnsvs.multistream import split_streams
from torch import nn
__all__ = [
"MultistreamSeparateF0ParametricModel",
"NPSSMultistreamParametricModel",
"NPSSMDNMultistreamParametricModel",
"MultistreamSeparateF0MelModel",
"MDNMultistreamSeparateF0MelModel",
]
class MultistreamSeparateF0ParametricModel(BaseModel):
    """Multi-stream model with a separate F0 prediction model

    acoustic features: [MGC, LF0, VUV, BAP]

    vib_model and vib_flags_model are optional and will be likely to be removed.

    Conditional dependency:
    p(MGC, LF0, VUV, BAP |C) = p(LF0|C) p(MGC|LF0, C) p(BAP|LF0, C) p(VUV|LF0, C)

    Args:
        in_dim (int): Input dimension.
        out_dim (int): Output dimension.
        stream_sizes (list): List of stream sizes.
        reduction_factor (int): Reduction factor.
        encoder (nn.Module): A shared encoder.
        mgc_model (nn.Module): MGC prediction model.
        lf0_model (nn.Module): log-F0 prediction model.
        vuv_model (nn.Module): V/UV prediction model.
        bap_model (nn.Module): BAP prediction model.
        in_rest_idx (int): Index of the rest symbol in the input features.
        in_lf0_idx (int): index of lf0 in input features
        in_lf0_min (float): minimum value of lf0 in the training data of input features
        in_lf0_max (float): maximum value of lf0 in the training data of input features
        out_lf0_idx (int): index of lf0 in output features. Typically 180.
        out_lf0_mean (float): mean of lf0 in the training data of output features
        out_lf0_scale (float): scale of lf0 in the training data of output features
        lf0_teacher_forcing (bool): Whether to use teacher forcing for F0 prediction.
    """

    def __init__(
        self,
        in_dim: int,
        out_dim: int,
        stream_sizes: list,
        reduction_factor: int,
        encoder: nn.Module,
        mgc_model: nn.Module,
        lf0_model: nn.Module,
        vuv_model: nn.Module,
        bap_model: nn.Module,
        vib_model: nn.Module = None,  # kept as is for compatibility
        vib_flags_model: nn.Module = None,  # kept as is for compatibility
        # NOTE: you must carefully set the following parameters
        in_rest_idx=1,
        in_lf0_idx=300,
        in_lf0_min=5.3936276,
        in_lf0_max=6.491111,
        out_lf0_idx=180,
        out_lf0_mean=5.953093881972361,
        out_lf0_scale=0.23435173188961034,
        lf0_teacher_forcing=True,
    ):
        super().__init__()
        self.in_dim = in_dim
        self.out_dim = out_dim
        self.stream_sizes = stream_sizes
        self.reduction_factor = reduction_factor
        self.lf0_teacher_forcing = lf0_teacher_forcing

        # Exactly four streams are supported: [MGC, LF0, VUV, BAP].
        assert len(stream_sizes) in [4]

        self.encoder = encoder
        if self.encoder is not None:
            # The shared encoder feeds all decoders; an autoregressive
            # encoder would break the single-pass forward below.
            assert not encoder.is_autoregressive()
        self.mgc_model = mgc_model
        self.lf0_model = lf0_model
        self.vuv_model = vuv_model
        self.bap_model = bap_model
        self.in_rest_idx = in_rest_idx
        self.in_lf0_idx = in_lf0_idx
        self.in_lf0_min = in_lf0_min
        self.in_lf0_max = in_lf0_max
        self.out_lf0_idx = out_lf0_idx
        self.out_lf0_mean = out_lf0_mean
        self.out_lf0_scale = out_lf0_scale

    def has_residual_lf0_prediction(self):
        """Return True: ``forward`` yields a residual log-F0 term."""
        return True

    def _set_lf0_params(self):
        # Special care for residual F0 prediction models
        # NOTE: don't overwrite out_lf0_idx and in_lf0_idx
        if hasattr(self.lf0_model, "out_lf0_mean"):
            self.lf0_model.in_lf0_min = self.in_lf0_min
            self.lf0_model.in_lf0_max = self.in_lf0_max
            self.lf0_model.out_lf0_mean = self.out_lf0_mean
            self.lf0_model.out_lf0_scale = self.out_lf0_scale

    def is_autoregressive(self):
        """Return True if any of the stream decoders is autoregressive."""
        return (
            self.mgc_model.is_autoregressive()
            or self.lf0_model.is_autoregressive()
            or self.vuv_model.is_autoregressive()
            or self.bap_model.is_autoregressive()
        )

    def forward(self, x, lengths=None, y=None):
        """Run the multi-stream forward pass.

        Args:
            x (torch.Tensor): Input features of shape (B, T, ``in_dim``).
            lengths (torch.Tensor): Lengths of the input sequences.
            y (torch.Tensor): Target features of shape (B, T, ``out_dim``).
                If given, the targets are used for teacher-forcing.

        Returns:
            tuple: Concatenated stream predictions (a tensor, or a list of
            tensors when any sub-model emits postnet outputs) and the
            residual log-F0 from ``lf0_model``.
        """
        self._set_lf0_params()
        assert x.shape[-1] == self.in_dim

        if y is not None:
            # Teacher-forcing
            y_mgc, y_lf0, y_vuv, y_bap = split_streams(y, self.stream_sizes)
        else:
            # Inference
            y_mgc, y_lf0, y_vuv, y_bap = None, None, None, None

        # Predict continuous log-F0 first
        lf0, lf0_residual = self.lf0_model(x, lengths, y_lf0)

        if self.encoder is not None:
            encoder_outs = self.encoder(x, lengths)
            # Concat log-F0, rest flags and the outputs of the encoder
            # This may make the decoder to be aware of the input F0
            rest_flags = x[:, :, self.in_rest_idx].unsqueeze(-1)
            if self.lf0_teacher_forcing and y is not None:
                encoder_outs = torch.cat([encoder_outs, rest_flags, y_lf0], dim=-1)
            else:
                encoder_outs = torch.cat([encoder_outs, rest_flags, lf0], dim=-1)
        else:
            encoder_outs = x

        # Decoders for each stream
        mgc = self.mgc_model(encoder_outs, lengths, y_mgc)
        vuv = self.vuv_model(encoder_outs, lengths, y_vuv)
        bap = self.bap_model(encoder_outs, lengths, y_bap)

        # make a concatenated stream
        has_postnet_output = (
            isinstance(mgc, list)
            or isinstance(lf0, list)
            or isinstance(vuv, list)
            or isinstance(bap, list)
        )
        if has_postnet_output:
            # Pair up coarse/fine outputs per postnet stage; sub-models
            # without postnets contribute the same tensor to every stage.
            outs = []
            for idx in range(len(mgc)):
                mgc_ = mgc[idx] if isinstance(mgc, list) else mgc
                lf0_ = lf0[idx] if isinstance(lf0, list) else lf0
                vuv_ = vuv[idx] if isinstance(vuv, list) else vuv
                bap_ = bap[idx] if isinstance(bap, list) else bap
                out = torch.cat([mgc_, lf0_, vuv_, bap_], dim=-1)
                assert out.shape[-1] == self.out_dim
                outs.append(out)
            return outs, lf0_residual
        else:
            out = torch.cat([mgc, lf0, vuv, bap], dim=-1)
            assert out.shape[-1] == self.out_dim

        return out, lf0_residual

    def inference(self, x, lengths=None):
        """Inference with padding so frame counts match the reduction factor."""
        return pad_inference(
            model=self, x=x, lengths=lengths, reduction_factor=self.reduction_factor
        )
class NPSSMultistreamParametricModel(BaseModel):
    """NPSS-like cascaded multi-stream model with no mixture density networks.

    NPSS: :cite:t:`blaauw2017neural`

    Different from the original NPSS, we don't use spectral parameters
    for the inputs of aperiodicity and V/UV prediction models.
    This is because
    (1) D4C does not use spectral parameters as input for aperiodicity estimation.
    (2) V/UV detection is done from aperiodicity at 0-3 kHz in WORLD.
    In addition, f0 and VUV models dont use MDNs.
    Empirically, we found the above configuration works better than the original one.

    Conditional dependency:
    p(MGC, LF0, VUV, BAP |C) = p(LF0|C) p(MGC|LF0, C) p(BAP|LF0, C) p(VUV|LF0, BAP, C)

    Args:
        in_dim (int): Input dimension.
        out_dim (int): Output dimension.
        stream_sizes (list): List of stream sizes.
        reduction_factor (int): Reduction factor.
        lf0_model (BaseModel): Model for predicting log-F0.
        mgc_model (BaseModel): Model for predicting MGC.
        bap_model (BaseModel): Model for predicting BAP.
        vuv_model (BaseModel): Model for predicting V/UV.
        in_rest_idx (int): Index of the rest symbol in the input features.
        in_lf0_idx (int): index of lf0 in input features
        in_lf0_min (float): minimum value of lf0 in the training data of input features
        in_lf0_max (float): maximum value of lf0 in the training data of input features
        out_lf0_idx (int): index of lf0 in output features. Typically 180.
        out_lf0_mean (float): mean of lf0 in the training data of output features
        out_lf0_scale (float): scale of lf0 in the training data of output features
        npss_style_conditioning (bool): Must be False; the original NPSS-style
            conditioning is not supported by this class.
        vuv_model_bap_conditioning (bool): If True, use BAP features for V/UV prediction.
        vuv_model_bap0_conditioning (bool): If True, use only 0-th coef. of BAP
            for V/UV prediction.
        vuv_model_lf0_conditioning (bool): If True, use log-F0 features for V/UV prediction.
        vuv_model_mgc_conditioning (bool): If True, use MGC features for V/UV prediction.
    """

    def __init__(
        self,
        in_dim: int,
        out_dim: int,
        stream_sizes: list,
        reduction_factor: int,
        lf0_model: nn.Module,
        mgc_model: nn.Module,
        bap_model: nn.Module,
        vuv_model: nn.Module,
        # NOTE: you must carefully set the following parameters
        in_rest_idx=0,
        in_lf0_idx=51,
        in_lf0_min=5.3936276,
        in_lf0_max=6.491111,
        out_lf0_idx=60,
        out_lf0_mean=5.953093881972361,
        out_lf0_scale=0.23435173188961034,
        npss_style_conditioning=False,
        vuv_model_bap_conditioning=True,
        vuv_model_bap0_conditioning=False,
        vuv_model_lf0_conditioning=True,
        vuv_model_mgc_conditioning=False,
    ):
        super().__init__()
        self.in_dim = in_dim
        self.out_dim = out_dim
        self.stream_sizes = stream_sizes
        self.reduction_factor = reduction_factor
        self.vuv_model_bap_conditioning = vuv_model_bap_conditioning
        self.vuv_model_bap0_conditioning = vuv_model_bap0_conditioning
        self.vuv_model_lf0_conditioning = vuv_model_lf0_conditioning
        self.vuv_model_mgc_conditioning = vuv_model_mgc_conditioning

        assert not npss_style_conditioning, "Not supported"
        # Exactly four streams are supported: [MGC, LF0, VUV, BAP].
        assert len(stream_sizes) in [4]

        self.lf0_model = lf0_model
        self.mgc_model = mgc_model
        self.bap_model = bap_model
        self.vuv_model = vuv_model
        self.in_rest_idx = in_rest_idx
        self.in_lf0_idx = in_lf0_idx
        self.in_lf0_min = in_lf0_min
        self.in_lf0_max = in_lf0_max
        self.out_lf0_idx = out_lf0_idx
        self.out_lf0_mean = out_lf0_mean
        self.out_lf0_scale = out_lf0_scale

    def _set_lf0_params(self):
        # Special care for residual F0 prediction models
        # NOTE: don't overwrite out_lf0_idx and in_lf0_idx
        if hasattr(self.lf0_model, "out_lf0_mean"):
            self.lf0_model.in_lf0_min = self.in_lf0_min
            self.lf0_model.in_lf0_max = self.in_lf0_max
            self.lf0_model.out_lf0_mean = self.out_lf0_mean
            self.lf0_model.out_lf0_scale = self.out_lf0_scale

    def prediction_type(self):
        """All sub-models are treated as deterministic in this class."""
        return PredictionType.DETERMINISTIC

    def is_autoregressive(self):
        """Return True if any of the stream decoders is autoregressive."""
        return (
            self.mgc_model.is_autoregressive()
            or self.lf0_model.is_autoregressive()
            or self.vuv_model.is_autoregressive()
            or self.bap_model.is_autoregressive()
        )

    def has_residual_lf0_prediction(self):
        """Return True: ``forward`` yields a residual log-F0 term."""
        return True

    def forward(self, x, lengths=None, y=None):
        """Run the cascaded forward pass (LF0 -> MGC/BAP -> V/UV).

        Args:
            x (torch.Tensor): Input features of shape (B, T, ``in_dim``).
            lengths (torch.Tensor): Lengths of the input sequences.
            y (torch.Tensor): Target features of shape (B, T, ``out_dim``).
                If None, each sub-model runs in inference mode; otherwise
                the ground-truth streams are used as conditioning
                (teacher-forcing).

        Returns:
            tuple: Concatenated stream predictions (tensor, or a list of
            tensors when any sub-model emits postnet outputs) and the
            residual log-F0 (None at inference).
        """
        self._set_lf0_params()
        assert x.shape[-1] == self.in_dim
        is_inference = y is None

        if is_inference:
            y_mgc, y_lf0, y_vuv, y_bap = (
                None,
                None,
                None,
                None,
            )
        else:
            # Teacher-forcing
            outs = split_streams(y, self.stream_sizes)
            y_mgc, y_lf0, y_vuv, y_bap = outs

        # Predict continuous log-F0 first
        if is_inference:
            lf0, lf0_residual = self.lf0_model.inference(x, lengths), None
        else:
            lf0, lf0_residual = self.lf0_model(x, lengths, y_lf0)

        # Predict spectral parameters
        if is_inference:
            mgc_inp = torch.cat([x, lf0], dim=-1)
            mgc = self.mgc_model.inference(mgc_inp, lengths)
        else:
            mgc_inp = torch.cat([x, y_lf0], dim=-1)
            mgc = self.mgc_model(mgc_inp, lengths, y_mgc)

        # Predict aperiodic parameters
        if is_inference:
            bap_inp = torch.cat([x, lf0], dim=-1)
            bap = self.bap_model.inference(bap_inp, lengths)
        else:
            bap_inp = torch.cat([x, y_lf0], dim=-1)
            bap = self.bap_model(bap_inp, lengths, y_bap)

        # Predict V/UV
        # NOTE(review): the concat order below (mgc, bap, lf0) must stay
        # fixed; trained checkpoints depend on this exact feature layout.
        if is_inference:
            if self.vuv_model_bap0_conditioning:
                bap_cond = bap[:, :, 0:1]
            else:
                bap_cond = bap

            # full cond: (x, mgc, lf0, bap)
            vuv_inp = [x]
            if self.vuv_model_mgc_conditioning:
                vuv_inp.append(mgc)
            if self.vuv_model_bap_conditioning:
                vuv_inp.append(bap_cond)
            if self.vuv_model_lf0_conditioning:
                vuv_inp.append(lf0)
            vuv_inp = torch.cat(vuv_inp, dim=-1)
            vuv = self.vuv_model.inference(vuv_inp, lengths)
        else:
            if self.vuv_model_bap0_conditioning:
                y_bap_cond = y_bap[:, :, 0:1]
            else:
                y_bap_cond = y_bap

            vuv_inp = [x]
            if self.vuv_model_mgc_conditioning:
                vuv_inp.append(y_mgc)
            if self.vuv_model_bap_conditioning:
                vuv_inp.append(y_bap_cond)
            if self.vuv_model_lf0_conditioning:
                vuv_inp.append(y_lf0)
            vuv_inp = torch.cat(vuv_inp, dim=-1)
            vuv = self.vuv_model(vuv_inp, lengths, y_vuv)

        # make a concatenated stream
        has_postnet_output = (
            isinstance(mgc, list) or isinstance(bap, list) or isinstance(vuv, list)
        )
        if has_postnet_output:
            # Pair up coarse/fine outputs per postnet stage; sub-models
            # without postnets contribute the same tensor to every stage.
            outs = []
            for idx in range(len(mgc)):
                mgc_ = mgc[idx] if isinstance(mgc, list) else mgc
                lf0_ = lf0[idx] if isinstance(lf0, list) else lf0
                vuv_ = vuv[idx] if isinstance(vuv, list) else vuv
                bap_ = bap[idx] if isinstance(bap, list) else bap
                out = torch.cat([mgc_, lf0_, vuv_, bap_], dim=-1)
                assert out.shape[-1] == self.out_dim
                outs.append(out)
        else:
            outs = torch.cat([mgc, lf0, vuv, bap], dim=-1)
            assert outs.shape[-1] == self.out_dim

        return outs, lf0_residual

    def inference(self, x, lengths=None):
        """Inference with padding so frame counts match the reduction factor."""
        return pad_inference(
            model=self,
            x=x,
            lengths=lengths,
            reduction_factor=self.reduction_factor,
            mdn=False,
        )
class NPSSMDNMultistreamParametricModel(BaseModel):
    """NPSS-like cascaded multi-stream parametric model with mixture density networks.

    .. note::
        This class was originally designed to be used with MDNs. However, the internal
        design was changed to make it work with non-MDN and diffusion models. For example,
        you can use non-MDN models for MGC prediction.

    NPSS: :cite:t:`blaauw2017neural`

    acoustic features: [MGC, LF0, VUV, BAP]

    Conditional dependency:
    p(MGC, LF0, VUV, BAP |C) = p(LF0|C) p(MGC|LF0, C) p(BAP|LF0, C) p(VUV|LF0, BAP, C)

    Args:
        in_dim (int): Input dimension.
        out_dim (int): Output dimension.
        stream_sizes (list): List of stream sizes.
        reduction_factor (int): Reduction factor.
        lf0_model (BaseModel): Model for predicting log-F0.
        mgc_model (BaseModel): Model for predicting MGC.
        bap_model (BaseModel): Model for predicting BAP.
        vuv_model (BaseModel): Model for predicting V/UV.
        in_rest_idx (int): Index of the rest symbol in the input features.
        in_lf0_idx (int): index of lf0 in input features
        in_lf0_min (float): minimum value of lf0 in the training data of input features
        in_lf0_max (float): maximum value of lf0 in the training data of input features
        out_lf0_idx (int): index of lf0 in output features. Typically 180.
        out_lf0_mean (float): mean of lf0 in the training data of output features
        out_lf0_scale (float): scale of lf0 in the training data of output features
        vuv_model_bap_conditioning (bool): If True, use BAP features for V/UV prediction.
        vuv_model_bap0_conditioning (bool): If True, use only 0-th coef. of BAP
            for V/UV prediction.
        vuv_model_lf0_conditioning (bool): If True, use log-F0 features for V/UV prediction.
        vuv_model_mgc_conditioning (bool): If True, use MGC features for V/UV prediction.
    """

    def __init__(
        self,
        in_dim: int,
        out_dim: int,
        stream_sizes: list,
        reduction_factor: int,
        lf0_model: nn.Module,
        mgc_model: nn.Module,
        bap_model: nn.Module,
        vuv_model: nn.Module,
        # NOTE: you must carefully set the following parameters
        in_rest_idx=0,
        in_lf0_idx=51,
        in_lf0_min=5.3936276,
        in_lf0_max=6.491111,
        out_lf0_idx=60,
        out_lf0_mean=5.953093881972361,
        out_lf0_scale=0.23435173188961034,
        vuv_model_bap_conditioning=True,
        vuv_model_bap0_conditioning=False,
        vuv_model_lf0_conditioning=True,
        vuv_model_mgc_conditioning=False,
    ):
        super().__init__()
        self.in_dim = in_dim
        self.out_dim = out_dim
        self.stream_sizes = stream_sizes
        self.reduction_factor = reduction_factor
        self.vuv_model_bap_conditioning = vuv_model_bap_conditioning
        self.vuv_model_bap0_conditioning = vuv_model_bap0_conditioning
        self.vuv_model_lf0_conditioning = vuv_model_lf0_conditioning
        self.vuv_model_mgc_conditioning = vuv_model_mgc_conditioning

        # Exactly four streams are supported: [MGC, LF0, VUV, BAP].
        assert len(stream_sizes) in [4]

        self.lf0_model = lf0_model
        self.mgc_model = mgc_model
        self.bap_model = bap_model
        self.vuv_model = vuv_model
        self.in_rest_idx = in_rest_idx
        self.in_lf0_idx = in_lf0_idx
        self.in_lf0_min = in_lf0_min
        self.in_lf0_max = in_lf0_max
        self.out_lf0_idx = out_lf0_idx
        self.out_lf0_mean = out_lf0_mean
        self.out_lf0_scale = out_lf0_scale

    def _set_lf0_params(self):
        # Special care for residual F0 prediction models
        # NOTE: don't overwrite out_lf0_idx and in_lf0_idx
        if hasattr(self.lf0_model, "out_lf0_mean"):
            self.lf0_model.in_lf0_min = self.in_lf0_min
            self.lf0_model.in_lf0_max = self.in_lf0_max
            self.lf0_model.out_lf0_mean = self.out_lf0_mean
            self.lf0_model.out_lf0_scale = self.out_lf0_scale

    def prediction_type(self):
        """Sub-models may mix deterministic and probabilistic outputs."""
        return PredictionType.MULTISTREAM_HYBRID

    def is_autoregressive(self):
        """Return True if any of the stream decoders is autoregressive."""
        return (
            self.mgc_model.is_autoregressive()
            or self.lf0_model.is_autoregressive()
            or self.vuv_model.is_autoregressive()
            or self.bap_model.is_autoregressive()
        )

    def has_residual_lf0_prediction(self):
        """Return True: ``forward`` yields a residual log-F0 term."""
        return True

    def forward(self, x, lengths=None, y=None):
        """Run the cascaded forward pass (LF0 -> MGC/BAP -> V/UV).

        Args:
            x (torch.Tensor): Input features of shape (B, T, ``in_dim``).
            lengths (torch.Tensor): Lengths of the input sequences.
            y (torch.Tensor): Target features of shape (B, T, ``out_dim``).
                If None, runs in inference mode with teacher-forcing-free
                cascading; otherwise ground-truth streams condition the
                downstream models.

        Returns:
            tuple: At inference, a concatenated deterministic output
            (probabilistic sub-models contribute their first/mean element)
            returned twice. During training, the per-stream raw outputs
            ``(mgc, lf0, vuv, bap)`` plus the residual log-F0.
        """
        self._set_lf0_params()
        assert x.shape[-1] == self.in_dim
        is_inference = y is None

        if is_inference:
            y_mgc, y_lf0, y_vuv, y_bap = (
                None,
                None,
                None,
                None,
            )
        else:
            # Teacher-forcing
            outs = split_streams(y, self.stream_sizes)
            y_mgc, y_lf0, y_vuv, y_bap = outs

        # Predict continuous log-F0 first
        if is_inference:
            lf0, lf0_residual = self.lf0_model.inference(x, lengths), None
            # Probabilistic models return a tuple; element 0 is used as
            # the conditioning feature for the downstream models.
            if self.lf0_model.prediction_type() == PredictionType.PROBABILISTIC:
                lf0_cond = lf0[0]
            else:
                lf0_cond = lf0
        else:
            lf0, lf0_residual = self.lf0_model(x, lengths, y_lf0)

        # Predict spectral parameters
        if is_inference:
            mgc_inp = torch.cat([x, lf0_cond], dim=-1)
            mgc = self.mgc_model.inference(mgc_inp, lengths)
        else:
            mgc_inp = torch.cat([x, y_lf0], dim=-1)
            mgc = self.mgc_model(mgc_inp, lengths, y_mgc)

        # Predict aperiodic parameters
        if is_inference:
            bap_inp = torch.cat([x, lf0_cond], dim=-1)
            bap = self.bap_model.inference(bap_inp, lengths)
        else:
            bap_inp = torch.cat([x, y_lf0], dim=-1)
            bap = self.bap_model(bap_inp, lengths, y_bap)

        # Predict V/UV
        # NOTE(review): the concat order below (mgc, lf0, bap) must stay
        # fixed; trained checkpoints depend on this exact feature layout.
        if is_inference:
            if self.bap_model.prediction_type() == PredictionType.PROBABILISTIC:
                bap_cond = bap[0]
            else:
                bap_cond = bap
            if self.mgc_model.prediction_type() == PredictionType.PROBABILISTIC:
                mgc_cond = mgc[0]
            else:
                mgc_cond = mgc

            if self.vuv_model_bap0_conditioning:
                bap_cond = bap_cond[:, :, 0:1]

            # full cond: (x, mgc, lf0, bap)
            vuv_inp = [x]
            if self.vuv_model_mgc_conditioning:
                vuv_inp.append(mgc_cond)
            if self.vuv_model_lf0_conditioning:
                vuv_inp.append(lf0_cond)
            if self.vuv_model_bap_conditioning:
                vuv_inp.append(bap_cond)
            vuv_inp = torch.cat(vuv_inp, dim=-1)
            vuv = self.vuv_model.inference(vuv_inp, lengths)
        else:
            if self.vuv_model_bap0_conditioning:
                y_bap_cond = y_bap[:, :, 0:1]
            else:
                y_bap_cond = y_bap

            vuv_inp = [x]
            if self.vuv_model_mgc_conditioning:
                vuv_inp.append(y_mgc)
            if self.vuv_model_lf0_conditioning:
                vuv_inp.append(y_lf0)
            if self.vuv_model_bap_conditioning:
                vuv_inp.append(y_bap_cond)
            vuv_inp = torch.cat(vuv_inp, dim=-1)
            vuv = self.vuv_model(vuv_inp, lengths, y_vuv)

        if is_inference:
            # Collapse probabilistic outputs to their first/mean element
            # and concatenate all streams into a single feature vector.
            if self.lf0_model.prediction_type() == PredictionType.PROBABILISTIC:
                lf0_ = lf0[0]
            else:
                lf0_ = lf0
            if self.bap_model.prediction_type() == PredictionType.PROBABILISTIC:
                bap_ = bap[0]
            else:
                bap_ = bap
            if self.mgc_model.prediction_type() == PredictionType.PROBABILISTIC:
                mgc_ = mgc[0]
            else:
                mgc_ = mgc
            out = torch.cat([mgc_, lf0_, vuv, bap_], dim=-1)
            assert out.shape[-1] == self.out_dim
            # TODO: better design
            return out, out
        else:
            return (mgc, lf0, vuv, bap), lf0_residual

    def inference(self, x, lengths=None):
        """Inference with padding so frame counts match the reduction factor."""
        return pad_inference(
            model=self,
            x=x,
            lengths=lengths,
            reduction_factor=self.reduction_factor,
            mdn=True,
        )
class MultistreamSeparateF0MelModel(BaseModel):
    """Multi-stream model with a separate F0 prediction model (mel-version)

    Conditional dependency:
    p(MEL, LF0, VUV|C) = p(LF0|C) p(MEL|LF0, C) p(VUV|LF0, C)

    Args:
        in_dim (int): Input dimension.
        out_dim (int): Output dimension.
        stream_sizes (list): List of stream sizes.
        reduction_factor (int): Reduction factor.
        encoder (nn.Module): A shared encoder.
        mel_model (nn.Module): MEL prediction model.
        lf0_model (nn.Module): log-F0 prediction model.
        vuv_model (nn.Module): V/UV prediction model.
        in_rest_idx (int): Index of the rest symbol in the input features.
        in_lf0_idx (int): index of lf0 in input features
        in_lf0_min (float): minimum value of lf0 in the training data of input features
        in_lf0_max (float): maximum value of lf0 in the training data of input features
        out_lf0_idx (int): index of lf0 in output features. Typically 180.
        out_lf0_mean (float): mean of lf0 in the training data of output features
        out_lf0_scale (float): scale of lf0 in the training data of output features
    """

    def __init__(
        self,
        in_dim: int,
        out_dim: int,
        stream_sizes: list,
        reduction_factor: int,
        encoder: nn.Module,
        mel_model: nn.Module,
        lf0_model: nn.Module,
        vuv_model: nn.Module,
        # NOTE: you must carefully set the following parameters
        in_rest_idx=1,
        in_lf0_idx=300,
        in_lf0_min=5.3936276,
        in_lf0_max=6.491111,
        out_lf0_idx=180,
        out_lf0_mean=5.953093881972361,
        out_lf0_scale=0.23435173188961034,
    ):
        super().__init__()
        self.in_dim = in_dim
        self.out_dim = out_dim
        self.stream_sizes = stream_sizes
        self.reduction_factor = reduction_factor

        # Exactly three streams are supported: [MEL, LF0, VUV].
        assert len(stream_sizes) == 3

        self.encoder = encoder
        if self.encoder is not None:
            # The shared encoder feeds all decoders; an autoregressive
            # encoder would break the single-pass forward below.
            assert not encoder.is_autoregressive()
        self.mel_model = mel_model
        self.lf0_model = lf0_model
        self.vuv_model = vuv_model
        self.in_rest_idx = in_rest_idx
        self.in_lf0_idx = in_lf0_idx
        self.in_lf0_min = in_lf0_min
        self.in_lf0_max = in_lf0_max
        self.out_lf0_idx = out_lf0_idx
        self.out_lf0_mean = out_lf0_mean
        self.out_lf0_scale = out_lf0_scale

    def _set_lf0_params(self):
        # Special care for residual F0 prediction models
        # NOTE: don't overwrite out_lf0_idx and in_lf0_idx
        if hasattr(self.lf0_model, "out_lf0_mean"):
            self.lf0_model.in_lf0_min = self.in_lf0_min
            self.lf0_model.in_lf0_max = self.in_lf0_max
            self.lf0_model.out_lf0_mean = self.out_lf0_mean
            self.lf0_model.out_lf0_scale = self.out_lf0_scale

    def is_autoregressive(self):
        """Return True if any of the stream decoders is autoregressive."""
        return (
            self.mel_model.is_autoregressive()
            or self.lf0_model.is_autoregressive()
            or self.vuv_model.is_autoregressive()
        )

    def has_residual_lf0_prediction(self):
        """Return True: ``forward`` yields a residual log-F0 term."""
        return True

    def forward(self, x, lengths=None, y=None):
        """Run the multi-stream forward pass.

        Args:
            x (torch.Tensor): Input features of shape (B, T, ``in_dim``).
            lengths (torch.Tensor): Lengths of the input sequences.
            y (torch.Tensor): Target features of shape (B, T, ``out_dim``).
                If given, the targets are used for teacher-forcing.

        Returns:
            tuple: Concatenated stream predictions (a tensor, or a list of
            tensors when any sub-model emits postnet outputs) and the
            residual log-F0 from ``lf0_model``.
        """
        self._set_lf0_params()
        assert x.shape[-1] == self.in_dim

        if y is not None:
            # Teacher-forcing
            outs = split_streams(y, self.stream_sizes)
            y_mel, y_lf0, y_vuv = outs
        else:
            # Inference
            y_mel, y_lf0, y_vuv = (
                None,
                None,
                None,
            )

        # Predict continuous log-F0 first
        lf0, lf0_residual = self.lf0_model(x, lengths, y_lf0)

        if self.encoder is not None:
            encoder_outs = self.encoder(x, lengths)
            # Concat log-F0, rest flags and the outputs of the encoder
            # This may make the decoder to be aware of the input F0
            rest_flags = x[:, :, self.in_rest_idx].unsqueeze(-1)
            if y is not None:
                encoder_outs = torch.cat([encoder_outs, rest_flags, y_lf0], dim=-1)
            else:
                encoder_outs = torch.cat([encoder_outs, rest_flags, lf0], dim=-1)
        else:
            encoder_outs = x

        # Decoders for each stream
        mel = self.mel_model(encoder_outs, lengths, y_mel)
        vuv = self.vuv_model(encoder_outs, lengths, y_vuv)

        # make a concatenated stream
        has_postnet_output = (
            isinstance(mel, list) or isinstance(lf0, list) or isinstance(vuv, list)
        )
        if has_postnet_output:
            # Pair up coarse/fine outputs per postnet stage; sub-models
            # without postnets contribute the same tensor to every stage.
            outs = []
            for idx in range(len(mel)):
                mel_ = mel[idx] if isinstance(mel, list) else mel
                lf0_ = lf0[idx] if isinstance(lf0, list) else lf0
                vuv_ = vuv[idx] if isinstance(vuv, list) else vuv
                out = torch.cat([mel_, lf0_, vuv_], dim=-1)
                assert out.shape[-1] == self.out_dim
                outs.append(out)
            return outs, lf0_residual
        else:
            out = torch.cat(
                [
                    mel,
                    lf0,
                    vuv,
                ],
                dim=-1,
            )
            assert out.shape[-1] == self.out_dim

        return out, lf0_residual

    def inference(self, x, lengths=None):
        """Inference with padding so frame counts match the reduction factor."""
        return pad_inference(
            model=self, x=x, lengths=lengths, reduction_factor=self.reduction_factor
        )
class MDNMultistreamSeparateF0MelModel(BaseModel):
    """Multi-stream model with a separate F0 model (mel-version) and MDN

    V/UV prediction is performed given a mel-spectrogram.

    Conditional dependency:
    p(MEL, LF0, VUV|C) = p(LF0|C) p(MEL|LF0, C) p(VUV|LF0, MEL, C)

    .. note::
        This class was originally designed to be used with MDNs. However, the internal
        design was changed to make it work with non-MDN and diffusion models. For example,
        you can use non-MDN models for mel prediction.

    Args:
        in_dim (int): Input dimension.
        out_dim (int): Output dimension.
        stream_sizes (list): List of stream sizes.
        reduction_factor (int): Reduction factor.
        mel_model (nn.Module): MEL prediction model.
        lf0_model (nn.Module): log-F0 prediction model.
        vuv_model (nn.Module): V/UV prediction model.
        in_rest_idx (int): Index of the rest symbol in the input features.
        in_lf0_idx (int): index of lf0 in input features
        in_lf0_min (float): minimum value of lf0 in the training data of input features
        in_lf0_max (float): maximum value of lf0 in the training data of input features
        out_lf0_idx (int): index of lf0 in output features. Typically 180.
        out_lf0_mean (float): mean of lf0 in the training data of output features
        out_lf0_scale (float): scale of lf0 in the training data of output features
        vuv_model_lf0_conditioning (bool): If True, use log-F0 features for V/UV prediction.
        vuv_model_mel_conditioning (bool): If True, use mel features for V/UV prediction.
    """

    def __init__(
        self,
        in_dim: int,
        out_dim: int,
        stream_sizes: list,
        reduction_factor: int,
        mel_model: nn.Module,
        lf0_model: nn.Module,
        vuv_model: nn.Module,
        # NOTE: you must carefully set the following parameters
        in_rest_idx=0,
        in_lf0_idx=51,
        in_lf0_min=5.3936276,
        in_lf0_max=6.491111,
        out_lf0_idx=60,
        out_lf0_mean=5.953093881972361,
        out_lf0_scale=0.23435173188961034,
        vuv_model_lf0_conditioning=True,
        vuv_model_mel_conditioning=True,
    ):
        super().__init__()
        self.in_dim = in_dim
        self.out_dim = out_dim
        self.stream_sizes = stream_sizes
        self.reduction_factor = reduction_factor
        self.vuv_model_lf0_conditioning = vuv_model_lf0_conditioning
        self.vuv_model_mel_conditioning = vuv_model_mel_conditioning

        # Exactly three streams are supported: [MEL, LF0, VUV].
        assert len(stream_sizes) in [3]

        self.mel_model = mel_model
        self.lf0_model = lf0_model
        self.vuv_model = vuv_model
        self.in_rest_idx = in_rest_idx
        self.in_lf0_idx = in_lf0_idx
        self.in_lf0_min = in_lf0_min
        self.in_lf0_max = in_lf0_max
        self.out_lf0_idx = out_lf0_idx
        self.out_lf0_mean = out_lf0_mean
        self.out_lf0_scale = out_lf0_scale

    def _set_lf0_params(self):
        # Special care for residual F0 prediction models
        # NOTE: don't overwrite out_lf0_idx and in_lf0_idx
        if hasattr(self.lf0_model, "out_lf0_mean"):
            self.lf0_model.in_lf0_min = self.in_lf0_min
            self.lf0_model.in_lf0_max = self.in_lf0_max
            self.lf0_model.out_lf0_mean = self.out_lf0_mean
            self.lf0_model.out_lf0_scale = self.out_lf0_scale

    def prediction_type(self):
        """Sub-models may mix deterministic and probabilistic outputs."""
        return PredictionType.MULTISTREAM_HYBRID

    def is_autoregressive(self):
        """Return True if any of the stream decoders is autoregressive."""
        return (
            self.mel_model.is_autoregressive()
            or self.lf0_model.is_autoregressive()
            or self.vuv_model.is_autoregressive()
        )

    def has_residual_lf0_prediction(self):
        """Return True: ``forward`` yields a residual log-F0 term."""
        return True

    def forward(self, x, lengths=None, y=None):
        """Run the cascaded forward pass (LF0 -> MEL -> V/UV).

        Args:
            x (torch.Tensor): Input features of shape (B, T, ``in_dim``).
            lengths (torch.Tensor): Lengths of the input sequences.
            y (torch.Tensor): Target features of shape (B, T, ``out_dim``).
                If None, runs in inference mode; otherwise ground-truth
                streams condition the downstream models (teacher-forcing).

        Returns:
            tuple: At inference, a concatenated deterministic output
            (probabilistic sub-models contribute their first/mean element)
            returned twice. During training, the per-stream raw outputs
            ``(mel, lf0, vuv)`` plus the residual log-F0.
        """
        self._set_lf0_params()
        assert x.shape[-1] == self.in_dim
        is_inference = y is None

        if y is not None:
            # Teacher-forcing
            outs = split_streams(y, self.stream_sizes)
            y_mel, y_lf0, y_vuv = outs
        else:
            # Inference
            y_mel, y_lf0, y_vuv = (
                None,
                None,
                None,
            )

        # Predict continuous log-F0 first
        if is_inference:
            lf0, lf0_residual = self.lf0_model.inference(x, lengths), None
            # Probabilistic models return a tuple; element 0 is used as
            # the conditioning feature for the downstream models.
            if self.lf0_model.prediction_type() == PredictionType.PROBABILISTIC:
                lf0_cond = lf0[0]
            else:
                lf0_cond = lf0
        else:
            lf0, lf0_residual = self.lf0_model(x, lengths, y_lf0)

        # Predict mel
        if is_inference:
            mel_inp = torch.cat([x, lf0_cond], dim=-1)
            mel = self.mel_model.inference(mel_inp, lengths)
        else:
            mel_inp = torch.cat([x, y_lf0], dim=-1)
            mel = self.mel_model(mel_inp, lengths, y_mel)

        # Predict V/UV
        # NOTE(review): the concat order below (lf0, mel) must stay fixed;
        # trained checkpoints depend on this exact feature layout.
        if is_inference:
            if self.mel_model.prediction_type() == PredictionType.PROBABILISTIC:
                mel_cond = mel[0]
            else:
                mel_cond = mel

            # full cond: (x, lf0, mel)
            vuv_inp = [x]
            if self.vuv_model_lf0_conditioning:
                vuv_inp.append(lf0_cond)
            if self.vuv_model_mel_conditioning:
                vuv_inp.append(mel_cond)
            vuv_inp = torch.cat(vuv_inp, dim=-1)
            vuv = self.vuv_model.inference(vuv_inp, lengths)
        else:
            vuv_inp = [x]
            if self.vuv_model_lf0_conditioning:
                vuv_inp.append(y_lf0)
            if self.vuv_model_mel_conditioning:
                vuv_inp.append(y_mel)
            vuv_inp = torch.cat(vuv_inp, dim=-1)
            vuv = self.vuv_model(vuv_inp, lengths, y_vuv)

        if is_inference:
            # Collapse probabilistic outputs to their first/mean element
            # and concatenate all streams into a single feature vector.
            if self.lf0_model.prediction_type() == PredictionType.PROBABILISTIC:
                lf0_ = lf0[0]
            else:
                lf0_ = lf0
            if self.mel_model.prediction_type() == PredictionType.PROBABILISTIC:
                mel_ = mel[0]
            else:
                mel_ = mel
            out = torch.cat([mel_, lf0_, vuv], dim=-1)
            assert out.shape[-1] == self.out_dim
            # TODO: better design
            return out, out
        else:
            return (mel, lf0, vuv), lf0_residual

    def inference(self, x, lengths=None):
        """Inference with padding so frame counts match the reduction factor."""
        return pad_inference(
            model=self,
            x=x,
            lengths=lengths,
            reduction_factor=self.reduction_factor,
            mdn=True,
        )
| 34,956 | 36.267591 | 92 | py |
nnsvs | nnsvs-master/nnsvs/acoustic_models/tacotron.py | import torch
from nnsvs.acoustic_models.util import pad_inference
from nnsvs.base import BaseModel
from nnsvs.tacotron.decoder import MDNNonAttentiveDecoder
from nnsvs.tacotron.decoder import NonAttentiveDecoder as TacotronNonAttentiveDecoder
from nnsvs.tacotron.postnet import Postnet as TacotronPostnet
from nnsvs.util import init_weights
from torch import nn
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
__all__ = [
"NonAttentiveDecoder",
"MDNNonAttentiveDecoder",
"BiLSTMNonAttentiveDecoder",
"BiLSTMMDNNonAttentiveDecoder",
]
class NonAttentiveDecoder(TacotronNonAttentiveDecoder):
    """Non-attentive autoregressive decoder with an optional postnet.

    Based on the duration-informed Tacotron :cite:t:`okamoto2019tacotron`.
    Extends the base non-attentive decoder with an optional Tacotron-style
    postnet: when ``postnet_layers > 0``, ``forward`` returns both the raw
    decoder output and a postnet-refined version.

    .. note::
        If the decoder targets are normalized to N(0, 1), pick
        ``initial_value`` so that it roughly matches the value of silence
        (e.g., -4 to -10). ``initial_value=0`` works for large databases,
        but -4 or lower tends to work better for small ones such as
        nit-song070.

    Args:
        in_dim (int): Input dimension.
        out_dim (int): Output dimension.
        layers (int): Number of LSTM layers.
        hidden_dim (int): Hidden dimension of LSTM.
        prenet_layers (int): Number of prenet layers.
        prenet_hidden_dim (int): Hidden dimension of prenet.
        prenet_dropout (float): Dropout rate of prenet.
        zoneout (float): Zoneout rate.
        reduction_factor (int): Reduction factor.
        downsample_by_conv (bool): If True, downsampling is performed by convolution.
        postnet_layers (int): Number of postnet layers (0 disables the postnet).
        postnet_channels (int): Number of postnet channels.
        postnet_kernel_size (int): Kernel size of postnet.
        postnet_dropout (float): Dropout rate of postnet.
        init_type (str): Initialization type.
        eval_dropout (bool): If True, dropout is applied in evaluation.
        prenet_noise_std (float): Standard deviation of prenet noise.
        initial_value (float): Initial value for the autoregressive decoder.
    """

    def __init__(
        self,
        in_dim=512,
        out_dim=80,
        layers=2,
        hidden_dim=1024,
        prenet_layers=2,
        prenet_hidden_dim=256,
        prenet_dropout=0.5,
        zoneout=0.1,
        reduction_factor=1,
        downsample_by_conv=False,
        postnet_layers=0,
        postnet_channels=512,
        postnet_kernel_size=5,
        postnet_dropout=0.0,
        init_type="none",
        eval_dropout=True,
        prenet_noise_std=0.0,
        initial_value=0.0,
    ):
        super().__init__(
            in_dim=in_dim,
            out_dim=out_dim,
            layers=layers,
            hidden_dim=hidden_dim,
            prenet_layers=prenet_layers,
            prenet_hidden_dim=prenet_hidden_dim,
            prenet_dropout=prenet_dropout,
            zoneout=zoneout,
            reduction_factor=reduction_factor,
            downsample_by_conv=downsample_by_conv,
            eval_dropout=eval_dropout,
            prenet_noise_std=prenet_noise_std,
            initial_value=initial_value,
        )
        # Postnet is optional; disabled when postnet_layers == 0.
        self.postnet = (
            TacotronPostnet(
                out_dim,
                layers=postnet_layers,
                channels=postnet_channels,
                kernel_size=postnet_kernel_size,
                dropout=postnet_dropout,
            )
            if postnet_layers > 0
            else None
        )
        init_weights(self, init_type)

    def forward(self, x, lengths=None, y=None):
        """Decode, optionally refining the output with the postnet.

        Returns the decoder output alone when no postnet is configured,
        otherwise a two-element list ``[coarse, refined]``.
        """
        decoder_outs = super().forward(x, lengths, y)
        if self.postnet is None:
            return decoder_outs
        # NOTE: `.clone()` is necessary to compute grad on both the coarse
        # and the refined outputs.
        residual = self.postnet(decoder_outs.transpose(1, 2).clone()).transpose(1, 2)
        return [decoder_outs, decoder_outs + residual]

    def inference(self, x, lengths=None):
        """Inference with padding so frame counts match the reduction factor."""
        return pad_inference(
            model=self,
            x=x,
            lengths=lengths,
            reduction_factor=self.reduction_factor,
        )
class BiLSTMNonAttentiveDecoder(BaseModel):
    """BiLSTM-based encoder + NonAttentiveDecoder
    The encoder is based on the architecture of the Sinsy acoustic model.
    Args:
        in_dim (int): Input dimension.
        ff_hidden_dim (int): Hidden dimension of feed-forward layers in the encoder.
        conv_hidden_dim (int): Hidden dimension of convolution layers in the encoder.
        lstm_hidden_dim (int): Hidden dimension of LSTM layers in the encoder.
        num_lstm_layers (int): Number of LSTM layers in the encoder.
        out_dim (int): Output dimension.
        decoder_layers (int): Number of LSTM layers of the decoder.
        decoder_hidden_dim (int): Hidden dimension of the decoder LSTM.
        prenet_layers (int): Number of prenet layers.
        prenet_hidden_dim (int): Hidden dimension of prenet.
        prenet_dropout (float): Dropout rate of prenet.
        zoneout (float): Zoneout rate.
        reduction_factor (int): Reduction factor.
        downsample_by_conv (bool): If True, downsampling is performed by convolution.
        postnet_layers (int): Number of postnet layers.
        postnet_channels (int): Number of postnet channels.
        postnet_kernel_size (int): Kernel size of postnet.
        postnet_dropout (float): Dropout rate of postnet.
        in_ph_start_idx (int): Start index of phoneme features.
        in_ph_end_idx (int): End index of phoneme features.
        embed_dim (int): Embedding dimension.
        init_type (str): Initialization type.
        eval_dropout (bool): If True, dropout is applied in evaluation.
        prenet_noise_std (float): Std. of noise for the prenet input.
        initial_value (float) : initial value for the autoregressive decoder.
    """
    def __init__(
        self,
        in_dim=512,
        ff_hidden_dim=2048,
        conv_hidden_dim=1024,
        lstm_hidden_dim=256,
        num_lstm_layers=2,
        out_dim=80,
        decoder_layers=2,
        decoder_hidden_dim=1024,
        prenet_layers=2,
        prenet_hidden_dim=256,
        prenet_dropout=0.5,
        zoneout=0.1,
        reduction_factor=1,
        downsample_by_conv=False,
        postnet_layers=0,
        postnet_channels=512,
        postnet_kernel_size=5,
        postnet_dropout=0.0,
        in_ph_start_idx: int = 1,
        in_ph_end_idx: int = 50,
        embed_dim=None,
        init_type="none",
        eval_dropout=True,
        prenet_noise_std=0.0,
        initial_value=0.0,
    ):
        super().__init__()
        self.in_dim = in_dim
        self.out_dim = out_dim
        self.in_ph_start_idx = in_ph_start_idx
        self.in_ph_end_idx = in_ph_end_idx
        # Columns [in_ph_start_idx, in_ph_end_idx) of the input are assumed to
        # be a one-hot phoneme sub-vector (verified by an assert in forward).
        self.num_vocab = in_ph_end_idx - in_ph_start_idx
        self.embed_dim = embed_dim
        self.reduction_factor = reduction_factor
        if self.embed_dim is not None:
            # Replace the one-hot phoneme part with a learned embedding; the
            # remaining (non-phoneme) features go through a linear projection.
            assert in_dim > self.num_vocab
            self.emb = nn.Embedding(self.num_vocab, embed_dim)
            self.fc_in = nn.Linear(in_dim - self.num_vocab, embed_dim)
            ff_in_dim = embed_dim
        else:
            ff_in_dim = in_dim
        # Encoder
        # NOTE: can be simply replaced by a BiLSTM?
        # so far I use sinsy like architecture
        self.ff = nn.Sequential(
            nn.Linear(ff_in_dim, ff_hidden_dim),
            nn.ReLU(),
            nn.Linear(ff_hidden_dim, ff_hidden_dim),
            nn.ReLU(),
            nn.Linear(ff_hidden_dim, ff_hidden_dim),
            nn.ReLU(),
        )
        self.conv = nn.Sequential(
            nn.ReflectionPad1d(3),
            nn.Conv1d(ff_hidden_dim, conv_hidden_dim, kernel_size=7, padding=0),
            nn.BatchNorm1d(conv_hidden_dim),
            nn.ReLU(),
            nn.ReflectionPad1d(3),
            nn.Conv1d(conv_hidden_dim, conv_hidden_dim, kernel_size=7, padding=0),
            nn.BatchNorm1d(conv_hidden_dim),
            nn.ReLU(),
            nn.ReflectionPad1d(3),
            nn.Conv1d(conv_hidden_dim, conv_hidden_dim, kernel_size=7, padding=0),
            nn.BatchNorm1d(conv_hidden_dim),
            nn.ReLU(),
        )
        self.lstm = nn.LSTM(
            conv_hidden_dim,
            lstm_hidden_dim,
            num_lstm_layers,
            bidirectional=True,
            batch_first=True,
            dropout=0.0,
        )
        # Autoregressive decoder
        # bidirectional LSTM doubles the feature dimension
        decoder_in_dim = 2 * lstm_hidden_dim
        self.decoder = TacotronNonAttentiveDecoder(
            in_dim=decoder_in_dim,
            out_dim=out_dim,
            layers=decoder_layers,
            hidden_dim=decoder_hidden_dim,
            prenet_layers=prenet_layers,
            prenet_hidden_dim=prenet_hidden_dim,
            prenet_dropout=prenet_dropout,
            zoneout=zoneout,
            reduction_factor=reduction_factor,
            downsample_by_conv=downsample_by_conv,
            eval_dropout=eval_dropout,
            prenet_noise_std=prenet_noise_std,
            initial_value=initial_value,
        )
        if postnet_layers > 0:
            self.postnet = TacotronPostnet(
                out_dim,
                layers=postnet_layers,
                channels=postnet_channels,
                kernel_size=postnet_kernel_size,
                dropout=postnet_dropout,
            )
        else:
            self.postnet = None
        init_weights(self, init_type)
    def is_autoregressive(self):
        """Delegate autoregressive flag to the wrapped decoder."""
        return self.decoder.is_autoregressive()
    def forward(self, x, lengths=None, y=None):
        """Forward step
        Args:
            x (torch.Tensor): input features (B, T, C)
            lengths (torch.Tensor): input lengths
            y (torch.Tensor): decoder targets for teacher-forcing (B, T, C)
        Returns:
            torch.Tensor or list: decoder outputs, or a ``[coarse, refined]``
            pair when a postnet is configured.
        """
        if isinstance(lengths, torch.Tensor):
            # pack_padded_sequence requires lengths on CPU
            lengths = lengths.to("cpu")
        if self.embed_dim is not None:
            x_first, x_ph_onehot, x_last = torch.split(
                x,
                [
                    self.in_ph_start_idx,
                    self.num_vocab,
                    self.in_dim - self.num_vocab - self.in_ph_start_idx,
                ],
                dim=-1,
            )
            x_ph = torch.argmax(x_ph_onehot, dim=-1)
            # Make sure to have one-hot vector
            assert (x_ph_onehot.sum(-1) <= 1).all()
            x = self.emb(x_ph) + self.fc_in(torch.cat([x_first, x_last], dim=-1))
        out = self.ff(x)
        # Conv1d expects channel-first input: (B, T, C) <-> (B, C, T)
        out = self.conv(out.transpose(1, 2)).transpose(1, 2)
        sequence = pack_padded_sequence(out, lengths, batch_first=True)
        out, _ = self.lstm(sequence)
        out, _ = pad_packed_sequence(out, batch_first=True)
        outs = self.decoder(out, lengths, y)
        if self.postnet is not None:
            # NOTE: `outs.clone()`` is necessary to compute grad on both outs and outs_fine
            outs_fine = outs + self.postnet(outs.transpose(1, 2).clone()).transpose(
                1, 2
            )
            return [outs, outs_fine]
        else:
            return outs
    def inference(self, x, lengths=None):
        """Inference step
        Args:
            x (torch.Tensor): input features (B, T, C)
            lengths (torch.Tensor): input lengths
        Returns:
            torch.Tensor: decoder outputs
        """
        return pad_inference(
            model=self, x=x, lengths=lengths, reduction_factor=self.reduction_factor
        )
class BiLSTMMDNNonAttentiveDecoder(BaseModel):
    """BiLSTM-based encoder + NonAttentiveDecoder (MDN version)
    The encoder is based on the architecture of the Sinsy acoustic model.
    Args:
        in_dim (int): Input dimension.
        ff_hidden_dim (int): Hidden dimension of feed-forward layers in the encoder.
        conv_hidden_dim (int): Hidden dimension of convolution layers in the encoder.
        lstm_hidden_dim (int): Hidden dimension of LSTM layers in the encoder.
        num_lstm_layers (int): Number of LSTM layers in the encoder.
        out_dim (int): Output dimension.
        decoder_layers (int): Number of LSTM layers of the decoder.
        decoder_hidden_dim (int): Hidden dimension of the decoder LSTM.
        prenet_layers (int): Number of prenet layers.
        prenet_hidden_dim (int): Hidden dimension of prenet.
        prenet_dropout (float): Dropout rate of prenet.
        zoneout (float): Zoneout rate.
        reduction_factor (int): Reduction factor.
        downsample_by_conv (bool): If True, downsampling is performed by convolution.
        num_gaussians (int): Number of Gaussians.
        sampling_mode (str): Sampling mode.
        in_ph_start_idx (int): Start index of phoneme features.
        in_ph_end_idx (int): End index of phoneme features.
        embed_dim (int): Embedding dimension.
        init_type (str): Initialization type.
        eval_dropout (bool): If True, dropout is applied in evaluation.
        prenet_noise_std (float): Std. of noise for the prenet input.
        initial_value (float) : initial value for the autoregressive decoder.
    """
    def __init__(
        self,
        in_dim=512,
        ff_hidden_dim=2048,
        conv_hidden_dim=1024,
        lstm_hidden_dim=256,
        num_lstm_layers=2,
        out_dim=80,
        decoder_layers=2,
        decoder_hidden_dim=1024,
        prenet_layers=2,
        prenet_hidden_dim=256,
        prenet_dropout=0.5,
        zoneout=0.1,
        reduction_factor=1,
        downsample_by_conv=False,
        num_gaussians=8,
        sampling_mode="mean",
        in_ph_start_idx: int = 1,
        in_ph_end_idx: int = 50,
        embed_dim=None,
        init_type="none",
        eval_dropout=True,
        prenet_noise_std=0,
        initial_value=0.0,
    ):
        super().__init__()
        self.in_dim = in_dim
        self.out_dim = out_dim
        self.in_ph_start_idx = in_ph_start_idx
        self.in_ph_end_idx = in_ph_end_idx
        # Columns [in_ph_start_idx, in_ph_end_idx) of the input are assumed to
        # be a one-hot phoneme sub-vector (verified by an assert in forward).
        self.num_vocab = in_ph_end_idx - in_ph_start_idx
        self.embed_dim = embed_dim
        self.reduction_factor = reduction_factor
        if self.embed_dim is not None:
            # Replace the one-hot phoneme part with a learned embedding; the
            # remaining (non-phoneme) features go through a linear projection.
            assert in_dim > self.num_vocab
            self.emb = nn.Embedding(self.num_vocab, embed_dim)
            self.fc_in = nn.Linear(in_dim - self.num_vocab, embed_dim)
            ff_in_dim = embed_dim
        else:
            ff_in_dim = in_dim
        # Encoder
        # NOTE: can be simply replaced by a BiLSTM?
        # so far I use sinsy like architecture
        self.ff = nn.Sequential(
            nn.Linear(ff_in_dim, ff_hidden_dim),
            nn.ReLU(),
            nn.Linear(ff_hidden_dim, ff_hidden_dim),
            nn.ReLU(),
            nn.Linear(ff_hidden_dim, ff_hidden_dim),
            nn.ReLU(),
        )
        self.conv = nn.Sequential(
            nn.ReflectionPad1d(3),
            nn.Conv1d(ff_hidden_dim, conv_hidden_dim, kernel_size=7, padding=0),
            nn.BatchNorm1d(conv_hidden_dim),
            nn.ReLU(),
            nn.ReflectionPad1d(3),
            nn.Conv1d(conv_hidden_dim, conv_hidden_dim, kernel_size=7, padding=0),
            nn.BatchNorm1d(conv_hidden_dim),
            nn.ReLU(),
            nn.ReflectionPad1d(3),
            nn.Conv1d(conv_hidden_dim, conv_hidden_dim, kernel_size=7, padding=0),
            nn.BatchNorm1d(conv_hidden_dim),
            nn.ReLU(),
        )
        self.lstm = nn.LSTM(
            conv_hidden_dim,
            lstm_hidden_dim,
            num_lstm_layers,
            bidirectional=True,
            batch_first=True,
            dropout=0.0,
        )
        # Autoregressive decoder
        # bidirectional LSTM doubles the feature dimension
        decoder_in_dim = 2 * lstm_hidden_dim
        self.decoder = MDNNonAttentiveDecoder(
            in_dim=decoder_in_dim,
            out_dim=out_dim,
            layers=decoder_layers,
            hidden_dim=decoder_hidden_dim,
            prenet_layers=prenet_layers,
            prenet_hidden_dim=prenet_hidden_dim,
            prenet_dropout=prenet_dropout,
            zoneout=zoneout,
            reduction_factor=reduction_factor,
            downsample_by_conv=downsample_by_conv,
            num_gaussians=num_gaussians,
            sampling_mode=sampling_mode,
            eval_dropout=eval_dropout,
            prenet_noise_std=prenet_noise_std,
            initial_value=initial_value,
        )
        init_weights(self, init_type)
    def is_autoregressive(self):
        """Delegate autoregressive flag to the wrapped decoder."""
        return self.decoder.is_autoregressive()
    def prediction_type(self):
        """Delegate prediction type (probabilistic) to the wrapped MDN decoder."""
        return self.decoder.prediction_type()
    def forward(self, x, lengths=None, y=None):
        """Forward step
        Args:
            x (torch.Tensor): input features (B, T, C)
            lengths (torch.Tensor): input lengths
            y (torch.Tensor): decoder targets for teacher-forcing (B, T, C)
        Returns:
            The MDN decoder's output (mixture parameters during training).
        """
        if isinstance(lengths, torch.Tensor):
            # pack_padded_sequence requires lengths on CPU
            lengths = lengths.to("cpu")
        if self.embed_dim is not None:
            x_first, x_ph_onehot, x_last = torch.split(
                x,
                [
                    self.in_ph_start_idx,
                    self.num_vocab,
                    self.in_dim - self.num_vocab - self.in_ph_start_idx,
                ],
                dim=-1,
            )
            x_ph = torch.argmax(x_ph_onehot, dim=-1)
            # Make sure to have one-hot vector
            assert (x_ph_onehot.sum(-1) <= 1).all()
            x = self.emb(x_ph) + self.fc_in(torch.cat([x_first, x_last], dim=-1))
        out = self.ff(x)
        # Conv1d expects channel-first input: (B, T, C) <-> (B, C, T)
        out = self.conv(out.transpose(1, 2)).transpose(1, 2)
        sequence = pack_padded_sequence(out, lengths, batch_first=True)
        out, _ = self.lstm(sequence)
        out, _ = pad_packed_sequence(out, batch_first=True)
        outs = self.decoder(out, lengths, y)
        return outs
    def inference(self, x, lengths=None):
        """Inference step
        Args:
            x (torch.Tensor): input features (B, T, C)
            lengths (torch.Tensor): input lengths
        Returns:
            Decoder outputs (mdn=True makes pad_inference unpack MDN outputs).
        """
        return pad_inference(
            model=self,
            x=x,
            lengths=lengths,
            reduction_factor=self.reduction_factor,
            mdn=True,
        )
| 17,378 | 35.056017 | 91 | py |
nnsvs | nnsvs-master/nnsvs/acoustic_models/tacotron_f0.py | import numpy as np
import torch
from nnsvs.acoustic_models.util import pad_inference
from nnsvs.base import BaseModel, PredictionType
from nnsvs.mdn import MDNLayer, mdn_get_most_probable_sigma_and_mu, mdn_get_sample
from nnsvs.tacotron.decoder import Prenet, ZoneOutCell
from nnsvs.util import init_weights
from torch import nn
from torch.nn import functional as F
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
__all__ = [
"ResF0NonAttentiveDecoder",
"MDNResF0NonAttentiveDecoder",
"BiLSTMResF0NonAttentiveDecoder",
]
class ResF0NonAttentiveDecoder(BaseModel):
    """Duration-informed Tacotron with residual F0 prediction.
    Args:
        in_dim (int) : dimension of encoder hidden layer
        out_dim (int) : dimension of output
        layers (int) : number of LSTM layers
        hidden_dim (int) : dimension of hidden layer
        prenet_layers (int) : number of pre-net layers
        prenet_hidden_dim (int) : dimension of pre-net hidden layer
        prenet_dropout (float) : dropout rate of pre-net
        zoneout (float) : zoneout rate
        reduction_factor (int) : reduction factor
        downsample_by_conv (bool) : if True, downsample by convolution
        scaled_tanh (bool) : if True, use scaled tanh for residual F0 prediction
        in_lf0_idx (int): index of lf0 in input features
        in_lf0_min (float): minimum of lf0 in the training data of input features
        in_lf0_max (float): maximum of lf0 in the training data of input features
        out_lf0_idx (int): index of lf0 in output features
        out_lf0_mean (float): mean of lf0 in the training data of output features
        out_lf0_scale (float): scale of lf0 in the training data of output features
        init_type (str): initialization type
        eval_dropout (bool): if True, use dropout in evaluation
    """
    def __init__(
        self,
        in_dim=512,
        out_dim=1,
        layers=2,
        hidden_dim=1024,
        prenet_layers=2,
        prenet_hidden_dim=256,
        prenet_dropout=0.5,
        zoneout=0.1,
        reduction_factor=1,
        downsample_by_conv=False,
        scaled_tanh=True,
        # NOTE: you must carefully set the following parameters
        in_lf0_idx=300,
        in_lf0_min=5.3936276,
        in_lf0_max=6.491111,
        out_lf0_idx=180,
        out_lf0_mean=5.953093881972361,
        out_lf0_scale=0.23435173188961034,
        init_type="none",
        eval_dropout=True,
    ):
        super().__init__()
        self.out_dim = out_dim
        self.reduction_factor = reduction_factor
        self.prenet_dropout = prenet_dropout
        self.scaled_tanh = scaled_tanh
        self.in_lf0_idx = in_lf0_idx
        self.in_lf0_min = in_lf0_min
        self.in_lf0_max = in_lf0_max
        self.out_lf0_idx = out_lf0_idx
        self.out_lf0_mean = out_lf0_mean
        self.out_lf0_scale = out_lf0_scale
        if prenet_layers > 0:
            self.prenet = Prenet(
                out_dim,
                prenet_layers,
                prenet_hidden_dim,
                prenet_dropout,
                eval_dropout=eval_dropout,
            )
            lstm_in_dim = in_dim + prenet_hidden_dim
        else:
            # Without a prenet the raw previous output frame is fed back
            self.prenet = None
            prenet_hidden_dim = 0
            lstm_in_dim = in_dim + out_dim
        self.lstm = nn.ModuleList()
        for layer in range(layers):
            lstm = nn.LSTMCell(
                lstm_in_dim if layer == 0 else hidden_dim,
                hidden_dim,
            )
            # Wrap each cell with zoneout regularization
            self.lstm += [ZoneOutCell(lstm, zoneout)]
        proj_in_dim = in_dim + hidden_dim
        # One projection emits `reduction_factor` frames at once
        self.feat_out = nn.Linear(proj_in_dim, out_dim * reduction_factor, bias=False)
        if reduction_factor > 1 and downsample_by_conv:
            # Depthwise strided conv as a learned alternative to frame skipping
            self.conv_downsample = nn.Conv1d(
                in_dim,
                in_dim,
                kernel_size=reduction_factor,
                stride=reduction_factor,
                groups=in_dim,
            )
        else:
            self.conv_downsample = None
        init_weights(self, init_type)
    def _zero_state(self, hs):
        # Zero hidden/cell state matching the batch size and device of ``hs``
        init_hs = hs.new_zeros(hs.size(0), self.lstm[0].hidden_size)
        return init_hs
    def is_autoregressive(self):
        return True
    def has_residual_lf0_prediction(self):
        return True
    def forward(self, encoder_outs, in_lens, decoder_targets=None):
        """Forward step
        Args:
            encoder_outs (torch.Tensor): encoder outputs (B, T, C)
            in_lens (torch.Tensor): input lengths
            decoder_targets (torch.Tensor): decoder targets for teacher-forcing. (B, T, C)
        Returns:
            tuple: predicted features (B, T, C) and residual log-F0 (B, T, 1)
        """
        # Teacher forcing is used whenever targets are given
        is_inference = decoder_targets is None
        if not is_inference:
            assert encoder_outs.shape[1] == decoder_targets.shape[1]
        # Denormalize lf0 from input musical score
        lf0_score = encoder_outs[:, :, self.in_lf0_idx].unsqueeze(-1)
        lf0_score_denorm = (
            lf0_score * (self.in_lf0_max - self.in_lf0_min) + self.in_lf0_min
        )
        # (B, T, C) -> (B, C, T)
        lf0_score_denorm = lf0_score_denorm.transpose(1, 2)
        # To avoid unbounded residual f0 that would potentially cause artifacts,
        # let's constrain the residual F0 to be in a certain range by the scaled tanh
        residual_f0_max_cent = 600
        max_lf0_ratio = residual_f0_max_cent * np.log(2) / 1200
        # Adjust number of frames according to the reduction factor
        # (B, Lmax, out_dim) -> (B, Lmax/r, out_dim)
        if self.reduction_factor > 1 and not is_inference:
            decoder_targets = decoder_targets[
                :, self.reduction_factor - 1 :: self.reduction_factor
            ]
        if self.reduction_factor > 1:
            if self.conv_downsample is not None:
                encoder_outs = self.conv_downsample(
                    encoder_outs.transpose(1, 2)
                ).transpose(1, 2)
            else:
                encoder_outs = encoder_outs[
                    :, self.reduction_factor - 1 :: self.reduction_factor
                ]
        # Per-layer LSTM hidden/cell states, all initialized to zero
        h_list, c_list = [], []
        for _ in range(len(self.lstm)):
            h_list.append(self._zero_state(encoder_outs))
            c_list.append(self._zero_state(encoder_outs))
        # All-zero "go" frame starts the autoregressive loop
        go_frame = encoder_outs.new_zeros(encoder_outs.size(0), self.out_dim)
        prev_out = go_frame
        if not is_inference and self.prenet is not None:
            # With teacher forcing the prenet can be run on all frames at once
            prenet_outs = self.prenet(decoder_targets)
        outs = []
        lf0_residuals = []
        for t in range(encoder_outs.shape[1]):
            # Pre-Net
            if self.prenet is not None:
                if is_inference:
                    prenet_out = self.prenet(prev_out)
                else:
                    prenet_out = prenet_outs[:, t, :]
            else:
                # Dropout on the feedback frame even without a prenet
                prenet_out = F.dropout(prev_out, self.prenet_dropout, training=True)
            # LSTM
            xs = torch.cat([encoder_outs[:, t], prenet_out], dim=1)
            h_list[0], c_list[0] = self.lstm[0](xs, (h_list[0], c_list[0]))
            for i in range(1, len(self.lstm)):
                h_list[i], c_list[i] = self.lstm[i](
                    h_list[i - 1], (h_list[i], c_list[i])
                )
            # Output
            hcs = torch.cat([h_list[-1], encoder_outs[:, t]], dim=1)
            out = self.feat_out(hcs).view(encoder_outs.size(0), self.out_dim, -1)
            # Residual F0
            if self.scaled_tanh:
                # Bound the residual to +/- max_lf0_ratio (in log domain)
                lf0_residual = max_lf0_ratio * torch.tanh(
                    out[:, self.out_lf0_idx, :]
                ).unsqueeze(1)
            else:
                lf0_residual = out[:, self.out_lf0_idx, :].unsqueeze(1)
            # Residual connection in the denormalized f0 domain
            lf0_score_denorm_t = lf0_score_denorm[
                :, :, t * self.reduction_factor : (t + 1) * self.reduction_factor
            ]
            lf0_pred_denorm = lf0_score_denorm_t + lf0_residual
            # Back to normalized f0
            lf0_pred = (lf0_pred_denorm - self.out_lf0_mean) / self.out_lf0_scale
            out[:, self.out_lf0_idx, :] = lf0_pred.squeeze(1)
            outs.append(out)
            lf0_residuals.append(lf0_residual)
            # Update decoder input for the next time step
            if is_inference:
                prev_out = outs[-1][:, :, -1]  # (1, out_dim)
            else:
                # Teacher forcing
                prev_out = decoder_targets[:, t, :]
        outs = torch.cat(outs, dim=2)  # (B, out_dim, Lmax)
        lf0_residuals = torch.cat(lf0_residuals, dim=2)  # (B, 1, Lmax)
        # (B, C, T) -> (B, T, C)
        outs = outs.transpose(1, 2)
        lf0_residuals = lf0_residuals.transpose(1, 2)
        return outs, lf0_residuals
    def inference(self, x, lengths=None):
        """Inference step
        Args:
            x (torch.Tensor): encoder outputs (B, T, C)
            lengths (torch.Tensor): input lengths
        Returns:
            tuple: predicted features and residual log-F0
        """
        return pad_inference(
            model=self, x=x, lengths=lengths, reduction_factor=self.reduction_factor
        )
class MDNResF0NonAttentiveDecoder(BaseModel):
    """Duration-informed Tacotron with residual F0 prediction (MDN-version)
    Args:
        in_dim (int) : dimension of encoder hidden layer
        out_dim (int) : dimension of output
        layers (int) : number of LSTM layers
        hidden_dim (int) : dimension of hidden layer
        prenet_layers (int) : number of pre-net layers
        prenet_hidden_dim (int) : dimension of pre-net hidden layer
        prenet_dropout (float) : dropout rate of pre-net
        zoneout (float) : zoneout rate
        reduction_factor (int) : reduction factor
        downsample_by_conv (bool) : if True, downsample by convolution
        scaled_tanh (bool) : if True, use scaled tanh for residual F0 prediction
        num_gaussians (int) : number of Gaussian
        sampling_mode (str) : sampling mode
        in_lf0_idx (int): index of lf0 in input features
        in_lf0_min (float): minimum of lf0 in the training data of input features
        in_lf0_max (float): maximum of lf0 in the training data of input features
        out_lf0_idx (int): index of lf0 in output features
        out_lf0_mean (float): mean of lf0 in the training data of output features
        out_lf0_scale (float): scale of lf0 in the training data of output features
        init_type (str): initialization type
        eval_dropout (bool): if True, use dropout in evaluation
    """
    def __init__(
        self,
        in_dim=512,
        out_dim=80,
        layers=2,
        hidden_dim=1024,
        prenet_layers=2,
        prenet_hidden_dim=256,
        prenet_dropout=0.5,
        zoneout=0.1,
        reduction_factor=1,
        downsample_by_conv=False,
        scaled_tanh=True,
        num_gaussians=4,
        sampling_mode="mean",
        # NOTE: you must carefully set the following parameters
        in_lf0_idx=300,
        in_lf0_min=5.3936276,
        in_lf0_max=6.491111,
        out_lf0_idx=180,
        out_lf0_mean=5.953093881972361,
        out_lf0_scale=0.23435173188961034,
        init_type="none",
        eval_dropout=True,
    ):
        super().__init__()
        self.out_dim = out_dim
        self.reduction_factor = reduction_factor
        self.prenet_dropout = prenet_dropout
        self.scaled_tanh = scaled_tanh
        self.in_lf0_idx = in_lf0_idx
        self.in_lf0_min = in_lf0_min
        self.in_lf0_max = in_lf0_max
        self.out_lf0_idx = out_lf0_idx
        self.out_lf0_mean = out_lf0_mean
        self.out_lf0_scale = out_lf0_scale
        self.num_gaussians = num_gaussians
        self.sampling_mode = sampling_mode
        assert sampling_mode in ["mean", "random"]
        if prenet_layers > 0:
            self.prenet = Prenet(
                out_dim,
                prenet_layers,
                prenet_hidden_dim,
                prenet_dropout,
                eval_dropout=eval_dropout,
            )
            lstm_in_dim = in_dim + prenet_hidden_dim
        else:
            # Without a prenet the raw previous output frame is fed back
            self.prenet = None
            prenet_hidden_dim = 0
            lstm_in_dim = in_dim + out_dim
        self.lstm = nn.ModuleList()
        for layer in range(layers):
            lstm = nn.LSTMCell(
                lstm_in_dim if layer == 0 else hidden_dim,
                hidden_dim,
            )
            # Wrap each cell with zoneout regularization
            self.lstm += [ZoneOutCell(lstm, zoneout)]
        proj_in_dim = in_dim + hidden_dim
        # MDN head predicts per-dimension mixture parameters for
        # `reduction_factor` frames at once
        self.feat_out = MDNLayer(
            proj_in_dim,
            out_dim * reduction_factor,
            num_gaussians=num_gaussians,
            dim_wise=True,
        )
        if reduction_factor > 1 and downsample_by_conv:
            # Depthwise strided conv as a learned alternative to frame skipping
            self.conv_downsample = nn.Conv1d(
                in_dim,
                in_dim,
                kernel_size=reduction_factor,
                stride=reduction_factor,
                groups=in_dim,
            )
        else:
            self.conv_downsample = None
        init_weights(self, init_type)
    def _zero_state(self, hs):
        # Zero hidden/cell state matching the batch size and device of ``hs``
        init_hs = hs.new_zeros(hs.size(0), self.lstm[0].hidden_size)
        return init_hs
    def prediction_type(self):
        return PredictionType.PROBABILISTIC
    def is_autoregressive(self):
        return True
    def has_residual_lf0_prediction(self):
        return True
    def forward(self, encoder_outs, in_lens, decoder_targets=None):
        """Forward step
        Args:
            encoder_outs (torch.Tensor): encoder outputs (B, T, C)
            in_lens (torch.Tensor): input lengths
            decoder_targets (torch.Tensor): decoder targets for teacher-forcing. (B, T, C)
        Returns:
            tuple: with teacher-forcing, ``((log_pi, log_sigma, mu), lf0_residuals)``
            where mixture params are (B, T, num_gaussians, C); at inference,
            ``(mu, mu)`` with mu of shape (B, T, C).
        """
        # Teacher forcing is used whenever targets are given
        is_inference = decoder_targets is None
        if not is_inference:
            assert encoder_outs.shape[1] == decoder_targets.shape[1]
        # Denormalize lf0 from input musical score
        lf0_score = encoder_outs[:, :, self.in_lf0_idx].unsqueeze(-1)
        lf0_score_denorm = (
            lf0_score * (self.in_lf0_max - self.in_lf0_min) + self.in_lf0_min
        )
        # (B, T, C) -> (B, C, T)
        # (B, T, C)
        # lf0_score_denorm = lf0_score_denorm.transpose(1, 2)
        # To avoid unbounded residual f0 that would potentially cause artifacts,
        # let's constrain the residual F0 to be in a certain range by the scaled tanh
        residual_f0_max_cent = 600
        max_lf0_ratio = residual_f0_max_cent * np.log(2) / 1200
        # Adjust number of frames according to the reduction factor
        # (B, Lmax, out_dim) -> (B, Lmax/r, out_dim)
        if self.reduction_factor > 1 and not is_inference:
            decoder_targets = decoder_targets[
                :, self.reduction_factor - 1 :: self.reduction_factor
            ]
        if self.reduction_factor > 1:
            if self.conv_downsample is not None:
                encoder_outs = self.conv_downsample(
                    encoder_outs.transpose(1, 2)
                ).transpose(1, 2)
            else:
                encoder_outs = encoder_outs[
                    :, self.reduction_factor - 1 :: self.reduction_factor
                ]
        # Per-layer LSTM hidden/cell states, all initialized to zero
        h_list, c_list = [], []
        for _ in range(len(self.lstm)):
            h_list.append(self._zero_state(encoder_outs))
            c_list.append(self._zero_state(encoder_outs))
        # All-zero "go" frame starts the autoregressive loop
        go_frame = encoder_outs.new_zeros(encoder_outs.size(0), self.out_dim)
        prev_out = go_frame
        if not is_inference and self.prenet is not None:
            # With teacher forcing the prenet can be run on all frames at once
            prenet_outs = self.prenet(decoder_targets)
        mus = []
        log_pis = []
        log_sigmas = []
        lf0_residuals = []
        mus_inf = []
        for t in range(encoder_outs.shape[1]):
            # Pre-Net
            if self.prenet is not None:
                if is_inference:
                    prenet_out = self.prenet(prev_out)
                else:
                    prenet_out = prenet_outs[:, t, :]
            else:
                # Dropout on the feedback frame even without a prenet
                prenet_out = F.dropout(prev_out, self.prenet_dropout, training=True)
            # LSTM
            xs = torch.cat([encoder_outs[:, t], prenet_out], dim=1)
            h_list[0], c_list[0] = self.lstm[0](xs, (h_list[0], c_list[0]))
            for i in range(1, len(self.lstm)):
                h_list[i], c_list[i] = self.lstm[i](
                    h_list[i - 1], (h_list[i], c_list[i])
                )
            # Output
            hcs = torch.cat([h_list[-1], encoder_outs[:, t]], dim=1)
            log_pi, log_sigma, mu = self.feat_out(hcs.unsqueeze(1))
            # Split the out_dim * reduction_factor prediction back into
            # (B, reduction_factor, num_gaussians, out_dim)
            log_pi = (
                log_pi.transpose(1, 2)
                .view(encoder_outs.size(0), self.num_gaussians, -1, self.out_dim)
                .transpose(1, 2)
            )
            log_sigma = (
                log_sigma.transpose(1, 2)
                .view(encoder_outs.size(0), self.num_gaussians, -1, self.out_dim)
                .transpose(1, 2)
            )
            mu = (
                mu.transpose(1, 2)
                .view(encoder_outs.size(0), self.num_gaussians, -1, self.out_dim)
                .transpose(1, 2)
            )
            # Residual F0
            # (B, reduction_factor, num_gaussians)
            if self.scaled_tanh:
                # Bound the residual to +/- max_lf0_ratio (in log domain)
                lf0_residual = max_lf0_ratio * torch.tanh(mu[:, :, :, self.out_lf0_idx])
            else:
                lf0_residual = mu[:, :, :, self.out_lf0_idx]
            # Residual connection in the denormalized f0 domain
            lf0_score_denorm_t = lf0_score_denorm[
                :, t * self.reduction_factor : (t + 1) * self.reduction_factor, :
            ]
            # NOTE: broadcast against num_gaussians axis
            # (B, 1, 1) + (B, 1, num_gaussians) -> (B, 1, num_gaussians)
            lf0_pred_denorm = lf0_score_denorm_t + lf0_residual
            # Back to normalized f0
            lf0_pred = (lf0_pred_denorm - self.out_lf0_mean) / self.out_lf0_scale
            mu[:, :, :, self.out_lf0_idx] = lf0_pred
            mus.append(mu)
            log_pis.append(log_pi)
            log_sigmas.append(log_sigma)
            lf0_residuals.append(lf0_residual)
            # Update decoder input for the next time step
            if is_inference:
                # (B, reduction_factor, out_dim)
                if self.sampling_mode == "mean":
                    # Take the mean of the most probable mixture component
                    _, mu = mdn_get_most_probable_sigma_and_mu(log_pi, log_sigma, mu)
                elif self.sampling_mode == "random":
                    mu = mdn_get_sample(log_pi, log_sigma, mu)
                # Feed last sample for the feedback loop
                prev_out = mu[:, -1]
                mus_inf.append(mu)
            else:
                # Teacher forcing
                prev_out = decoder_targets[:, t, :]
        # (B, T, G, out_dim)
        mus = torch.cat(mus, dim=1)
        log_pis = torch.cat(log_pis, dim=1)
        log_sigmas = torch.cat(log_sigmas, dim=1)
        # (B, T, num_gaussians)
        lf0_residuals = torch.cat(lf0_residuals, dim=1)
        if is_inference:
            mu = torch.cat(mus_inf, dim=1)
            # TODO: may need to track sigma. For now we only use mu
            return mu, mu
        else:
            return (log_pis, log_sigmas, mus), lf0_residuals
    def inference(self, x, lengths=None):
        """Inference step
        Args:
            x (torch.Tensor): encoder outputs (B, T, C)
            lengths (torch.Tensor): input lengths
        Returns:
            Decoder outputs (mdn=True makes pad_inference unpack MDN outputs).
        """
        return pad_inference(
            model=self,
            x=x,
            lengths=lengths,
            reduction_factor=self.reduction_factor,
            mdn=True,
        )
class BiLSTMResF0NonAttentiveDecoder(BaseModel):
"""BiLSTM-based encoder + duration-informed Tacotron with residual F0 prediction.
Args:
in_dim (int) : dimension of encoder hidden layer
ff_hidden_dim (int): Hidden dimension of feed-forward layers in the encoder.
conv_hidden_dim (int): Hidden dimension of convolution layers in the encoder.
lstm_hidden_dim (int): Hidden dimension of LSTM layers in the encoder.
num_lstm_layers (int): Number of LSTM layers in the encoder.
out_dim (int) : dimension of output
layers (int) : number of LSTM layers
hidden_dim (int) : dimension of hidden layer
prenet_layers (int) : number of pre-net layers
prenet_hidden_dim (int) : dimension of pre-net hidden layer
prenet_dropout (float) : dropout rate of pre-net
zoneout (float) : zoneout rate
reduction_factor (int) : reduction factor
downsample_by_conv (bool) : if True, downsample by convolution
scaled_tanh (bool) : if True, use scaled tanh for residual F0 prediction
in_lf0_idx (int): index of lf0 in input features
in_lf0_min (float): minimum of lf0 in the training data of input features
in_lf0_max (float): maximum of lf0 in the training data of input features
out_lf0_idx (int): index of lf0 in output features
out_lf0_mean (float): mean of lf0 in the training data of output features
out_lf0_scale (float): scale of lf0 in the training data of output features
use_mdn (bool): if True, use mixture density network for F0 prediction
num_gaussians (int): number of gaussians in MDN
sampling_mode (str): sampling mode in inference. "mean" or "random"
in_ph_start_idx (int): Start index of phoneme features.
in_ph_end_idx (int): End index of phoneme features.
embed_dim (int): Embedding dimension.
init_type (str): initialization type
"""
def __init__(
self,
in_dim=512,
ff_hidden_dim=2048,
conv_hidden_dim=1024,
lstm_hidden_dim=256,
num_lstm_layers=2,
dropout=0.0,
out_dim=80,
decoder_layers=2,
decoder_hidden_dim=1024,
prenet_layers=2,
prenet_hidden_dim=256,
prenet_dropout=0.5,
zoneout=0.1,
reduction_factor=1,
downsample_by_conv=False,
scaled_tanh=True,
# NOTE: you must carefully set the following parameters
in_lf0_idx=300,
in_lf0_min=5.3936276,
in_lf0_max=6.491111,
out_lf0_idx=180,
out_lf0_mean=5.953093881972361,
out_lf0_scale=0.23435173188961034,
use_mdn=False,
num_gaussians=4,
sampling_mode="mean",
in_ph_start_idx: int = 1,
in_ph_end_idx: int = 50,
embed_dim=None,
init_type="none",
):
super().__init__()
self.reduction_factor = reduction_factor
self.in_lf0_idx = in_lf0_idx
self.in_lf0_min = in_lf0_min
self.in_lf0_max = in_lf0_max
self.out_lf0_idx = out_lf0_idx
self.out_lf0_mean = out_lf0_mean
self.out_lf0_scale = out_lf0_scale
self.use_mdn = use_mdn
self.in_dim = in_dim
self.in_ph_start_idx = in_ph_start_idx
self.in_ph_end_idx = in_ph_end_idx
self.num_vocab = in_ph_end_idx - in_ph_start_idx
self.embed_dim = embed_dim
if self.embed_dim is not None:
assert in_dim > self.num_vocab
self.emb = nn.Embedding(self.num_vocab, embed_dim)
self.fc_in = nn.Linear(in_dim - self.num_vocab, embed_dim)
in_ff_dim = embed_dim
else:
in_ff_dim = in_dim
# Encoder
# NOTE: can be simply replaced by a BiLSTM?
# so far I use sinsy like architecture
self.ff = nn.Sequential(
nn.Linear(in_ff_dim, ff_hidden_dim),
nn.ReLU(),
nn.Linear(ff_hidden_dim, ff_hidden_dim),
nn.ReLU(),
nn.Linear(ff_hidden_dim, ff_hidden_dim),
nn.ReLU(),
)
self.conv = nn.Sequential(
nn.ReflectionPad1d(3),
nn.Conv1d(ff_hidden_dim + 1, conv_hidden_dim, kernel_size=7, padding=0),
nn.BatchNorm1d(conv_hidden_dim),
nn.ReLU(),
nn.ReflectionPad1d(3),
nn.Conv1d(conv_hidden_dim, conv_hidden_dim, kernel_size=7, padding=0),
nn.BatchNorm1d(conv_hidden_dim),
nn.ReLU(),
nn.ReflectionPad1d(3),
nn.Conv1d(conv_hidden_dim, conv_hidden_dim, kernel_size=7, padding=0),
nn.BatchNorm1d(conv_hidden_dim),
nn.ReLU(),
)
self.lstm = nn.LSTM(
conv_hidden_dim,
lstm_hidden_dim,
num_lstm_layers,
bidirectional=True,
batch_first=True,
dropout=dropout,
)
# Autoregressive decoder
decoder_in_dim = 2 * lstm_hidden_dim + 1
# NOTE: hard code in_lf0_idx to -1
cls = MDNResF0NonAttentiveDecoder if use_mdn else ResF0NonAttentiveDecoder
if use_mdn:
ex_kwargs = {"num_gaussians": num_gaussians, "sampling_mode": sampling_mode}
else:
ex_kwargs = {}
self.decoder = cls(
in_dim=decoder_in_dim,
out_dim=out_dim,
layers=decoder_layers,
hidden_dim=decoder_hidden_dim,
prenet_layers=prenet_layers,
prenet_hidden_dim=prenet_hidden_dim,
prenet_dropout=prenet_dropout,
zoneout=zoneout,
reduction_factor=reduction_factor,
downsample_by_conv=downsample_by_conv,
scaled_tanh=scaled_tanh,
in_lf0_idx=-1,
in_lf0_min=in_lf0_min,
in_lf0_max=in_lf0_max,
out_lf0_idx=out_lf0_idx,
out_lf0_mean=out_lf0_mean,
out_lf0_scale=out_lf0_scale,
**ex_kwargs,
)
init_weights(self, init_type)
def _set_lf0_params(self):
# Special care for residual F0 prediction models
# NOTE: don't overwrite out_lf0_idx and in_lf0_idx
if hasattr(self.decoder, "out_lf0_mean"):
self.decoder.in_lf0_min = self.in_lf0_min
self.decoder.in_lf0_max = self.in_lf0_max
self.decoder.out_lf0_mean = self.out_lf0_mean
self.decoder.out_lf0_scale = self.out_lf0_scale
def is_autoregressive(self):
return self.decoder.is_autoregressive()
def prediction_type(self):
return (
PredictionType.PROBABILISTIC
if self.use_mdn
else PredictionType.DETERMINISTIC
)
def has_residual_lf0_prediction(self):
return True
def forward(self, x, lengths=None, y=None):
self._set_lf0_params()
lf0_score = x[:, :, self.in_lf0_idx].unsqueeze(-1)
if self.embed_dim is not None:
x_first, x_ph_onehot, x_last = torch.split(
x,
[
self.in_ph_start_idx,
self.num_vocab,
self.in_dim - self.num_vocab - self.in_ph_start_idx,
],
dim=-1,
)
x_ph = torch.argmax(x_ph_onehot, dim=-1)
# Make sure to have one-hot vector
assert (x_ph_onehot.sum(-1) <= 1).all()
x = self.emb(x_ph) + self.fc_in(torch.cat([x_first, x_last], dim=-1))
if isinstance(lengths, torch.Tensor):
lengths = lengths.to("cpu")
out = self.ff(x)
out = torch.cat([out, lf0_score], dim=-1)
out = self.conv(out.transpose(1, 2)).transpose(1, 2)
sequence = pack_padded_sequence(out, lengths, batch_first=True)
out, _ = self.lstm(sequence)
out, _ = pad_packed_sequence(out, batch_first=True)
# NOTE: need to concat the lf0 score to the output of the lstm to tell
# the decoder the lf0
out = torch.cat([out, lf0_score], dim=-1)
outs, lf0_residual = self.decoder(out, lengths, y)
return outs, lf0_residual
    def inference(self, x, lengths=None):
        """Inference step.

        Pads the input to a multiple of the reduction factor, runs the model,
        and trims the output back to the original length (see
        :func:`nnsvs.acoustic_models.util.pad_inference`).

        Args:
            x (torch.Tensor): input features (B, T, in_dim)
            lengths (torch.Tensor): lengths of input features

        Returns:
            torch.Tensor or tuple: model output; ``(mu, sigma)`` when MDN
                is used.
        """
        return pad_inference(
            model=self,
            x=x,
            lengths=lengths,
            reduction_factor=self.reduction_factor,
            mdn=self.use_mdn,
        )
| 27,907 | 36.111702 | 90 | py |
nnsvs | nnsvs-master/nnsvs/acoustic_models/util.py | import numpy as np
import torch
from nnsvs.base import PredictionType
from nnsvs.mdn import mdn_get_most_probable_sigma_and_mu
from torch.nn import functional as F
def predict_lf0_with_residual(
    in_feats,
    out_feats,
    in_lf0_idx=300,
    in_lf0_min=5.3936276,
    in_lf0_max=6.491111,
    out_lf0_idx=180,
    out_lf0_mean=5.953093881972361,
    out_lf0_scale=0.23435173188961034,
    residual_f0_max_cent=600,
):
    """Predict log-F0 as a bounded residual around the musical score's F0.

    Args:
        in_feats (np.ndarray): input features
        out_feats (np.ndarray): output of an acoustic model
        in_lf0_idx (int): index of LF0 in input features
        in_lf0_min (float): minimum value of LF0 in the training data of input features
        in_lf0_max (float): maximum value of LF0 in the training data of input features
        out_lf0_idx (int): index of LF0 in output features
        out_lf0_mean (float): mean of LF0 in the training data of output features
        out_lf0_scale (float): scale of LF0 in the training data of output features
        residual_f0_max_cent (int): maximum value of residual LF0 in cent

    Returns:
        tuple: (predicted log-F0, residual log-F0)
    """
    # Recover the (denormalized) lf0 contour of the musical score
    score_lf0_norm = in_feats[:, :, in_lf0_idx].unsqueeze(-1)
    score_lf0 = score_lf0_norm * (in_lf0_max - in_lf0_min) + in_lf0_min
    # To avoid an unbounded residual F0 that could cause artifacts, constrain
    # it to +/- residual_f0_max_cent cents with a scaled tanh.
    bound = residual_f0_max_cent * np.log(2) / 1200
    if out_feats.dim() == 4:
        # MDN case: (B, T, num_gaussians, C) -> (B, T, num_gaussians)
        raw_residual = out_feats[..., out_lf0_idx]
    else:
        # (B, T, C) -> (B, T, 1)
        raw_residual = out_feats[:, :, out_lf0_idx].unsqueeze(-1)
    lf0_residual = bound * torch.tanh(raw_residual)
    # Residual connection in the denormalized f0 domain, then re-normalize
    # with the output-feature statistics.
    lf0_pred = (score_lf0 + lf0_residual - out_lf0_mean) / out_lf0_scale
    return lf0_pred, lf0_residual
def pad_inference(
    model, x, lengths, reduction_factor, mode="replicate", y=None, mdn=False
):
    """Run inference with padding to a multiple of the reduction factor.

    Pads the input features so their length is a multiple of
    ``reduction_factor``, runs the model, then trims the output back to the
    original length.

    Args:
        model: model to run; must implement ``has_residual_lf0_prediction``
            (and ``prediction_type`` when ``mdn`` is True).
        x (torch.Tensor): input features (B, T, C)
        lengths (list or torch.Tensor): lengths of input features
        reduction_factor (int): reduction factor of the model
        mode (str): padding mode passed to ``torch.nn.functional.pad``
        y (torch.Tensor): optional target features (teacher forcing)
        mdn (bool): whether the model outputs MDN parameters

    Returns:
        torch.Tensor or tuple: output trimmed to the input length;
            ``(mu, sigma)`` when ``mdn`` is True.
    """
    # Amount of padding needed; 0 when the length is already aligned.
    # (Previously a full extra reduction_factor frames were padded and then
    # trimmed in the aligned case; equivalent result, needless work.)
    pad = (reduction_factor - max(lengths) % reduction_factor) % reduction_factor
    if pad != 0:
        x_pad = F.pad(x, (0, 0, 0, pad), mode=mode)
        y_pad = F.pad(y, (0, 0, 0, pad), mode=mode) if y is not None else None
        if isinstance(lengths, torch.Tensor):
            lengths = lengths.clone()
        else:
            lengths = lengths.copy()
        lengths = [length + pad for length in lengths]
    else:
        x_pad = x
        y_pad = y if y is not None else None
    y = model(x_pad, lengths, y_pad)
    if mdn:
        # Residual F0 prediction: (out, lf0)
        assert isinstance(y, tuple) and len(y) == 2
        # NOTE: need to parse per-stream output
        if (
            model.prediction_type() == PredictionType.MULTISTREAM_HYBRID
            and y_pad is not None
        ):
            if len(y[0]) == 4:
                mgc, lf0, vuv, bap = y[0]
                # MDN streams come as (log_pi, log_sigma, mu) or (mu, sigma)
                if isinstance(mgc, tuple) and len(mgc) == 3:
                    mgc = mdn_get_most_probable_sigma_and_mu(*mgc)[1]
                elif isinstance(mgc, tuple) and len(mgc) == 2:
                    mgc = mgc[1]
                if isinstance(bap, tuple) and len(bap) == 3:
                    bap = mdn_get_most_probable_sigma_and_mu(*bap)[1]
                elif isinstance(bap, tuple) and len(bap) == 2:
                    bap = bap[1]
                if pad != 0:
                    mgc = mgc[:, :-pad] if mgc.shape[1] > x.shape[1] else mgc
                    bap = bap[:, :-pad] if bap.shape[1] > x.shape[1] else bap
                    lf0 = lf0[:, :-pad] if lf0.shape[1] > x.shape[1] else lf0
                    vuv = vuv[:, :-pad] if vuv.shape[1] > x.shape[1] else vuv
                mu = torch.cat([mgc, lf0, vuv, bap], dim=-1)
            elif len(y[0]) == 3:
                mel, lf0, vuv = y[0]
                if isinstance(mel, tuple) and len(mel) == 3:
                    mel = mdn_get_most_probable_sigma_and_mu(*mel)[1]
                elif isinstance(mel, tuple) and len(mel) == 2:
                    mel = mel[1]
                if pad != 0:
                    mel = mel[:, :-pad] if mel.shape[1] > x.shape[1] else mel
                    lf0 = lf0[:, :-pad] if lf0.shape[1] > x.shape[1] else lf0
                    vuv = vuv[:, :-pad] if vuv.shape[1] > x.shape[1] else vuv
                mu = torch.cat([mel, lf0, vuv], dim=-1)
            else:
                # BUGFIX: report the number of streams (len(y[0])); len(y)
                # is always 2 here because of the assert above.
                raise ValueError(
                    "Invalid number of streams: {}".format(len(y[0]))
                )
            # No distribution parameters exist for the concatenated
            # multi-stream output; return mu as a placeholder for sigma to
            # keep the (mu, sigma) interface.
            sigma = mu
        else:
            mu, sigma = y
            if pad != 0:
                mu = mu[:, :-pad]
                sigma = sigma[:, :-pad]
        y = (mu, sigma)
    else:
        if model.has_residual_lf0_prediction():
            y = y[0]
        # Multiple output: (out, out_fine)
        if isinstance(y, list):
            y = y[-1]
        if pad != 0:
            y = y[:, :-pad]
    return y
| 5,262 | 38.276119 | 87 | py |
nnsvs | nnsvs-master/nnsvs/acoustic_models/sinsy.py | import torch
from nnsvs.acoustic_models.util import predict_lf0_with_residual
from nnsvs.base import BaseModel, PredictionType
from nnsvs.mdn import MDNLayer, mdn_get_most_probable_sigma_and_mu
from nnsvs.util import init_weights
from torch import nn
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
# Public API of this module.
__all__ = [
    "ResSkipF0FFConvLSTM",
]
class ResSkipF0FFConvLSTM(BaseModel):
    """FFN + Conv1d + LSTM + residual/skip connections

    A model proposed in :cite:t:`hono2021sinsy`.

    Args:
        in_dim (int): input dimension
        ff_hidden_dim (int): hidden dimension of feed-forward layer
        conv_hidden_dim (int): hidden dimension of convolutional layer
        lstm_hidden_dim (int): hidden dimension of LSTM layer
        out_dim (int): output dimension
        dropout (float): dropout rate
        num_lstm_layers (int): number of LSTM layers
        bidirectional (bool): whether to use bidirectional LSTM or not
        in_lf0_idx (int): index of lf0 in input features
        in_lf0_min (float): minimum of lf0 in the training data of input features
        in_lf0_max (float): maximum of lf0 in the training data of input features
        out_lf0_idx (int): index of lf0 in output features
        out_lf0_mean (float): mean of lf0 in the training data of output features
        out_lf0_scale (float): scale of lf0 in the training data of output features
        skip_inputs (bool): whether to use skip connection for the input features
        init_type (str): initialization type
        use_mdn (bool): whether to use MDN or not
        num_gaussians (int): number of gaussians in MDN
        dim_wise (bool): whether to use MDN with dim-wise or not
    """

    def __init__(
        self,
        in_dim,
        ff_hidden_dim=2048,
        conv_hidden_dim=1024,
        lstm_hidden_dim=256,
        out_dim=199,
        dropout=0.0,
        num_lstm_layers=2,
        bidirectional=True,
        # NOTE: you must carefully set the following parameters
        in_lf0_idx=300,
        in_lf0_min=5.3936276,
        in_lf0_max=6.491111,
        out_lf0_idx=180,
        out_lf0_mean=5.953093881972361,
        out_lf0_scale=0.23435173188961034,
        skip_inputs=False,
        init_type="none",
        use_mdn=False,
        num_gaussians=8,
        dim_wise=False,
    ):
        super().__init__()
        self.in_lf0_idx = in_lf0_idx
        self.in_lf0_min = in_lf0_min
        self.in_lf0_max = in_lf0_max
        self.out_lf0_idx = out_lf0_idx
        self.out_lf0_mean = out_lf0_mean
        self.out_lf0_scale = out_lf0_scale
        self.skip_inputs = skip_inputs
        self.use_mdn = use_mdn
        self.ff = nn.Sequential(
            nn.Linear(in_dim, ff_hidden_dim),
            nn.ReLU(),
            nn.Linear(ff_hidden_dim, ff_hidden_dim),
            nn.ReLU(),
            nn.Linear(ff_hidden_dim, ff_hidden_dim),
            nn.ReLU(),
        )
        # +1 input channel: the lf0 score is concatenated before the convs
        self.conv = nn.Sequential(
            nn.ReflectionPad1d(3),
            nn.Conv1d(ff_hidden_dim + 1, conv_hidden_dim, kernel_size=7, padding=0),
            nn.BatchNorm1d(conv_hidden_dim),
            nn.ReLU(),
            nn.ReflectionPad1d(3),
            nn.Conv1d(conv_hidden_dim, conv_hidden_dim, kernel_size=7, padding=0),
            nn.BatchNorm1d(conv_hidden_dim),
            nn.ReLU(),
            nn.ReflectionPad1d(3),
            nn.Conv1d(conv_hidden_dim, conv_hidden_dim, kernel_size=7, padding=0),
            nn.BatchNorm1d(conv_hidden_dim),
            nn.ReLU(),
        )
        num_direction = 2 if bidirectional else 1
        self.lstm = nn.LSTM(
            conv_hidden_dim,
            lstm_hidden_dim,
            num_lstm_layers,
            # BUGFIX: was hardcoded to True. With bidirectional=False the
            # LSTM output dim (2 * lstm_hidden_dim) would mismatch
            # num_direction * lstm_hidden_dim used for the output layer.
            bidirectional=bidirectional,
            batch_first=True,
            dropout=dropout,
        )
        if self.skip_inputs:
            last_in_dim = num_direction * lstm_hidden_dim + in_dim
        else:
            last_in_dim = num_direction * lstm_hidden_dim
        if self.use_mdn:
            self.mdn_layer = MDNLayer(
                last_in_dim, out_dim, num_gaussians=num_gaussians, dim_wise=dim_wise
            )
        else:
            self.fc = nn.Linear(last_in_dim, out_dim)
        init_weights(self, init_type)

    def prediction_type(self):
        """Return PROBABILISTIC when MDN is used, DETERMINISTIC otherwise."""
        return (
            PredictionType.PROBABILISTIC
            if self.use_mdn
            else PredictionType.DETERMINISTIC
        )

    def has_residual_lf0_prediction(self):
        """Return True; this model predicts log-F0 as a residual."""
        return True

    def forward(self, x, lengths=None, y=None):
        """Forward step

        Args:
            x (torch.Tensor): the input tensor
            lengths (torch.Tensor): the lengths of the input tensor
            y (torch.Tensor): the optional target tensor

        Returns:
            tuple: (output features, residual log-F0)
        """
        if isinstance(lengths, torch.Tensor):
            # pack_padded_sequence requires lengths on CPU
            lengths = lengths.to("cpu")
        lf0_score = x[:, :, self.in_lf0_idx].unsqueeze(-1)
        out = self.ff(x)
        out = torch.cat([out, lf0_score], dim=-1)
        out = self.conv(out.transpose(1, 2)).transpose(1, 2)
        sequence = pack_padded_sequence(out, lengths, batch_first=True)
        out, _ = self.lstm(sequence)
        out, _ = pad_packed_sequence(out, batch_first=True)
        out = torch.cat([out, x], dim=-1) if self.skip_inputs else out
        if self.use_mdn:
            log_pi, log_sigma, mu = self.mdn_layer(out)
        else:
            mu = self.fc(out)
        lf0_pred, lf0_residual = predict_lf0_with_residual(
            x,
            mu,
            self.in_lf0_idx,
            self.in_lf0_min,
            self.in_lf0_max,
            self.out_lf0_idx,
            self.out_lf0_mean,
            self.out_lf0_scale,
        )
        # Inject the predicted lf0 into the output features
        if self.use_mdn:
            mu[:, :, :, self.out_lf0_idx] = lf0_pred
        else:
            mu[:, :, self.out_lf0_idx] = lf0_pred.squeeze(-1)
        if self.use_mdn:
            return (log_pi, log_sigma, mu), lf0_residual
        else:
            return mu, lf0_residual

    def inference(self, x, lengths=None):
        """Inference step

        Args:
            x (torch.Tensor): input features
            lengths (torch.Tensor): lengths of input features

        Returns:
            tuple: (mu, sigma) if use_mdn, (output, ) otherwise
        """
        if self.use_mdn:
            (log_pi, log_sigma, mu), _ = self(x, lengths)
            sigma, mu = mdn_get_most_probable_sigma_and_mu(log_pi, log_sigma, mu)
            return mu, sigma
        else:
            return self(x, lengths)[0]
| 6,623 | 32.12 | 84 | py |
nnsvs | nnsvs-master/nnsvs/acoustic_models/__init__.py | from functools import partial
from nnsvs.acoustic_models.multistream import (
MDNMultistreamSeparateF0MelModel,
MultistreamSeparateF0MelModel,
MultistreamSeparateF0ParametricModel,
NPSSMDNMultistreamParametricModel,
NPSSMultistreamParametricModel,
)
from nnsvs.acoustic_models.sinsy import ResSkipF0FFConvLSTM
from nnsvs.acoustic_models.tacotron import (
BiLSTMMDNNonAttentiveDecoder,
BiLSTMNonAttentiveDecoder,
MDNNonAttentiveDecoder,
NonAttentiveDecoder,
)
from nnsvs.acoustic_models.tacotron_f0 import (
BiLSTMResF0NonAttentiveDecoder,
MDNResF0NonAttentiveDecoder,
ResF0NonAttentiveDecoder,
)
from nnsvs.acoustic_models.util import predict_lf0_with_residual
from nnsvs.base import BaseModel, PredictionType
from nnsvs.layers.conv import ResnetBlock, WNConv1d
from nnsvs.mdn import MDNLayer, mdn_get_most_probable_sigma_and_mu
from nnsvs.model import TransformerEncoder, VariancePredictor
from nnsvs.util import init_weights
from torch import nn
# Public API of the acoustic_models package.
__all__ = [
    # Non-autoregressive models
    "ResF0Conv1dResnet",
    "ResSkipF0FFConvLSTM",
    "ResF0VariancePredictor",
    "ResF0TransformerEncoder",
    # Autoregressive models
    "NonAttentiveDecoder",
    "MDNNonAttentiveDecoder",
    "BiLSTMNonAttentiveDecoder",
    "BiLSTMMDNNonAttentiveDecoder",
    "ResF0NonAttentiveDecoder",
    "MDNResF0NonAttentiveDecoder",
    "BiLSTMResF0NonAttentiveDecoder",
    # Multi-stream models
    "MultistreamSeparateF0ParametricModel",
    "NPSSMDNMultistreamParametricModel",
    "NPSSMultistreamParametricModel",
    "MultistreamSeparateF0MelModel",
    "MDNMultistreamSeparateF0MelModel",
]
class ResF0Conv1dResnet(BaseModel):
    """Conv1d + Resnet with residual F0 prediction.

    The residual F0 prediction scheme is inspired by :cite:t:`hono2021sinsy`.

    Args:
        in_dim (int): input dimension
        hidden_dim (int): hidden dimension
        out_dim (int): output dimension
        num_layers (int): number of layers
        in_lf0_idx (int): index of lf0 in input features
        in_lf0_min (float): minimum value of lf0 in the training data of input features
        in_lf0_max (float): maximum value of lf0 in the training data of input features
        out_lf0_idx (int): index of lf0 in output features. Typically 180.
        out_lf0_mean (float): mean of lf0 in the training data of output features
        out_lf0_scale (float): scale of lf0 in the training data of output features
        init_type (str): initialization type
        use_mdn (bool): whether to use MDN or not
        num_gaussians (int): number of gaussians in MDN
        dim_wise (bool): whether to use dimension-wise MDN or not
    """

    def __init__(
        self,
        in_dim,
        hidden_dim,
        out_dim,
        num_layers=4,
        # NOTE: you must carefully set the following parameters
        in_lf0_idx=300,
        in_lf0_min=5.3936276,
        in_lf0_max=6.491111,
        out_lf0_idx=180,
        out_lf0_mean=5.953093881972361,
        out_lf0_scale=0.23435173188961034,
        init_type="none",
        use_mdn=False,
        num_gaussians=8,
        dim_wise=False,
    ):
        super().__init__()
        self.in_lf0_idx = in_lf0_idx
        self.in_lf0_min = in_lf0_min
        self.in_lf0_max = in_lf0_max
        self.out_lf0_idx = out_lf0_idx
        self.out_lf0_mean = out_lf0_mean
        self.out_lf0_scale = out_lf0_scale
        self.use_mdn = use_mdn
        layers = [
            nn.ReflectionPad1d(3),
            WNConv1d(in_dim, hidden_dim, kernel_size=7, padding=0),
        ]
        layers.extend(
            ResnetBlock(hidden_dim, dilation=2 ** n) for n in range(num_layers)
        )
        # When MDN is used, the conv stack outputs hidden features and the
        # MDN layer produces the final distribution parameters.
        final_dim = hidden_dim if use_mdn else out_dim
        layers.extend(
            [
                nn.LeakyReLU(0.2),
                nn.ReflectionPad1d(3),
                WNConv1d(hidden_dim, final_dim, kernel_size=7, padding=0),
            ]
        )
        self.model = nn.Sequential(*layers)
        self.mdn_layer = (
            MDNLayer(
                in_dim=hidden_dim,
                out_dim=out_dim,
                num_gaussians=num_gaussians,
                dim_wise=dim_wise,
            )
            if use_mdn
            else None
        )
        init_weights(self, init_type)

    def prediction_type(self):
        """Return PROBABILISTIC when MDN is used, DETERMINISTIC otherwise."""
        if self.use_mdn:
            return PredictionType.PROBABILISTIC
        return PredictionType.DETERMINISTIC

    def has_residual_lf0_prediction(self):
        """Return True; this model predicts log-F0 as a residual."""
        return True

    def forward(self, x, lengths=None, y=None):
        """Forward step

        Args:
            x (torch.Tensor): input features
            lengths (torch.Tensor): lengths of input features
            y (torch.Tensor): output features

        Returns:
            tuple: (output features, residual log-F0)
        """
        hidden = self.model(x.transpose(1, 2)).transpose(1, 2)
        if self.use_mdn:
            log_pi, log_sigma, mu = self.mdn_layer(hidden)
        else:
            log_pi, log_sigma, mu = None, None, hidden
        lf0_pred, lf0_residual = predict_lf0_with_residual(
            x,
            mu,
            self.in_lf0_idx,
            self.in_lf0_min,
            self.in_lf0_max,
            self.out_lf0_idx,
            self.out_lf0_mean,
            self.out_lf0_scale,
        )
        # Overwrite the lf0 dimension with the residual-connected prediction
        if self.use_mdn:
            mu[:, :, :, self.out_lf0_idx] = lf0_pred
            return (log_pi, log_sigma, mu), lf0_residual
        mu[:, :, self.out_lf0_idx] = lf0_pred.squeeze(-1)
        return mu, lf0_residual

    def inference(self, x, lengths=None):
        """Inference step

        Args:
            x (torch.Tensor): input features
            lengths (torch.Tensor): lengths of input features

        Returns:
            tuple: (mu, sigma) if use_mdn, (output, ) otherwise
        """
        if not self.use_mdn:
            return self(x, lengths)[0]
        (log_pi, log_sigma, mu), _ = self(x, lengths)
        sigma, mu = mdn_get_most_probable_sigma_and_mu(log_pi, log_sigma, mu)
        return mu, sigma
# Will be removed in v0.1.0
# Backward-compatible alias: equivalent to ResF0Conv1dResnet(use_mdn=True).
ResF0Conv1dResnetMDN = partial(ResF0Conv1dResnet, use_mdn=True)
class ResF0VariancePredictor(VariancePredictor):
    """Variance predictor of :cite:t:`ren2020fastspeech` with residual F0 prediction.

    Args:
        in_dim (int): the input dimension
        out_dim (int): the output dimension
        num_layers (int): the number of layers
        hidden_dim (int): the hidden dimension
        kernel_size (int): the kernel size
        dropout (float): the dropout rate
        in_lf0_idx (int): the index of the input LF0
        in_lf0_min (float): the minimum value of the input LF0
        in_lf0_max (float): the maximum value of the input LF0
        out_lf0_idx (int): the index of the output LF0
        out_lf0_mean (float): the mean value of the output LF0
        out_lf0_scale (float): the scale value of the output LF0
        init_type (str): the initialization type
        use_mdn (bool): whether to use MDN or not
        num_gaussians (int): the number of gaussians
        dim_wise (bool): whether to use dim-wise or not
    """

    def __init__(
        self,
        in_dim,
        out_dim,
        num_layers=5,
        hidden_dim=256,
        kernel_size=5,
        dropout=0.5,
        init_type="none",
        use_mdn=False,
        num_gaussians=1,
        dim_wise=False,
        # NOTE: you must carefully set the following parameters
        in_lf0_idx=300,
        in_lf0_min=5.3936276,
        in_lf0_max=6.491111,
        out_lf0_idx=180,
        out_lf0_mean=5.953093881972361,
        out_lf0_scale=0.23435173188961034,
    ):
        super().__init__(
            in_dim=in_dim,
            out_dim=out_dim,
            num_layers=num_layers,
            hidden_dim=hidden_dim,
            kernel_size=kernel_size,
            dropout=dropout,
            init_type=init_type,
            use_mdn=use_mdn,
            num_gaussians=num_gaussians,
            dim_wise=dim_wise,
        )
        self.in_lf0_idx = in_lf0_idx
        self.in_lf0_min = in_lf0_min
        self.in_lf0_max = in_lf0_max
        self.out_lf0_idx = out_lf0_idx
        self.out_lf0_mean = out_lf0_mean
        self.out_lf0_scale = out_lf0_scale

    def has_residual_lf0_prediction(self):
        """Return True; this model predicts log-F0 as a residual."""
        return True

    def forward(self, x, lengths=None, y=None):
        """Forward step

        Args:
            x (torch.Tensor): input features
            lengths (torch.Tensor): lengths of input features
            y (torch.Tensor): output features

        Returns:
            tuple: (output features, residual log-F0)
        """
        preds = super().forward(x, lengths, y)
        if self.use_mdn:
            log_pi, log_sigma, mu = preds
        else:
            log_pi, log_sigma, mu = None, None, preds
        lf0_pred, lf0_residual = predict_lf0_with_residual(
            x,
            mu,
            self.in_lf0_idx,
            self.in_lf0_min,
            self.in_lf0_max,
            self.out_lf0_idx,
            self.out_lf0_mean,
            self.out_lf0_scale,
        )
        # Overwrite the lf0 dimension with the residual-connected prediction
        if self.use_mdn:
            mu[:, :, :, self.out_lf0_idx] = lf0_pred
            return (log_pi, log_sigma, mu), lf0_residual
        mu[:, :, self.out_lf0_idx] = lf0_pred.squeeze(-1)
        return mu, lf0_residual

    def inference(self, x, lengths=None):
        """Inference step

        Args:
            x (torch.Tensor): input features
            lengths (torch.Tensor): lengths of input features

        Returns:
            tuple: (mu, sigma) if use_mdn, (output, ) otherwise
        """
        if not self.use_mdn:
            return self(x, lengths)[0]
        (log_pi, log_sigma, mu), _ = self(x, lengths)
        sigma, mu = mdn_get_most_probable_sigma_and_mu(log_pi, log_sigma, mu)
        return mu, sigma
class ResF0TransformerEncoder(BaseModel):
    """Transformer encoder with residual f0 prediction

    Args:
        in_dim (int): input dimension
        out_dim (int): output dimension
        hidden_dim (int): hidden dimension
        attention_dim (int): attention dimension (passed to TransformerEncoder)
        num_heads (int): number of attention heads
        num_layers (int): number of encoder layers
        kernel_size (int): kernel size (passed to TransformerEncoder)
        dropout (float): dropout rate
        reduction_factor (int): reduction factor
        init_type (str): initialization type
        downsample_by_conv (bool): whether to downsample by conv
        in_lf0_idx (int): index of lf0 in input features
        in_lf0_min (float): minimum of lf0 in the training data of input features
        in_lf0_max (float): maximum of lf0 in the training data of input features
        out_lf0_idx (int): index of lf0 in output features
        out_lf0_mean (float): mean of lf0 in the training data of output features
        out_lf0_scale (float): scale of lf0 in the training data of output features
    """

    def __init__(
        self,
        in_dim,
        out_dim,
        hidden_dim,
        attention_dim,
        num_heads=2,
        num_layers=2,
        kernel_size=3,
        dropout=0.1,
        reduction_factor=1,
        init_type="none",
        downsample_by_conv=False,
        # NOTE: you must carefully set the following parameters
        in_lf0_idx=300,
        in_lf0_min=5.3936276,
        in_lf0_max=6.491111,
        out_lf0_idx=180,
        out_lf0_mean=5.953093881972361,
        out_lf0_scale=0.23435173188961034,
    ):
        super().__init__()
        self.in_lf0_idx = in_lf0_idx
        self.in_lf0_min = in_lf0_min
        self.in_lf0_max = in_lf0_max
        self.out_lf0_idx = out_lf0_idx
        self.out_lf0_mean = out_lf0_mean
        self.out_lf0_scale = out_lf0_scale
        self.reduction_factor = reduction_factor
        self.encoder = TransformerEncoder(
            in_dim=in_dim,
            out_dim=out_dim,
            hidden_dim=hidden_dim,
            attention_dim=attention_dim,
            num_heads=num_heads,
            num_layers=num_layers,
            kernel_size=kernel_size,
            dropout=dropout,
            reduction_factor=reduction_factor,
            init_type=init_type,
            downsample_by_conv=downsample_by_conv,
        )

    def has_residual_lf0_prediction(self):
        """Return True; this model predicts log-F0 as a residual."""
        return True

    def forward(self, x, lengths=None, y=None):
        """Forward pass

        Args:
            x (torch.Tensor): input tensor
            lengths (torch.Tensor): input sequence lengths
            y (torch.Tensor): target tensor (optional)

        Returns:
            tuple: (output tensor, residual log-F0)
        """
        outs = self.encoder(x, lengths)
        lf0_pred, lf0_residual = predict_lf0_with_residual(
            x,
            outs,
            self.in_lf0_idx,
            self.in_lf0_min,
            self.in_lf0_max,
            self.out_lf0_idx,
            self.out_lf0_mean,
            self.out_lf0_scale,
        )
        # Inject the predicted lf0 into the output features
        outs[:, :, self.out_lf0_idx] = lf0_pred.squeeze(-1)
        return outs, lf0_residual

    def inference(self, x, lengths=None):
        """Inference step

        Args:
            x (torch.Tensor): input features
            lengths (torch.Tensor): lengths of input features

        Returns:
            torch.Tensor: output features
        """
        # NOTE: ``lengths`` now defaults to None for consistency with the
        # ``inference`` signatures of the other acoustic models; positional
        # calls keep working unchanged.
        return self(x, lengths)[0]
def LSTMEncoder(*args, **kwargs):
    """Construct :class:`nnsvs.model.LSTMEncoder`.

    The import is deferred to call time — presumably to avoid a circular
    import between this package and ``nnsvs.model`` (verify before hoisting
    it to the top of the file).
    """
    from nnsvs.model import LSTMEncoder as _LSTMEncoder

    return _LSTMEncoder(*args, **kwargs)
| 12,451 | 29.669951 | 87 | py |
nnsvs | nnsvs-master/nnsvs/bin/train_postfilter.py | from functools import partial
from pathlib import Path
import hydra
import mlflow
import numpy as np
import torch
import torch.distributed as dist
from hydra.utils import to_absolute_path
from nnsvs.multistream import select_streams
from nnsvs.train_util import (
collate_fn_default,
collate_fn_random_segments,
compute_distortions,
eval_model,
log_params_from_omegaconf_dict,
save_checkpoint,
save_configs,
setup_gan,
)
from nnsvs.util import PyTorchStandardScaler, load_vocoder, make_non_pad_mask
from omegaconf import DictConfig
from torch import nn
from torch.cuda.amp import autocast
from torch.nn import functional as F
def train_step(
    model_config,
    optim_config,
    netG,
    optG,
    netD,
    optD,
    grad_scaler,
    train,
    in_feats,
    out_feats,
    lengths,
    out_scaler,
    mse_weight=1.0,
    adv_weight=1.0,
    adv_streams=None,
    fm_weight=0.0,
    mask_nth_mgc_for_adv_loss=0,
    gan_type="lsgan",
    vuv_mask=False,
):
    """Run one GAN training (or evaluation) step for the post-filter.

    Updates the discriminator first, then the generator with
    MSE + adversarial (+ optional feature-matching) losses.

    Args:
        model_config: model config (provides ``stream_sizes``)
        optim_config: optimizer config (provides ``netG/netD.clip_norm``)
        netG (nn.Module): generator
        optG: generator optimizer
        netD (nn.Module): multi-scale discriminator; returns a list of
            lists of feature maps, the last entry being the output
        optD: discriminator optimizer
        grad_scaler: AMP grad scaler, or None to disable mixed precision
        train (bool): True for a training step, False for evaluation
        in_feats (torch.Tensor): input features (B, T, C_in)
        out_feats (torch.Tensor): target features (B, T, C_out)
        lengths (torch.Tensor): frame lengths of each utterance
        out_scaler: scaler used for distortion metrics
        mse_weight (float): weight of the MSE loss
        adv_weight (float): weight of the adversarial loss
        adv_streams (list): per-stream flags selecting streams for the
            adversarial loss
        fm_weight (float): weight of the feature matching loss
        mask_nth_mgc_for_adv_loss (int): number of leading mgc dims to
            exclude from the discriminator inputs
        gan_type (str): "lsgan", "vanilla-gan" or "hinge"
        vuv_mask (bool): mask discriminator inputs by the V/UV flag

    Returns:
        tuple: (generator loss, dict of metrics to log)
    """
    netG.train() if train else netG.eval()
    netD.train() if train else netD.eval()
    log_metrics = {}

    if vuv_mask:
        # NOTE: Assuming 3rd stream is the V/UV
        vuv_idx = np.sum(model_config.stream_sizes[:2])
        is_v = torch.logical_and(
            out_feats[:, :, vuv_idx : vuv_idx + 1] > 0,
            in_feats[:, :, vuv_idx : vuv_idx + 1] > 0,
        )
        vuv = is_v
    else:
        vuv = 1.0

    # Run forward
    with autocast(enabled=grad_scaler is not None):
        pred_out_feats = netG(in_feats, lengths)

    real_netD_in_feats = select_streams(
        out_feats, model_config.stream_sizes, adv_streams
    )
    fake_netD_in_feats = select_streams(
        pred_out_feats,
        model_config.stream_sizes,
        adv_streams,
    )

    # Ref: http://sython.org/papers/ASJ/saito2017asja.pdf
    # 0-th mgc with adversarial trainging affects speech quality
    # NOTE: assuming that the first stream contains mgc
    if mask_nth_mgc_for_adv_loss > 0:
        real_netD_in_feats = real_netD_in_feats[:, :, mask_nth_mgc_for_adv_loss:]
        fake_netD_in_feats = fake_netD_in_feats[:, :, mask_nth_mgc_for_adv_loss:]

    # Real
    with autocast(enabled=grad_scaler is not None):
        D_real = netD(real_netD_in_feats * vuv, in_feats, lengths)
        # NOTE: must be list of list to support multi-scale discriminators
        assert isinstance(D_real, list) and isinstance(D_real[-1], list)
        # Fake
        D_fake_det = netD(fake_netD_in_feats.detach() * vuv, in_feats, lengths)

    # Mask (B, T, 1)
    mask = make_non_pad_mask(lengths).unsqueeze(-1).to(in_feats.device)

    # Update discriminator
    eps = 1e-14
    loss_real = 0
    loss_fake = 0
    with autocast(enabled=grad_scaler is not None):
        for idx, (D_real_, D_fake_det_) in enumerate(zip(D_real, D_fake_det)):
            if gan_type == "lsgan":
                loss_real_ = (D_real_[-1] - 1) ** 2
                loss_fake_ = D_fake_det_[-1] ** 2
            elif gan_type == "vanilla-gan":
                loss_real_ = -torch.log(D_real_[-1] + eps)
                loss_fake_ = -torch.log(1 - D_fake_det_[-1] + eps)
            elif gan_type == "hinge":
                loss_real_ = F.relu(1 - D_real_[-1])
                loss_fake_ = F.relu(1 + D_fake_det_[-1])
            else:
                raise ValueError(f"Unknown gan type: {gan_type}")

            # mask for D: use a strided mask when the discriminator
            # downsamples the time axis, the full mask when shapes match,
            # and no mask otherwise.
            if (
                hasattr(netD, "downsample_scale")
                and mask.shape[1] // netD.downsample_scale == D_real_[-1].shape[1]
            ):
                D_mask = mask[:, :: netD.downsample_scale, :]
            else:
                if D_real_[-1].shape[1] == out_feats.shape[1]:
                    D_mask = mask
                else:
                    D_mask = None

            if D_mask is not None:
                loss_real_ = loss_real_.masked_select(D_mask).mean()
                loss_fake_ = loss_fake_.masked_select(D_mask).mean()
            else:
                loss_real_ = loss_real_.mean()
                loss_fake_ = loss_fake_.mean()

            log_metrics[f"Loss_Real_Scale{idx}"] = loss_real_.item()
            log_metrics[f"Loss_Fake_Scale{idx}"] = loss_fake_.item()

            loss_real += loss_real_
            loss_fake += loss_fake_
        loss_d = loss_real + loss_fake

    if train:
        optD.zero_grad()
        if grad_scaler is not None:
            grad_scaler.scale(loss_d).backward()
            grad_scaler.unscale_(optD)
            grad_norm_d = torch.nn.utils.clip_grad_norm_(
                netD.parameters(), optim_config.netD.clip_norm
            )
            log_metrics["GradNorm_D"] = grad_norm_d
            grad_scaler.step(optD)
        else:
            loss_d.backward()
            grad_norm_d = torch.nn.utils.clip_grad_norm_(
                netD.parameters(), optim_config.netD.clip_norm
            )
            log_metrics["GradNorm_D"] = grad_norm_d
            optD.step()

    # MSE loss
    with autocast(enabled=grad_scaler is not None):
        loss_feats = nn.MSELoss(reduction="none")(
            pred_out_feats.masked_select(mask), out_feats.masked_select(mask)
        ).mean()

    # adversarial loss
    with autocast(enabled=grad_scaler is not None):
        D_fake = netD(fake_netD_in_feats * vuv, in_feats, lengths)
        loss_adv = 0
        for idx, D_fake_ in enumerate(D_fake):
            if gan_type == "lsgan":
                loss_adv_ = (1 - D_fake_[-1]) ** 2
            elif gan_type == "vanilla-gan":
                loss_adv_ = -torch.log(D_fake_[-1] + eps)
            elif gan_type == "hinge":
                loss_adv_ = -D_fake_[-1]
            else:
                raise ValueError(f"Unknown gan type: {gan_type}")

            if (
                hasattr(netD, "downsample_scale")
                and mask.shape[1] // netD.downsample_scale == D_fake_[-1].shape[1]
            ):
                D_mask = mask[:, :: netD.downsample_scale, :]
            else:
                # BUGFIX: this check previously read ``D_real_`` — a stale
                # variable left over from the discriminator loop above
                # (always the last scale) — instead of the current scale's
                # ``D_fake_``, which is the tensor actually being masked.
                if D_fake_[-1].shape[1] == out_feats.shape[1]:
                    D_mask = mask
                else:
                    D_mask = None

            if D_mask is not None:
                loss_adv_ = loss_adv_.masked_select(D_mask).mean()
            else:
                loss_adv_ = loss_adv_.mean()

            log_metrics[f"Loss_Adv_Scale{idx}"] = loss_adv_.item()

            loss_adv += loss_adv_

    # Feature matching loss
    loss_fm = torch.tensor(0.0).to(in_feats.device)
    if fm_weight > 0:
        for D_fake_, D_real_ in zip(D_fake, D_real):
            for fake_fmap, real_fmap in zip(D_fake_[:-1], D_real_[:-1]):
                loss_fm += F.l1_loss(fake_fmap, real_fmap.detach())

    loss = mse_weight * loss_feats + adv_weight * loss_adv + fm_weight * loss_fm

    if train:
        optG.zero_grad()
        if grad_scaler is not None:
            grad_scaler.scale(loss).backward()
            grad_scaler.unscale_(optG)
            grad_norm_g = torch.nn.utils.clip_grad_norm_(
                netG.parameters(), optim_config.netG.clip_norm
            )
            log_metrics["GradNorm_G"] = grad_norm_g
            grad_scaler.step(optG)
        else:
            loss.backward()
            grad_norm_g = torch.nn.utils.clip_grad_norm_(
                netG.parameters(), optim_config.netG.clip_norm
            )
            log_metrics["GradNorm_G"] = grad_norm_g
            optG.step()

    # NOTE: this shouldn't be called multiple times in a training step
    if train and grad_scaler is not None:
        grad_scaler.update()

    # Metrics
    distortions = compute_distortions(
        pred_out_feats, out_feats, lengths, out_scaler, model_config
    )
    log_metrics.update(distortions)
    log_metrics.update(
        {
            "Loss": loss.item(),
            "Loss_Feats": loss_feats.item(),
            "Loss_Adv_Total": loss_adv.item(),
            "Loss_Feature_Matching": loss_fm.item(),
            "Loss_Real_Total": loss_real.item(),
            "Loss_Fake_Total": loss_fake.item(),
            "Loss_D": loss_d.item(),
        }
    )

    return loss, log_metrics
def train_loop(
    config,
    logger,
    device,
    netG,
    optG,
    schedulerG,
    netD,
    optD,
    schedulerD,
    grad_scaler,
    data_loaders,
    samplers,
    writer,
    out_scaler,
    use_mlflow,
    vocoder,
    vocoder_in_scaler,
):
    """GAN training loop for the post-filter.

    Iterates over epochs and train/dev phases, delegating each batch to
    :func:`train_step`, logging metrics to TensorBoard and (optionally)
    MLflow, stepping LR schedulers once per epoch, and saving checkpoints
    (best-dev and periodic) for both generator and discriminator.

    Returns:
        float: the dev loss of the last epoch.
    """
    out_dir = Path(to_absolute_path(config.train.out_dir))
    best_dev_loss = torch.finfo(torch.float32).max
    last_dev_loss = torch.finfo(torch.float32).max
    adv_streams = config.train.adv_streams
    if len(adv_streams) != len(config.model.stream_sizes):
        raise ValueError("adv_streams must be specified for all streams")
    # On non-rank-0 DDP workers, replace tqdm by a no-op to keep logs clean.
    if dist.is_initialized() and dist.get_rank() != 0:
        def tqdm(x, **kwargs):
            return x
    else:
        from tqdm import tqdm
    train_iter = 1
    for epoch in tqdm(range(1, config.train.nepochs + 1)):
        for phase in data_loaders.keys():
            train = phase.startswith("train")
            # https://pytorch.org/docs/stable/data.html#torch.utils.data.distributed.DistributedSampler
            if dist.is_initialized() and train and samplers[phase] is not None:
                samplers[phase].set_epoch(epoch)
            running_loss = 0
            running_metrics = {}
            evaluated = False
            for in_feats, out_feats, lengths in tqdm(
                data_loaders[phase], desc=f"{phase} iter", leave=False
            ):
                # NOTE: This is needed for pytorch's PackedSequence
                lengths, indices = torch.sort(lengths, dim=0, descending=True)
                in_feats, out_feats = (
                    in_feats[indices].to(device),
                    out_feats[indices].to(device),
                )
                # Render/log eval samples once per dev phase, on its first batch.
                if (not train) and (not evaluated):
                    eval_model(
                        phase,
                        epoch,
                        netG,
                        in_feats,
                        out_feats,
                        lengths,
                        config.model,
                        out_scaler,
                        writer,
                        sr=config.data.sample_rate,
                        use_world_codec=config.data.use_world_codec,
                        vocoder=vocoder,
                        vocoder_in_scaler=vocoder_in_scaler,
                        max_num_eval_utts=config.train.max_num_eval_utts,
                    )
                    evaluated = True
                loss, log_metrics = train_step(
                    model_config=config.model,
                    optim_config=config.train.optim,
                    netG=netG,
                    optG=optG,
                    netD=netD,
                    optD=optD,
                    grad_scaler=grad_scaler,
                    train=train,
                    in_feats=in_feats,
                    out_feats=out_feats,
                    lengths=lengths,
                    out_scaler=out_scaler,
                    mse_weight=config.train.mse_weight,
                    adv_weight=config.train.adv_weight,
                    adv_streams=adv_streams,
                    fm_weight=config.train.fm_weight,
                    mask_nth_mgc_for_adv_loss=config.train.mask_nth_mgc_for_adv_loss,
                    gan_type=config.train.gan_type,
                    vuv_mask=config.train.vuv_mask,
                )
                if train:
                    if writer is not None:
                        for key, val in log_metrics.items():
                            writer.add_scalar(f"{key}_Step/{phase}", val, train_iter)
                    train_iter += 1
                running_loss += loss.item()
                # Accumulate per-batch metrics for epoch-level averages.
                for k, v in log_metrics.items():
                    try:
                        running_metrics[k] += float(v)
                    except KeyError:
                        running_metrics[k] = float(v)
            ave_loss = running_loss / len(data_loaders[phase])
            logger.info("[%s] [Epoch %s]: loss %s", phase, epoch, ave_loss)
            if writer is not None:
                writer.add_scalar(f"Loss_Epoch/{phase}", ave_loss, epoch)
            if use_mlflow:
                mlflow.log_metric(f"{phase}_loss", ave_loss, step=epoch)
            for k, v in running_metrics.items():
                ave_v = v / len(data_loaders[phase])
                if writer is not None:
                    writer.add_scalar(f"{k}_Epoch/{phase}", ave_v, epoch)
                if use_mlflow:
                    mlflow.log_metric(f"{phase}_{k}", ave_v, step=epoch)
            if not train:
                last_dev_loss = ave_loss
            # Save "best" checkpoints (G and D) whenever dev loss improves.
            if not train and ave_loss < best_dev_loss:
                best_dev_loss = ave_loss
                for model, opt, scheduler, postfix in [
                    (netG, optG, schedulerG, ""),
                    (netD, optD, schedulerD, "_D"),
                ]:
                    save_checkpoint(
                        logger,
                        out_dir,
                        model,
                        opt,
                        scheduler,
                        epoch,
                        is_best=True,
                        postfix=postfix,
                    )
        schedulerG.step()
        schedulerD.step()
        # Periodic (non-best) checkpoints for both networks.
        if epoch % config.train.checkpoint_epoch_interval == 0:
            for model, opt, scheduler, postfix in [
                (netG, optG, schedulerG, ""),
                (netD, optD, schedulerD, "_D"),
            ]:
                save_checkpoint(
                    logger,
                    out_dir,
                    model,
                    opt,
                    scheduler,
                    epoch,
                    is_best=False,
                    postfix=postfix,
                )
    # Final checkpoints at the end of training.
    for model, opt, scheduler, postfix in [
        (netG, optG, schedulerG, ""),
        (netD, optD, schedulerD, "_D"),
    ]:
        save_checkpoint(
            logger,
            out_dir,
            model,
            opt,
            scheduler,
            config.train.nepochs,
            postfix=postfix,
        )
    logger.info("The best loss was %s", best_dev_loss)
    if use_mlflow:
        mlflow.log_metric("best_dev_loss", best_dev_loss, step=epoch)
        mlflow.log_artifacts(out_dir)
    return last_dev_loss
@hydra.main(config_path="conf/train_postfilter", config_name="config")
def my_app(config: DictConfig) -> None:
    """Hydra entry point for post-filter GAN training.

    Derives discriminator input size from the selected adversarial streams,
    sets up models/optimizers/data (optionally with DDP and a pretrained
    vocoder for evaluation), then runs :func:`train_loop`, optionally under
    an MLflow run.

    Returns:
        float: the dev loss of the last epoch (used e.g. for hyperparameter
            sweeps).
    """
    # NOTE: set discriminator's in_dim automatically
    if config.model.netD.in_dim is None:
        stream_sizes = np.asarray(config.model.stream_sizes)
        D_in_dim = int((stream_sizes * np.asarray(config.train.adv_streams)).sum())
        if config.train.mask_nth_mgc_for_adv_loss > 0:
            D_in_dim -= config.train.mask_nth_mgc_for_adv_loss
        config.model.netD.in_dim = D_in_dim
    if "stream_sizes" in config.model.netG:
        config.model.netG.stream_sizes = config.model.stream_sizes
    # Train on random fixed-length segments when max_time_frames is set.
    if "max_time_frames" in config.data and config.data.max_time_frames > 0:
        collate_fn = partial(
            collate_fn_random_segments, max_time_frames=config.data.max_time_frames
        )
    else:
        collate_fn = collate_fn_default
    if config.train.use_ddp:
        dist.init_process_group("nccl")
        rank = dist.get_rank()
        device_id = rank % torch.cuda.device_count()
        torch.cuda.set_device(device_id)
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    (
        (netG, optG, schedulerG),
        (netD, optD, schedulerD),
        grad_scaler,
        data_loaders,
        samplers,
        writer,
        logger,
        _,
        out_scaler,
    ) = setup_gan(config, device, collate_fn)
    # Optional neural vocoder used only for rendering evaluation samples.
    path = config.train.pretrained_vocoder_checkpoint
    if path is not None and len(path) > 0:
        logger.info(f"Loading pretrained vocoder checkpoint from {path}")
        vocoder, vocoder_in_scaler = load_vocoder(path, device)
    else:
        vocoder, vocoder_in_scaler = None, None
    # Move the scaler to the device for on-GPU (de)normalization.
    out_scaler = PyTorchStandardScaler(
        torch.from_numpy(out_scaler.mean_), torch.from_numpy(out_scaler.scale_)
    ).to(device)
    use_mlflow = config.mlflow.enabled
    if use_mlflow:
        with mlflow.start_run() as run:
            # NOTE: modify out_dir when running with mlflow
            config.train.out_dir = f"{config.train.out_dir}/{run.info.run_id}"
            save_configs(config)
            log_params_from_omegaconf_dict(config)
            last_dev_loss = train_loop(
                config,
                logger,
                device,
                netG,
                optG,
                schedulerG,
                netD,
                optD,
                schedulerD,
                grad_scaler,
                data_loaders,
                samplers,
                writer,
                out_scaler,
                use_mlflow,
                vocoder,
                vocoder_in_scaler,
            )
    else:
        save_configs(config)
        last_dev_loss = train_loop(
            config,
            logger,
            device,
            netG,
            optG,
            schedulerG,
            netD,
            optD,
            schedulerD,
            grad_scaler,
            data_loaders,
            samplers,
            writer,
            out_scaler,
            use_mlflow,
            vocoder,
            vocoder_in_scaler,
        )
    return last_dev_loss
def entry():
    """Console-script entry point (see the package's setup configuration)."""
    my_app()
# Allow running this module directly as a script.
if __name__ == "__main__":
    my_app()
| 17,689 | 31.820037 | 103 | py |
nnsvs | nnsvs-master/nnsvs/bin/generate.py | # coding: utf-8
import os
from os.path import basename, join
import hydra
import joblib
import numpy as np
import torch
from hydra.utils import to_absolute_path
from nnmnkwii.datasets import FileSourceDataset
from nnsvs.base import PredictionType
from nnsvs.logger import getLogger
from nnsvs.multistream import get_windows, multi_stream_mlpg
from nnsvs.train_util import NpyFileSource
from omegaconf import DictConfig, OmegaConf
from tqdm import tqdm
# Module-level logger; populated inside my_app() once verbosity is known.
logger = None
# Cache CUDA availability at import time to choose the inference device.
use_cuda = torch.cuda.is_available()
@hydra.main(config_path="conf/generate", config_name="config")
def my_app(config: DictConfig) -> None:
global logger
logger = getLogger(config.verbose)
logger.info(OmegaConf.to_yaml(config))
device = torch.device("cuda" if use_cuda else "cpu")
in_dir = to_absolute_path(config.in_dir)
out_dir = to_absolute_path(config.out_dir)
os.makedirs(out_dir, exist_ok=True)
model_config = OmegaConf.load(to_absolute_path(config.model.model_yaml))
model = hydra.utils.instantiate(model_config.netG).to(device)
checkpoint = torch.load(
to_absolute_path(config.model.checkpoint),
map_location=lambda storage, loc: storage,
)
model.load_state_dict(checkpoint["state_dict"])
model.eval()
scaler = joblib.load(to_absolute_path(config.out_scaler_path))
in_feats = FileSourceDataset(NpyFileSource(in_dir, logger))
with torch.no_grad():
for idx in tqdm(range(len(in_feats))):
feats = torch.from_numpy(in_feats[idx]).unsqueeze(0).to(device)
if model.prediction_type() == PredictionType.PROBABILISTIC:
max_mu, max_sigma = model.inference(feats, [feats.shape[1]])
if np.any(model_config.has_dynamic_features):
# Apply denormalization
# (B, T, D_out) -> (T, D_out)
max_sigma_sq = (
max_sigma.squeeze(0).cpu().data.numpy() ** 2 * scaler.var_
)
max_mu = scaler.inverse_transform(
max_mu.squeeze(0).cpu().data.numpy()
)
# Apply MLPG
# (T, D_out) -> (T, static_dim)
out = multi_stream_mlpg(
max_mu,
max_sigma_sq,
get_windows(model_config.num_windows),
model_config.stream_sizes,
model_config.has_dynamic_features,
)
else:
# (T, D_out)
out = max_mu.squeeze(0).cpu().data.numpy()
out = scaler.inverse_transform(out)
else:
out = (
model.inference(feats, [feats.shape[1]])
.squeeze(0)
.cpu()
.data.numpy()
)
out = scaler.inverse_transform(out)
# Apply MLPG if necessary
if np.any(model_config.has_dynamic_features):
out = multi_stream_mlpg(
out,
scaler.var_,
get_windows(model_config.num_windows),
model_config.stream_sizes,
model_config.has_dynamic_features,
)
name = basename(in_feats.collected_files[idx][0])
out_path = join(out_dir, name)
np.save(out_path, out, allow_pickle=False)
def entry():
    """Console-script entry point; defers to the Hydra-decorated ``my_app``."""
    my_app()


if __name__ == "__main__":
    my_app()
| 3,601 | 32.045872 | 82 | py |
nnsvs | nnsvs-master/nnsvs/bin/train_acoustic.py | from functools import partial
from pathlib import Path
import hydra
import mlflow
import torch
import torch.distributed as dist
from hydra.utils import to_absolute_path
from nnsvs.base import PredictionType
from nnsvs.mdn import mdn_get_most_probable_sigma_and_mu, mdn_loss
from nnsvs.multistream import split_streams
from nnsvs.svs import load_vocoder
from nnsvs.train_util import (
check_resf0_config,
collate_fn_default,
collate_fn_random_segments,
compute_batch_pitch_regularization_weight,
compute_distortions,
eval_model,
get_stream_weight,
log_params_from_omegaconf_dict,
save_checkpoint,
save_configs,
setup,
)
from nnsvs.util import PyTorchStandardScaler, make_non_pad_mask, make_pad_mask
from omegaconf import DictConfig
from torch import nn
from torch.cuda.amp import autocast
from torch.nn.parallel import DistributedDataParallel as DDP
def train_step(
    logger,
    model,
    model_config,
    optim_config,
    optimizer,
    grad_scaler,
    train,
    in_feats,
    out_feats,
    lengths,
    out_scaler,
    feats_criterion="mse",
    pitch_reg_dyn_ws=1.0,
    pitch_reg_weight=1.0,
    stream_wise_loss=False,
    stream_weights=None,
):
    """Run one optimization (train=True) or evaluation (train=False) step.

    Handles three kinds of model heads: multistream hybrid (per-stream MDN /
    diffusion / deterministic outputs), a single MDN head, and plain
    deterministic output(s), plus an optional residual-log-F0 pitch
    regularization term.

    Returns:
        tuple: ``(loss, log_metrics)`` — the scalar loss tensor and a dict of
        scalar metrics (loss components, grad norm, and — in eval mode —
        objective distortions).
    """
    model.train() if train else model.eval()
    optimizer.zero_grad()
    log_metrics = {}

    if feats_criterion in ["l2", "mse"]:
        criterion = nn.MSELoss(reduction="none")
    elif feats_criterion in ["l1", "mae"]:
        criterion = nn.L1Loss(reduction="none")
    else:
        raise RuntimeError("not supported criterion")

    # DataParallel/DDP wrap the real model; unwrap to query its head type.
    prediction_type = (
        model.module.prediction_type()
        if isinstance(model, nn.DataParallel) or isinstance(model, DDP)
        else model.prediction_type()
    )

    # Run forward
    with autocast(enabled=grad_scaler is not None):
        outs = model(in_feats, lengths, out_feats)
        if model.has_residual_lf0_prediction():
            pred_out_feats, lf0_residual = outs
        else:
            pred_out_feats, lf0_residual = outs, None

    # Mask (B, T, 1)
    with torch.no_grad():
        mask = make_non_pad_mask(lengths).unsqueeze(-1).to(in_feats.device)

    # Compute loss
    if prediction_type == PredictionType.MULTISTREAM_HYBRID:
        loss_feats = 0
        streams = split_streams(out_feats, model_config.stream_sizes)
        assert len(streams) == len(pred_out_feats)
        if stream_wise_loss:
            weights = get_stream_weight(stream_weights, model_config.stream_sizes).to(
                in_feats.device
            )
        else:
            weights = [None] * len(streams)
        # N counts element-wise losses so we can take a global mean below.
        N = 0
        for pred_stream, stream, sw in zip(pred_out_feats, streams, weights):
            # DDPM
            if isinstance(pred_stream, tuple) and len(pred_stream) == 2:
                noise, x_recon = pred_stream
                loss_feats_ = criterion(
                    noise.masked_select(mask), x_recon.masked_select(mask)
                )
                if stream_wise_loss:
                    loss_feats += sw * loss_feats_.mean()
                else:
                    loss_feats += loss_feats_.sum()
                    N += len(loss_feats_.view(-1))
            # MDN
            elif isinstance(pred_stream, tuple) and len(pred_stream) == 3:
                pi, sigma, mu = pred_stream
                # (B, max(T)) or (B, max(T), D_out)
                mask_ = mask if len(pi.shape) == 4 else mask.squeeze(-1)
                # Compute loss and apply mask
                with autocast(enabled=grad_scaler is not None):
                    loss_feats_ = mdn_loss(
                        pi, sigma, mu, stream, reduce=False
                    ).masked_select(mask_)
                if stream_wise_loss:
                    loss_feats += sw * loss_feats_.mean()
                else:
                    loss_feats += loss_feats_.sum()
                    N += len(loss_feats_.view(-1))
            else:
                # non-MDN
                with autocast(enabled=grad_scaler is not None):
                    loss_feats_ = criterion(
                        pred_stream.masked_select(mask), stream.masked_select(mask)
                    )
                if stream_wise_loss:
                    loss_feats += sw * loss_feats_.mean()
                else:
                    loss_feats += loss_feats_.sum()
                    N += len(loss_feats_.view(-1))
        # NOTE: Mean over batch, time and feature axis
        if not stream_wise_loss:
            loss_feats /= N
    elif prediction_type == PredictionType.PROBABILISTIC:
        pi, sigma, mu = pred_out_feats
        # (B, max(T)) or (B, max(T), D_out)
        mask_ = mask if len(pi.shape) == 4 else mask.squeeze(-1)
        # Compute loss and apply mask
        with autocast(enabled=grad_scaler is not None):
            loss_feats = mdn_loss(pi, sigma, mu, out_feats, reduce=False)
            loss_feats = loss_feats.masked_select(mask_).mean()
    else:
        with autocast(enabled=grad_scaler is not None):
            if not isinstance(pred_out_feats, list):
                # NOTE: treat as multiple predictions
                pred_out_feats = [pred_out_feats]
            loss_feats = 0
            for pred_out_feats_ in pred_out_feats:
                if stream_wise_loss:
                    weights = get_stream_weight(
                        stream_weights, model_config.stream_sizes
                    ).to(in_feats.device)
                    streams = split_streams(out_feats, model_config.stream_sizes)
                    pred_streams = split_streams(
                        pred_out_feats_, model_config.stream_sizes
                    )
                else:
                    streams = [out_feats]
                    pred_streams = [pred_out_feats_]
                    weights = [1.0]
                for pred_stream, stream, sw in zip(pred_streams, streams, weights):
                    with autocast(enabled=grad_scaler is not None):
                        loss_feats += (
                            sw
                            * criterion(
                                pred_stream.masked_select(mask),
                                stream.masked_select(mask),
                            ).mean()
                        )

    # Pitch regularization
    # NOTE: l1 loss seems to be better than mse loss in my experiments
    # we could use l2 loss as suggested in the sinsy's paper
    if pitch_reg_weight > 0.0 and lf0_residual is not None:
        with autocast(enabled=grad_scaler is not None):
            if isinstance(lf0_residual, list):
                loss_pitch = 0
                for lf0_residual_ in lf0_residual:
                    loss_pitch += (
                        (pitch_reg_dyn_ws * lf0_residual_.abs())
                        .masked_select(mask)
                        .mean()
                    )
            else:
                loss_pitch = (
                    (pitch_reg_dyn_ws * lf0_residual.abs()).masked_select(mask).mean()
                )
    else:
        loss_pitch = torch.tensor(0.0).to(in_feats.device)

    loss = loss_feats + pitch_reg_weight * loss_pitch

    if not train:
        # In eval mode, reduce the raw predictions to a single deterministic
        # feature tensor so objective distortions can be computed.
        with torch.no_grad():
            if prediction_type == PredictionType.MULTISTREAM_HYBRID:
                if len(pred_out_feats) == 4:
                    pred_mgc, pred_lf0, pred_vuv, pred_bap = pred_out_feats
                    if isinstance(pred_lf0, tuple) and len(pred_lf0) == 3:
                        pred_lf0 = mdn_get_most_probable_sigma_and_mu(*pred_lf0)[1]
                    elif isinstance(pred_lf0, tuple) and len(pred_lf0) == 2:
                        # Diffusion case: noise
                        pred_lf0 = pred_lf0[1]
                    if isinstance(pred_mgc, tuple) and len(pred_mgc) == 3:
                        pred_mgc = mdn_get_most_probable_sigma_and_mu(*pred_mgc)[1]
                    elif isinstance(pred_mgc, tuple) and len(pred_mgc) == 2:
                        # Diffusion case: noise
                        pred_mgc = pred_mgc[1]
                    if isinstance(pred_bap, tuple) and len(pred_bap) == 3:
                        pred_bap = mdn_get_most_probable_sigma_and_mu(*pred_bap)[1]
                    elif isinstance(pred_bap, tuple) and len(pred_bap) == 2:
                        # Diffusion case: noise
                        pred_bap = pred_bap[1]
                    pred_out_feats_ = torch.cat(
                        [pred_mgc, pred_lf0, pred_vuv, pred_bap], dim=-1
                    )
                elif len(pred_out_feats) == 3:
                    pred_mel, pred_lf0, pred_vuv = pred_out_feats
                    if isinstance(pred_lf0, tuple) and len(pred_lf0) == 3:
                        pred_lf0 = mdn_get_most_probable_sigma_and_mu(*pred_lf0)[1]
                    elif isinstance(pred_lf0, tuple) and len(pred_lf0) == 2:
                        # Diffusion case: noise
                        pred_lf0 = pred_lf0[1]
                    if isinstance(pred_mel, tuple) and len(pred_mel) == 3:
                        pred_mel = mdn_get_most_probable_sigma_and_mu(*pred_mel)[1]
                    elif isinstance(pred_mel, tuple) and len(pred_mel) == 2:
                        # Diffusion case: noise
                        pred_mel = pred_mel[1]
                    pred_out_feats_ = torch.cat([pred_mel, pred_lf0, pred_vuv], dim=-1)
                else:
                    raise RuntimeError("not supported")
            elif prediction_type == PredictionType.PROBABILISTIC:
                pred_out_feats_ = mdn_get_most_probable_sigma_and_mu(pi, sigma, mu)[1]
            else:
                if isinstance(pred_out_feats, list):
                    pred_out_feats_ = pred_out_feats[-1]
                else:
                    pred_out_feats_ = pred_out_feats
            distortions = compute_distortions(
                pred_out_feats_, out_feats, lengths, out_scaler, model_config
            )
            log_metrics.update(distortions)

    if train:
        if grad_scaler is not None:
            # AMP path: scale, unscale before clipping, then conditional step.
            grad_scaler.scale(loss).backward()
            grad_scaler.unscale_(optimizer)
            grad_norm = torch.nn.utils.clip_grad_norm_(
                model.parameters(), optim_config.clip_norm
            )
            if not torch.isfinite(grad_norm):
                logger.info("grad norm is NaN. Skip updating")
            else:
                log_metrics["GradNorm"] = grad_norm
                grad_scaler.step(optimizer)
            grad_scaler.update()
        else:
            loss.backward()
            grad_norm = torch.nn.utils.clip_grad_norm_(
                model.parameters(), optim_config.clip_norm
            )
            if not torch.isfinite(grad_norm):
                logger.info("grad norm is NaN. Skip updating")
            else:
                log_metrics["GradNorm"] = grad_norm
                optimizer.step()

    log_metrics.update(
        {
            "Loss": loss.item(),
            "Loss_Feats": loss_feats.item(),
            "Loss_Pitch": loss_pitch.item(),
        }
    )

    return loss, log_metrics
def train_loop(
    config,
    logger,
    device,
    model,
    optimizer,
    lr_scheduler,
    grad_scaler,
    data_loaders,
    samplers,
    writer,
    in_scaler,
    out_scaler,
    use_mlflow,
    vocoder=None,
    vocoder_in_scaler=None,
    vocoder_config=None,
):
    """Epoch loop for acoustic-model training.

    Iterates train/dev phases, computes a per-batch pitch-regularization
    weight from the denormalized score log-F0, calls :func:`train_step`,
    aggregates metrics to TensorBoard/MLflow, and checkpoints the best and
    periodic models.

    Returns:
        float: the dev loss of the final epoch (``last_dev_loss``).
    """
    out_dir = Path(to_absolute_path(config.train.out_dir))
    best_dev_loss = torch.finfo(torch.float32).max
    last_dev_loss = torch.finfo(torch.float32).max

    in_lf0_idx = config.data.in_lf0_idx
    in_rest_idx = config.data.in_rest_idx
    if in_lf0_idx is None or in_rest_idx is None:
        raise ValueError("in_lf0_idx and in_rest_idx must be specified")
    pitch_reg_weight = config.train.pitch_reg_weight

    if "sample_rate" not in config.data:
        logger.warning(
            "sample_rate is not found in the data config. Fallback to 48000."
        )
        sr = 48000
    else:
        sr = config.data.sample_rate
    if "feats_criterion" not in config.train:
        logger.warning(
            "feats_criterion is not found in the train config. Fallback to MSE."
        )
        feats_criterion = "mse"
    else:
        feats_criterion = config.train.feats_criterion

    # Under DDP, only rank 0 shows progress bars; other ranks use a no-op.
    if dist.is_initialized() and dist.get_rank() != 0:

        def tqdm(x, **kwargs):
            return x

    else:
        from tqdm import tqdm

    train_iter = 1
    for epoch in tqdm(range(1, config.train.nepochs + 1)):
        for phase in data_loaders.keys():
            train = phase.startswith("train")
            # https://pytorch.org/docs/stable/data.html#torch.utils.data.distributed.DistributedSampler
            if dist.is_initialized() and train and samplers[phase] is not None:
                samplers[phase].set_epoch(epoch)
            running_loss = 0
            running_metrics = {}
            evaluated = False
            for in_feats, out_feats, lengths in tqdm(
                data_loaders[phase], desc=f"{phase} iter", leave=False
            ):
                # NOTE: This is needed for pytorch's PackedSequence
                lengths, indices = torch.sort(lengths, dim=0, descending=True)
                in_feats, out_feats = (
                    in_feats[indices].to(device),
                    out_feats[indices].to(device),
                )
                # Compute denormalized log-F0 in the musical scores
                with torch.no_grad():
                    lf0_score_denorm = (
                        in_feats[:, :, in_lf0_idx] - in_scaler.min_[in_lf0_idx]
                    ) / in_scaler.scale_[in_lf0_idx]
                    # Fill zeros for rest and padded frames
                    lf0_score_denorm *= (in_feats[:, :, in_rest_idx] <= 0).float()
                    lf0_score_denorm[make_pad_mask(lengths)] = 0
                # Compute time-variant pitch regularization weight vector
                # NOTE: the current impl. is very slow
                if pitch_reg_weight > 0.0:
                    pitch_reg_dyn_ws = compute_batch_pitch_regularization_weight(
                        lf0_score_denorm,
                        decay_size=config.train.pitch_reg_decay_size,
                    )
                else:
                    pitch_reg_dyn_ws = 1.0

                # Run the qualitative evaluation once per dev phase, on the
                # first batch only.
                if (not train) and (not evaluated):
                    eval_model(
                        phase,
                        epoch,
                        model,
                        in_feats,
                        out_feats,
                        lengths,
                        config.model,
                        out_scaler,
                        writer,
                        sr=sr,
                        lf0_score_denorm=lf0_score_denorm,
                        use_world_codec=config.data.use_world_codec,
                        vocoder=vocoder,
                        vocoder_in_scaler=vocoder_in_scaler,
                        vocoder_config=vocoder_config,
                        max_num_eval_utts=config.train.max_num_eval_utts,
                    )
                    evaluated = True

                loss, log_metrics = train_step(
                    logger=logger,
                    model=model,
                    model_config=config.model,
                    optim_config=config.train.optim,
                    optimizer=optimizer,
                    grad_scaler=grad_scaler,
                    train=train,
                    in_feats=in_feats,
                    out_feats=out_feats,
                    lengths=lengths,
                    out_scaler=out_scaler,
                    feats_criterion=feats_criterion,
                    stream_wise_loss=config.train.stream_wise_loss,
                    stream_weights=config.model.stream_weights,
                    pitch_reg_dyn_ws=pitch_reg_dyn_ws,
                    pitch_reg_weight=pitch_reg_weight,
                )

                if train:
                    if writer is not None:
                        for key, val in log_metrics.items():
                            writer.add_scalar(f"{key}_Step/{phase}", val, train_iter)
                    train_iter += 1
                running_loss += loss.item()
                # Accumulate each metric; first occurrence initializes the slot.
                for k, v in log_metrics.items():
                    try:
                        running_metrics[k] += float(v)
                    except KeyError:
                        running_metrics[k] = float(v)

            ave_loss = running_loss / len(data_loaders[phase])
            logger.info("[%s] [Epoch %s]: loss %s", phase, epoch, ave_loss)
            if writer is not None:
                writer.add_scalar(f"Loss_Epoch/{phase}", ave_loss, epoch)
            if use_mlflow:
                mlflow.log_metric(f"{phase}_loss", ave_loss, step=epoch)
            for k, v in running_metrics.items():
                ave_v = v / len(data_loaders[phase])
                if writer is not None:
                    writer.add_scalar(f"{k}_Epoch/{phase}", ave_v, epoch)
                if use_mlflow:
                    mlflow.log_metric(f"{phase}_{k}", ave_v, step=epoch)

            if not train:
                last_dev_loss = ave_loss
            if not train and ave_loss < best_dev_loss:
                best_dev_loss = ave_loss
                save_checkpoint(
                    logger, out_dir, model, optimizer, lr_scheduler, epoch, is_best=True
                )

        lr_scheduler.step()

        if epoch % config.train.checkpoint_epoch_interval == 0:
            save_checkpoint(
                logger, out_dir, model, optimizer, lr_scheduler, epoch, is_best=False
            )

    save_checkpoint(
        logger, out_dir, model, optimizer, lr_scheduler, config.train.nepochs
    )
    logger.info("The best loss was %s", best_dev_loss)
    if use_mlflow:
        mlflow.log_metric("best_dev_loss", best_dev_loss, step=epoch)
        mlflow.log_artifacts(out_dir)

    return last_dev_loss
@hydra.main(config_path="conf/train_acoustic", config_name="config")
def my_app(config: DictConfig) -> None:
if "max_time_frames" in config.data and config.data.max_time_frames > 0:
collate_fn = partial(
collate_fn_random_segments, max_time_frames=config.data.max_time_frames
)
else:
if "reduction_factor" in config.model.netG:
collate_fn = partial(
collate_fn_default,
reduction_factor=config.model.netG.reduction_factor,
)
else:
collate_fn = collate_fn_default
if config.train.use_ddp:
dist.init_process_group("nccl")
rank = dist.get_rank()
device_id = rank % torch.cuda.device_count()
torch.cuda.set_device(device_id)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
(
model,
optimizer,
lr_scheduler,
grad_scaler,
data_loaders,
samplers,
writer,
logger,
in_scaler,
out_scaler,
) = setup(config, device, collate_fn)
path = config.train.pretrained_vocoder_checkpoint
if path is not None and len(path) > 0:
logger.info(f"Loading pretrained vocoder checkpoint from {path}")
vocoder, vocoder_in_scaler, vocoder_config = load_vocoder(
path, device, config.model
)
else:
vocoder, vocoder_in_scaler, vocoder_config = None, None, None
check_resf0_config(logger, model, config, in_scaler, out_scaler)
out_scaler = PyTorchStandardScaler(
torch.from_numpy(out_scaler.mean_), torch.from_numpy(out_scaler.scale_)
).to(device)
use_mlflow = config.mlflow.enabled
if use_mlflow:
with mlflow.start_run() as run:
# NOTE: modify out_dir when running with mlflow
config.train.out_dir = f"{config.train.out_dir}/{run.info.run_id}"
save_configs(config)
log_params_from_omegaconf_dict(config)
last_dev_loss = train_loop(
config=config,
logger=logger,
device=device,
model=model,
optimizer=optimizer,
lr_scheduler=lr_scheduler,
grad_scaler=grad_scaler,
data_loaders=data_loaders,
samplers=samplers,
writer=writer,
in_scaler=in_scaler,
out_scaler=out_scaler,
use_mlflow=use_mlflow,
vocoder=vocoder,
vocoder_in_scaler=vocoder_in_scaler,
vocoder_config=vocoder_config,
)
else:
save_configs(config)
last_dev_loss = train_loop(
config=config,
logger=logger,
device=device,
model=model,
optimizer=optimizer,
lr_scheduler=lr_scheduler,
grad_scaler=grad_scaler,
data_loaders=data_loaders,
samplers=samplers,
writer=writer,
in_scaler=in_scaler,
out_scaler=out_scaler,
use_mlflow=use_mlflow,
vocoder=vocoder,
vocoder_in_scaler=vocoder_in_scaler,
vocoder_config=vocoder_config,
)
return last_dev_loss
def entry():
    """Console-script entry point; defers to the Hydra-decorated ``my_app``."""
    my_app()


if __name__ == "__main__":
    my_app()
| 21,429 | 36.929204 | 103 | py |
nnsvs | nnsvs-master/nnsvs/bin/synthesis.py | import os
from os.path import join
import hydra
import joblib
import numpy as np
import torch
from hydra.utils import to_absolute_path
from nnmnkwii.io import hts
from nnsvs.gen import (
postprocess_acoustic,
postprocess_waveform,
predict_acoustic,
predict_timing,
predict_waveform,
)
from nnsvs.logger import getLogger
from nnsvs.util import extract_static_scaler, init_seed, load_utt_list, load_vocoder
from omegaconf import DictConfig, OmegaConf
from scipy.io import wavfile
from tqdm.auto import tqdm
@hydra.main(config_path="conf/synthesis", config_name="config")
def my_app(config: DictConfig) -> None:
global logger
logger = getLogger(config.verbose)
logger.info(OmegaConf.to_yaml(config))
if not torch.cuda.is_available():
device = torch.device("cpu")
else:
device = torch.device(config.device)
# timelag
timelag_config = OmegaConf.load(to_absolute_path(config.timelag.model_yaml))
timelag_model = hydra.utils.instantiate(timelag_config.netG).to(device)
checkpoint = torch.load(
to_absolute_path(config.timelag.checkpoint),
map_location=lambda storage, loc: storage,
)
timelag_model.load_state_dict(checkpoint["state_dict"])
timelag_in_scaler = joblib.load(to_absolute_path(config.timelag.in_scaler_path))
timelag_out_scaler = joblib.load(to_absolute_path(config.timelag.out_scaler_path))
timelag_model.eval()
# duration
duration_config = OmegaConf.load(to_absolute_path(config.duration.model_yaml))
duration_model = hydra.utils.instantiate(duration_config.netG).to(device)
checkpoint = torch.load(
to_absolute_path(config.duration.checkpoint),
map_location=lambda storage, loc: storage,
)
duration_model.load_state_dict(checkpoint["state_dict"])
duration_in_scaler = joblib.load(to_absolute_path(config.duration.in_scaler_path))
duration_out_scaler = joblib.load(to_absolute_path(config.duration.out_scaler_path))
duration_model.eval()
# acoustic model
acoustic_config = OmegaConf.load(to_absolute_path(config.acoustic.model_yaml))
acoustic_model = hydra.utils.instantiate(acoustic_config.netG).to(device)
checkpoint = torch.load(
to_absolute_path(config.acoustic.checkpoint),
map_location=lambda storage, loc: storage,
)
acoustic_model.load_state_dict(checkpoint["state_dict"])
acoustic_in_scaler = joblib.load(to_absolute_path(config.acoustic.in_scaler_path))
acoustic_out_scaler = joblib.load(to_absolute_path(config.acoustic.out_scaler_path))
acoustic_model.eval()
# NOTE: this is used for GV post-filtering
acoustic_out_static_scaler = extract_static_scaler(
acoustic_out_scaler, acoustic_config
)
# Vocoder
if config.vocoder.checkpoint is not None and len(config.vocoder.checkpoint) > 0:
vocoder, vocoder_in_scaler, vocoder_config = load_vocoder(
to_absolute_path(config.vocoder.checkpoint),
device,
acoustic_config,
)
else:
vocoder, vocoder_in_scaler, vocoder_config = None, None, None
if config.synthesis.vocoder_type != "world":
logger.warning("Vocoder checkpoint is not specified")
logger.info(f"Use world instead of {config.synthesis.vocoder_type}.")
config.synthesis.vocoder_type = "world"
# Run synthesis for each utt.
binary_dict, numeric_dict = hts.load_question_set(
to_absolute_path(config.synthesis.qst)
)
in_dir = to_absolute_path(config.in_dir)
out_dir = to_absolute_path(config.out_dir)
os.makedirs(out_dir, exist_ok=True)
utt_ids = load_utt_list(to_absolute_path(config.utt_list))
logger.info("Processes %s utterances...", len(utt_ids))
for utt_id in tqdm(utt_ids):
labels = hts.load(join(in_dir, f"{utt_id}.lab"))
hts_frame_shift = int(config.synthesis.frame_period * 1e4)
labels.frame_shift = hts_frame_shift
init_seed(1234)
if config.synthesis.ground_truth_duration:
duration_modified_labels = labels
else:
duration_modified_labels = predict_timing(
device=device,
labels=labels,
binary_dict=binary_dict,
numeric_dict=numeric_dict,
timelag_model=timelag_model,
timelag_config=timelag_config,
timelag_in_scaler=timelag_in_scaler,
timelag_out_scaler=timelag_out_scaler,
duration_model=duration_model,
duration_config=duration_config,
duration_in_scaler=duration_in_scaler,
duration_out_scaler=duration_out_scaler,
log_f0_conditioning=config.synthesis.log_f0_conditioning,
allowed_range=config.timelag.allowed_range,
allowed_range_rest=config.timelag.allowed_range_rest,
force_clip_input_features=config.timelag.force_clip_input_features,
frame_period=config.synthesis.frame_period,
)
# Predict acoustic features
acoustic_features = predict_acoustic(
device=device,
labels=duration_modified_labels,
acoustic_model=acoustic_model,
acoustic_config=acoustic_config,
acoustic_in_scaler=acoustic_in_scaler,
acoustic_out_scaler=acoustic_out_scaler,
binary_dict=binary_dict,
numeric_dict=numeric_dict,
subphone_features=config.synthesis.subphone_features,
log_f0_conditioning=config.synthesis.log_f0_conditioning,
force_clip_input_features=config.acoustic.force_clip_input_features,
f0_shift_in_cent=config.synthesis.pre_f0_shift_in_cent,
)
# NOTE: the output of this function is tuple of features
# e.g., (mgc, lf0, vuv, bap)
multistream_features = postprocess_acoustic(
device=device,
acoustic_features=acoustic_features,
duration_modified_labels=duration_modified_labels,
binary_dict=binary_dict,
numeric_dict=numeric_dict,
acoustic_config=acoustic_config,
acoustic_out_static_scaler=acoustic_out_static_scaler,
postfilter_model=None, # NOTE: learned post-filter is not supported
postfilter_config=None,
postfilter_out_scaler=None,
sample_rate=config.synthesis.sample_rate,
frame_period=config.synthesis.frame_period,
relative_f0=config.synthesis.relative_f0,
feature_type=config.synthesis.feature_type,
post_filter_type=config.synthesis.post_filter_type,
trajectory_smoothing=config.synthesis.trajectory_smoothing,
trajectory_smoothing_cutoff=config.synthesis.trajectory_smoothing_cutoff,
trajectory_smoothing_cutoff_f0=config.synthesis.trajectory_smoothing_cutoff_f0,
vuv_threshold=config.synthesis.vuv_threshold,
f0_shift_in_cent=config.synthesis.post_f0_shift_in_cent,
vibrato_scale=1.0,
force_fix_vuv=config.synthesis.force_fix_vuv,
)
# Generate waveform by vocoder
wav = predict_waveform(
device=device,
multistream_features=multistream_features,
vocoder=vocoder,
vocoder_config=vocoder_config,
vocoder_in_scaler=vocoder_in_scaler,
sample_rate=config.synthesis.sample_rate,
frame_period=config.synthesis.frame_period,
use_world_codec=config.synthesis.use_world_codec,
feature_type=config.synthesis.feature_type,
vocoder_type=config.synthesis.vocoder_type,
vuv_threshold=config.synthesis.vuv_threshold,
)
wav = postprocess_waveform(
wav=wav,
sample_rate=config.synthesis.sample_rate,
dtype=np.int16,
peak_norm=False,
loudness_norm=False,
)
out_wav_path = join(out_dir, f"{utt_id}.wav")
wavfile.write(
out_wav_path, rate=config.synthesis.sample_rate, data=wav.astype(np.int16)
)
def entry():
    """Console-script entry point; defers to the Hydra-decorated ``my_app``."""
    my_app()  # pylint: disable=no-value-for-parameter


if __name__ == "__main__":
    my_app()  # pylint: disable=no-value-for-parameter
| 8,358 | 39.381643 | 91 | py |
nnsvs | nnsvs-master/nnsvs/bin/gen_static_features.py | import os
from os.path import exists, join
import hydra
import joblib
import numpy as np
import pyworld
import torch
from hydra.utils import to_absolute_path
from nnsvs.acoustic_models.util import pad_inference
from nnsvs.base import PredictionType
from nnsvs.gen import get_windows
from nnsvs.logger import getLogger
from nnsvs.mdn import mdn_get_most_probable_sigma_and_mu
from nnsvs.multistream import (
get_static_features,
get_static_stream_sizes,
multi_stream_mlpg,
split_streams,
)
from nnsvs.postfilters import variance_scaling
from nnsvs.util import StandardScaler, load_utt_list
from omegaconf import DictConfig, OmegaConf
from tqdm import tqdm
# Module-level logger; populated inside my_app() once verbosity is known.
logger = None
# Cache CUDA availability at import time to choose the inference device.
use_cuda = torch.cuda.is_available()
@torch.no_grad()
def _gen_static_features(
    model, model_config, in_feats, gt_feats, out_scaler, gta=False
):
    """Generate denormalized static features for one utterance.

    Supports probabilistic (MDN) models via most-probable-mixture inference,
    and deterministic models via either free-running inference or
    ground-truth-aligned (GTA) teacher forcing. MLPG is applied when the
    model was trained with dynamic features.

    Returns:
        numpy.ndarray: float32 array of shape (T, static_dim).
    """
    if model.prediction_type() == PredictionType.PROBABILISTIC:
        if gta:
            raise ValueError("GTA not supported for probabilistic models for now")
        else:
            max_mu, max_sigma = model.inference(in_feats, [in_feats.shape[1]])
        if np.any(model_config.has_dynamic_features):
            # Apply denormalization
            # (B, T, D_out) -> (T, D_out)
            max_sigma_sq = (
                max_sigma.squeeze(0).cpu().data.numpy() ** 2 * out_scaler.var_
            )
            max_mu = out_scaler.inverse_transform(max_mu.squeeze(0).cpu().data.numpy())
            # Apply MLPG
            # (T, D_out) -> (T, static_dim)
            out_feats = multi_stream_mlpg(
                max_mu,
                max_sigma_sq,
                get_windows(model_config.num_windows),
                model_config.stream_sizes,
                model_config.has_dynamic_features,
            )
        else:
            # (T, D_out)
            out_feats = max_mu.squeeze(0).cpu().data.numpy()
            out_feats = out_scaler.inverse_transform(out_feats)
    else:
        if gta:
            if hasattr(model, "reduction_factor"):
                # Pad so the frame count is compatible with the reduction factor.
                out_feats = pad_inference(
                    model,
                    in_feats,
                    [in_feats.shape[1]],
                    model.reduction_factor,
                    mode="replicate",
                    y=gt_feats,
                    mdn=model.prediction_type() in [PredictionType.PROBABILISTIC, 3],
                )
                if model.prediction_type() == PredictionType.PROBABILISTIC:
                    # (sigma, mu) -> (mu)
                    out_feats = mdn_get_most_probable_sigma_and_mu(out_feats)[1]
                elif model.prediction_type() == 3:
                    # (mu, sigma) -> (mu)
                    out_feats = out_feats[0]
            else:
                out_feats = model(in_feats, [in_feats.shape[1]], gt_feats)
            # out + residual case
            out_feats = out_feats[0] if isinstance(out_feats, tuple) else out_feats
            # multiple outputs case
            out_feats = out_feats[-1] if isinstance(out_feats, list) else out_feats
            out_feats = out_feats.squeeze(0).cpu().data.numpy()
        else:
            out_feats = (
                model.inference(in_feats, [in_feats.shape[1]])
                .squeeze(0)
                .cpu()
                .data.numpy()
            )
        out_feats = out_scaler.inverse_transform(out_feats)

        # Apply MLPG if necessary
        if np.any(model_config.has_dynamic_features):
            out_feats = multi_stream_mlpg(
                out_feats,
                out_scaler.var_,
                get_windows(model_config.num_windows),
                model_config.stream_sizes,
                model_config.has_dynamic_features,
            )

    return out_feats.astype(np.float32)
@hydra.main(config_path="conf/gen_static_features", config_name="config")
def my_app(config: DictConfig) -> None:
global logger
logger = getLogger(config.verbose)
logger.info(OmegaConf.to_yaml(config))
device = torch.device("cuda" if use_cuda else "cpu")
utt_list = to_absolute_path(config.utt_list)
in_dir = to_absolute_path(config.in_dir)
gt_dir = to_absolute_path(config.gt_dir)
out_dir = to_absolute_path(config.out_dir)
utt_ids = load_utt_list(utt_list)
os.makedirs(out_dir, exist_ok=True)
model_config = OmegaConf.load(to_absolute_path(config.model.model_yaml))
model = hydra.utils.instantiate(model_config.netG).to(device)
checkpoint = torch.load(
to_absolute_path(config.model.checkpoint),
map_location=lambda storage, loc: storage,
)
model.load_state_dict(checkpoint["state_dict"])
model.eval()
out_scaler = joblib.load(to_absolute_path(config.out_scaler_path))
mean_ = get_static_features(
out_scaler.mean_.reshape(1, 1, out_scaler.mean_.shape[-1]),
model_config.num_windows,
model_config.stream_sizes,
model_config.has_dynamic_features,
)
mean_ = np.concatenate(mean_, -1).reshape(1, -1)
var_ = get_static_features(
out_scaler.var_.reshape(1, 1, out_scaler.var_.shape[-1]),
model_config.num_windows,
model_config.stream_sizes,
model_config.has_dynamic_features,
)
var_ = np.concatenate(var_, -1).reshape(1, -1)
scale_ = get_static_features(
out_scaler.scale_.reshape(1, 1, out_scaler.scale_.shape[-1]),
model_config.num_windows,
model_config.stream_sizes,
model_config.has_dynamic_features,
)
scale_ = np.concatenate(scale_, -1).reshape(1, -1)
static_scaler = StandardScaler(mean_, var_, scale_)
static_stream_sizes = get_static_stream_sizes(
model_config.stream_sizes,
model_config.has_dynamic_features,
model_config.num_windows,
)
for utt_id in tqdm(utt_ids):
if not exists(join(gt_dir, utt_id + "-feats.npy")):
print(f"Skip {utt_id}")
continue
in_feats = (
torch.from_numpy(np.load(join(in_dir, utt_id + "-feats.npy")))
.unsqueeze(0)
.to(device)
)
gt_feats = (
torch.from_numpy(np.load(join(gt_dir, utt_id + "-feats.npy")))
.unsqueeze(0)
.to(device)
)
static_feats = _gen_static_features(
model, model_config, in_feats, gt_feats, out_scaler, config.gta
)
outs = split_streams(static_feats, static_stream_sizes)
if len(outs) == 4:
mgc, lf0, vuv, bap = outs
mgc_end_dim = static_stream_sizes[0]
bap_start_dim = sum(static_stream_sizes[:3])
bap_end_dim = sum(static_stream_sizes[:4])
if config.gv_postfilter:
# mgc
mgc = variance_scaling(
static_scaler.var_.reshape(-1)[:mgc_end_dim],
mgc,
offset=config.mgc_offset,
)
# bap
bap = variance_scaling(
static_scaler.var_.reshape(-1)[bap_start_dim:bap_end_dim],
bap,
offset=config.bap_offset,
)
if config.mgc2sp:
# 24k: 1024
# 48: 2048
fft_size = pyworld.get_cheaptrick_fft_size(config.sample_rate)
sp = np.log(
pyworld.decode_spectral_envelope(
mgc.astype(np.float64), config.sample_rate, fft_size
).astype(np.float32)
)
static_feats = np.concatenate([sp, lf0, vuv, bap], axis=-1)
else:
static_feats = np.concatenate([mgc, lf0, vuv, bap], axis=-1)
elif len(outs) == 3:
mel, lf0, vuv = outs
# NOTE: may add postfilter here
static_feats = np.concatenate([mel, lf0, vuv], axis=-1)
else:
raise ValueError("Invalid number of streams")
gt_feats = gt_feats.squeeze(0)
if len(static_feats) != len(gt_feats):
print(static_feats.shape, gt_feats.shape)
raise RuntimeError(f"Length mismatch in {utt_id}")
if config.normalize:
assert not config.mgc2sp, "need to compute normalization stats"
static_feats = static_scaler.transform(static_feats)
out_path = join(out_dir, f"{utt_id}-feats.npy")
np.save(out_path, static_feats.astype(np.float32), allow_pickle=False)
def entry():
    """Console-script entry point; defers to the Hydra-decorated ``my_app``."""
    my_app()


if __name__ == "__main__":
    my_app()
| 8,527 | 34.385892 | 87 | py |
nnsvs | nnsvs-master/nnsvs/bin/train.py | from functools import partial
from pathlib import Path
import hydra
import mlflow
import numpy as np
import torch
import torch.distributed as dist
from hydra.utils import to_absolute_path
from nnmnkwii import metrics
from nnsvs.base import PredictionType
from nnsvs.mdn import mdn_get_most_probable_sigma_and_mu, mdn_loss
from nnsvs.multistream import split_streams
from nnsvs.train_util import (
collate_fn_default,
collate_fn_random_segments,
get_stream_weight,
log_params_from_omegaconf_dict,
save_checkpoint,
save_configs,
setup,
)
from nnsvs.util import PyTorchStandardScaler, make_non_pad_mask
from omegaconf import DictConfig
from torch import nn
from torch.cuda.amp import autocast
@torch.no_grad()
def compute_distortions(pred_out_feats, out_feats, lengths, out_scaler):
    """Compute objective distortion metrics between predictions and targets.

    Both tensors are denormalized with ``out_scaler`` first so the metric is
    reported in the original feature scale. Returns a dict that contains
    ``"ObjEval_RMSE"`` when it can be computed (empty on ZeroDivisionError).
    """
    assert pred_out_feats.shape == out_feats.shape
    # Undo feature normalization before measuring the error.
    out_feats = out_scaler.inverse_transform(out_feats)
    pred_out_feats = out_scaler.inverse_transform(pred_out_feats)

    # NOTE: named distortions (not `dist`) to avoid shadowing the
    # module-level `torch.distributed as dist` import.
    distortions = {}
    try:
        mse = metrics.mean_squared_error(out_feats, pred_out_feats, lengths=lengths)
        distortions["ObjEval_RMSE"] = np.sqrt(mse)
    except ZeroDivisionError:
        # Best effort: skip the metric rather than abort the eval step.
        pass
    return distortions
def train_step(
    model,
    optimizer,
    grad_scaler,
    train,
    in_feats,
    out_feats,
    lengths,
    out_scaler,
    feats_criterion="mse",
    stream_wise_loss=False,
    stream_weights=None,
    stream_sizes=None,
):
    """Run a single forward (and, when training, backward) step on one batch.

    Args:
        model (nn.Module): acoustic model, possibly wrapped in ``nn.DataParallel``.
        optimizer (torch.optim.Optimizer): optimizer.
        grad_scaler: AMP ``GradScaler`` or None; when not None, forward and
            loss computation run under ``autocast``.
        train (bool): True for a training step (parameters updated),
            False for validation (no update).
        in_feats (torch.Tensor): input features; assumed (B, T, D_in) —
            the mask below adds a trailing feature axis.
        out_feats (torch.Tensor): target features, same time layout.
        lengths (torch.Tensor): valid frame counts per utterance.
        out_scaler: scaler used only for distortion metrics.
        feats_criterion (str): "mse"/"l2" or "mae"/"l1".
        stream_wise_loss (bool): if True, compute a weighted sum of
            per-stream losses instead of one loss over all dims.
        stream_weights (list): per-stream loss weights.
        stream_sizes (list): per-stream feature dimensions.

    Returns:
        tuple: (scalar loss tensor, dict of distortion metrics)
    """
    model.train() if train else model.eval()
    optimizer.zero_grad()
    if feats_criterion in ["l2", "mse"]:
        criterion = nn.MSELoss(reduction="none")
    elif feats_criterion in ["l1", "mae"]:
        criterion = nn.L1Loss(reduction="none")
    else:
        raise RuntimeError("not supported criterion")

    # DataParallel hides the model's custom methods behind .module
    prediction_type = (
        model.module.prediction_type()
        if isinstance(model, nn.DataParallel)
        else model.prediction_type()
    )

    # Apply preprocess if required (e.g., FIR filter for shallow AR)
    # defaults to no-op
    if isinstance(model, nn.DataParallel):
        out_feats = model.module.preprocess_target(out_feats)
    else:
        out_feats = model.preprocess_target(out_feats)

    # Run forward
    with autocast(enabled=grad_scaler is not None):
        pred_out_feats = model(in_feats, lengths)

    # Mask (B, T, 1)
    mask = make_non_pad_mask(lengths).unsqueeze(-1).to(in_feats.device)

    # Compute loss
    if prediction_type == PredictionType.PROBABILISTIC:
        # MDN head: model returns mixture weights / scales / means
        pi, sigma, mu = pred_out_feats

        # (B, max(T)) or (B, max(T), D_out)
        mask_ = mask if len(pi.shape) == 4 else mask.squeeze(-1)
        # Compute loss and apply mask
        with autocast(enabled=grad_scaler is not None):
            loss = mdn_loss(pi, sigma, mu, out_feats, reduce=False)
        loss = loss.masked_select(mask_).mean()
    else:
        if stream_wise_loss:
            # Weighted sum of per-stream (e.g., mgc/lf0/vuv/bap) losses
            w = get_stream_weight(stream_weights, stream_sizes).to(in_feats.device)
            streams = split_streams(out_feats, stream_sizes)
            pred_streams = split_streams(pred_out_feats, stream_sizes)
            loss = 0
            for pred_stream, stream, sw in zip(pred_streams, streams, w):
                with autocast(enabled=grad_scaler is not None):
                    loss += (
                        sw
                        * criterion(
                            pred_stream.masked_select(mask), stream.masked_select(mask)
                        ).mean()
                    )
        else:
            with autocast(enabled=grad_scaler is not None):
                loss = criterion(
                    pred_out_feats.masked_select(mask), out_feats.masked_select(mask)
                ).mean()

    # For metrics, collapse the MDN output to its most probable component
    if prediction_type == PredictionType.PROBABILISTIC:
        with torch.no_grad():
            pred_out_feats_ = mdn_get_most_probable_sigma_and_mu(pi, sigma, mu)[1]
    else:
        pred_out_feats_ = pred_out_feats
    distortions = compute_distortions(pred_out_feats_, out_feats, lengths, out_scaler)

    if train:
        if grad_scaler is not None:
            # AMP path: scale loss, step through the scaler
            grad_scaler.scale(loss).backward()
            grad_scaler.step(optimizer)
            grad_scaler.update()
        else:
            loss.backward()
            optimizer.step()

    return loss, distortions
def train_loop(
    config,
    logger,
    device,
    model,
    optimizer,
    lr_scheduler,
    grad_scaler,
    data_loaders,
    samplers,
    writer,
    out_scaler,
    use_mlflow,
):
    """Epoch-based training loop.

    Iterates over the configured number of epochs, running a train and a
    dev phase per epoch, logging metrics to TensorBoard/MLflow, and saving
    the best (by dev loss) and periodic checkpoints.

    Args:
        config: hydra training config.
        logger: logger instance.
        device: torch device.
        model (nn.Module): model to train.
        optimizer: optimizer.
        lr_scheduler: learning-rate scheduler (stepped once per epoch).
        grad_scaler: AMP grad scaler or None.
        data_loaders (dict): phase name -> DataLoader.
        samplers (dict): phase name -> distributed sampler (or None).
        writer: TensorBoard SummaryWriter or None.
        out_scaler: scaler used for distortion metrics.
        use_mlflow (bool): log metrics/artifacts to MLflow.

    Returns:
        float: dev loss of the last epoch.
    """
    out_dir = Path(to_absolute_path(config.train.out_dir))
    best_dev_loss = torch.finfo(torch.float32).max
    last_dev_loss = torch.finfo(torch.float32).max

    if "feats_criterion" not in config.train:
        logger.warning(
            "feats_criterion is not found in the train config. Fallback to MSE."
        )
        feats_criterion = "mse"
    else:
        feats_criterion = config.train.feats_criterion

    # Show progress bars only on rank 0 when running with DDP
    if dist.is_initialized() and dist.get_rank() != 0:

        def tqdm(x, **kwargs):
            return x

    else:
        from tqdm import tqdm

    train_iter = 1
    for epoch in tqdm(range(1, config.train.nepochs + 1)):
        for phase in data_loaders.keys():
            train = phase.startswith("train")
            # https://pytorch.org/docs/stable/data.html#torch.utils.data.distributed.DistributedSampler
            if dist.is_initialized() and train and samplers[phase] is not None:
                samplers[phase].set_epoch(epoch)
            running_loss = 0
            running_metrics = {}
            for in_feats, out_feats, lengths in tqdm(
                data_loaders[phase], desc=f"{phase} iter", leave=False
            ):
                # NOTE: This is needed for pytorch's PackedSequence
                lengths, indices = torch.sort(lengths, dim=0, descending=True)
                in_feats, out_feats = (
                    in_feats[indices].to(device),
                    out_feats[indices].to(device),
                )
                loss, log_metrics = train_step(
                    model=model,
                    optimizer=optimizer,
                    grad_scaler=grad_scaler,
                    train=train,
                    in_feats=in_feats,
                    out_feats=out_feats,
                    lengths=lengths,
                    out_scaler=out_scaler,
                    feats_criterion=feats_criterion,
                    stream_wise_loss=config.train.stream_wise_loss,
                    stream_weights=config.model.stream_weights,
                    stream_sizes=config.model.stream_sizes,
                )

                if train:
                    if writer is not None:
                        for key, val in log_metrics.items():
                            writer.add_scalar(f"{key}_Step/{phase}", val, train_iter)
                    train_iter += 1
                running_loss += loss.item()
                for k, v in log_metrics.items():
                    try:
                        running_metrics[k] += float(v)
                    except KeyError:
                        running_metrics[k] = float(v)

            # NOTE: the average loss was previously computed twice and the
            # mlflow metric logged twice per epoch; compute/log it once.
            ave_loss = running_loss / len(data_loaders[phase])
            logger.info("[%s] [Epoch %s]: loss %s", phase, epoch, ave_loss)
            if writer is not None:
                # Both tags are kept for backward compatibility with
                # existing TensorBoard logs.
                writer.add_scalar(f"Loss/{phase}", ave_loss, epoch)
                writer.add_scalar(f"Loss_Epoch/{phase}", ave_loss, epoch)
            if use_mlflow:
                mlflow.log_metric(f"{phase}_loss", ave_loss, step=epoch)

            for k, v in running_metrics.items():
                ave_v = v / len(data_loaders[phase])
                if writer is not None:
                    writer.add_scalar(f"{k}_Epoch/{phase}", ave_v, epoch)
                if use_mlflow:
                    mlflow.log_metric(f"{phase}_{k}", ave_v, step=epoch)

            if not train:
                last_dev_loss = ave_loss
            if not train and ave_loss < best_dev_loss:
                best_dev_loss = ave_loss
                save_checkpoint(
                    logger, out_dir, model, optimizer, lr_scheduler, epoch, is_best=True
                )

        lr_scheduler.step()

        if epoch % config.train.checkpoint_epoch_interval == 0:
            save_checkpoint(
                logger, out_dir, model, optimizer, lr_scheduler, epoch, is_best=False
            )

    save_checkpoint(
        logger, out_dir, model, optimizer, lr_scheduler, config.train.nepochs
    )
    logger.info("The best loss was %s", best_dev_loss)
    if use_mlflow:
        mlflow.log_metric("best_dev_loss", best_dev_loss, step=epoch)
        mlflow.log_artifacts(out_dir)

    return last_dev_loss
@hydra.main(config_path="conf/train", config_name="config")
def my_app(config: DictConfig) -> None:
    """Hydra entry point for training.

    Sets up data/model/optimizer, optionally initializes DDP and MLflow,
    then runs the training loop.

    Returns:
        float: last dev loss (useful for hydra sweeps).
    """
    if "max_time_frames" in config.data and config.data.max_time_frames > 0:
        collate_fn = partial(
            collate_fn_random_segments, max_time_frames=config.data.max_time_frames
        )
    else:
        collate_fn = collate_fn_default

    if config.train.use_ddp:
        # NOTE: must be called before any CUDA work on this process
        dist.init_process_group("nccl")
        rank = dist.get_rank()
        device_id = rank % torch.cuda.device_count()
        torch.cuda.set_device(device_id)

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    (
        model,
        optimizer,
        lr_scheduler,
        grad_scaler,
        data_loaders,
        samplers,
        writer,
        logger,
        _,
        out_scaler,
    ) = setup(config, device, collate_fn)

    out_scaler = PyTorchStandardScaler(
        torch.from_numpy(out_scaler.mean_), torch.from_numpy(out_scaler.scale_)
    ).to(device)
    use_mlflow = config.mlflow.enabled

    def _run_train_loop():
        # Shared between the mlflow and non-mlflow paths; callers must call
        # save_configs() beforehand.
        return train_loop(
            config=config,
            logger=logger,
            device=device,
            model=model,
            optimizer=optimizer,
            lr_scheduler=lr_scheduler,
            grad_scaler=grad_scaler,
            data_loaders=data_loaders,
            samplers=samplers,
            writer=writer,
            out_scaler=out_scaler,
            use_mlflow=use_mlflow,
        )

    if use_mlflow:
        with mlflow.start_run() as run:
            # NOTE: modify out_dir when running with mlflow
            config.train.out_dir = f"{config.train.out_dir}/{run.info.run_id}"
            save_configs(config)
            log_params_from_omegaconf_dict(config)
            last_dev_loss = _run_train_loop()
    else:
        save_configs(config)
        last_dev_loss = _run_train_loop()

    return last_dev_loss
def entry():
    """Wrapper around ``my_app`` (presumably installed as a console-script
    hook — confirm against the package's setup configuration)."""
    my_app()


# Allow running this module directly as a script.
if __name__ == "__main__":
    my_app()
| 11,217 | 31.705539 | 103 | py |
nnsvs | nnsvs-master/nnsvs/bin/anasyn.py | import os
from os.path import join
import hydra
import numpy as np
import pysptk
import pyworld
import torch
from hydra.utils import to_absolute_path
from nnsvs.dsp import bandpass_filter
from nnsvs.gen import gen_world_params
from nnsvs.logger import getLogger
from nnsvs.multistream import get_static_stream_sizes, split_streams
from nnsvs.svs import load_vocoder
from nnsvs.util import init_seed, load_utt_list
from omegaconf import DictConfig, OmegaConf
from scipy.io import wavfile
from tqdm.auto import tqdm
@torch.no_grad()
def anasyn(
    device,
    acoustic_features,
    acoustic_config,
    vocoder=None,
    vocoder_config=None,
    vocoder_in_scaler=None,
    sample_rate=48000,
    frame_period=5,
    vuv_threshold=0.5,
    use_world_codec=True,
    feature_type="world",
    vocoder_type="world",
):
    """Synthesize a waveform from multi-stream acoustic features.

    Supports two feature layouts ("world": mgc/lf0/vuv/bap, "melf0":
    mel/lf0/vuv) and three waveform generators ("world" vocoder,
    "pwg" neural vocoder, "usfgan" source-filter vocoder).

    Args:
        device: torch device used for neural vocoder inference.
        acoustic_features (np.ndarray): frame-level features; assumed
            shape (T, D) matching ``acoustic_config`` — TODO confirm.
        acoustic_config: config providing stream_sizes /
            has_dynamic_features / num_windows.
        vocoder: neural vocoder model (unused when vocoder_type=="world").
        vocoder_config: vocoder training config (used by "usfgan").
        vocoder_in_scaler: input scaler for the neural vocoder.
        sample_rate (int): output sampling rate.
        frame_period (float): frame shift in ms (WORLD synthesis only).
        vuv_threshold (float): voiced/unvoiced decision threshold.
        use_world_codec (bool): use WORLD codec for spectral envelope.
        feature_type (str): "world" or "melf0".
        vocoder_type (str): "world", "pwg", or "usfgan".

    Returns:
        np.ndarray: synthesized waveform (float; scale depends on vocoder).
    """
    static_stream_sizes = get_static_stream_sizes(
        acoustic_config.stream_sizes,
        acoustic_config.has_dynamic_features,
        acoustic_config.num_windows,
    )

    # Split multi-stream features
    streams = split_streams(acoustic_features, static_stream_sizes)
    # Generate WORLD parameters
    if feature_type == "world":
        assert len(streams) == 4
        mgc, lf0, vuv, bap = streams
    elif feature_type == "melf0":
        # NOTE: mel dimensionality is hard-coded to 80 here
        mel, lf0, vuv = split_streams(acoustic_features, [80, 1, 1])
    else:
        raise ValueError(f"Unknown feature type: {feature_type}")

    # Waveform generation by (1) WORLD or (2) neural vocoder
    if vocoder_type == "world":
        f0, spectrogram, aperiodicity = gen_world_params(
            mgc,
            lf0,
            vuv,
            bap,
            sample_rate,
            vuv_threshold=vuv_threshold,
            use_world_codec=use_world_codec,
        )
        wav = pyworld.synthesize(
            f0,
            spectrogram,
            aperiodicity,
            sample_rate,
            frame_period,
        )
    elif vocoder_type == "pwg":
        # NOTE: So far vocoder models are trained on binary V/UV features
        vuv = (vuv > vuv_threshold).astype(np.float32)
        if feature_type == "world":
            voc_inp = (
                torch.from_numpy(
                    vocoder_in_scaler.transform(
                        np.concatenate([mgc, lf0, vuv, bap], axis=-1)
                    )
                )
                .float()
                .to(device)
            )
        elif feature_type == "melf0":
            voc_inp = (
                torch.from_numpy(
                    vocoder_in_scaler.transform(
                        np.concatenate([mel, lf0, vuv], axis=-1)
                    )
                )
                .float()
                .to(device)
            )
        wav = vocoder.inference(voc_inp).view(-1).to("cpu").numpy()
    elif vocoder_type == "usfgan":
        if feature_type == "world":
            fftlen = pyworld.get_cheaptrick_fft_size(sample_rate)
            # More than 5 bap dims implies mel-cepstral aperiodicity coding
            use_mcep_aperiodicity = bap.shape[-1] > 5
            if use_mcep_aperiodicity:
                mcep_aperiodicity_order = bap.shape[-1] - 1
                alpha = pysptk.util.mcepalpha(sample_rate)
                aperiodicity = pysptk.mc2sp(
                    np.ascontiguousarray(bap).astype(np.float64),
                    fftlen=fftlen,
                    alpha=alpha,
                )
            else:
                aperiodicity = pyworld.decode_aperiodicity(
                    np.ascontiguousarray(bap).astype(np.float64), sample_rate, fftlen
                )
            # fill aperiodicity with ones for unvoiced regions
            aperiodicity[vuv.reshape(-1) < vuv_threshold, 0] = 1.0
            # WORLD fails catastrophically for out of range aperiodicity
            aperiodicity = np.clip(aperiodicity, 0.0, 1.0)
            # back to bap
            if use_mcep_aperiodicity:
                bap = pysptk.sp2mc(
                    aperiodicity,
                    order=mcep_aperiodicity_order,
                    alpha=alpha,
                )
            else:
                bap = pyworld.code_aperiodicity(aperiodicity, sample_rate).astype(
                    np.float32
                )

            aux_feats = (
                torch.from_numpy(
                    vocoder_in_scaler.transform(np.concatenate([mgc, bap], axis=-1))
                )
                .float()
                .to(device)
            )
        elif feature_type == "melf0":
            # NOTE: So far vocoder models are trained on binary V/UV features
            vuv = (vuv > vuv_threshold).astype(np.float32)
            aux_feats = (
                torch.from_numpy(vocoder_in_scaler.transform(mel)).float().to(device)
            )

        contf0 = np.exp(lf0)
        if vocoder_config.data.sine_f0_type in ["contf0", "cf0"]:
            f0_inp = contf0
        elif vocoder_config.data.sine_f0_type == "f0":
            # NOTE(review): f0_inp aliases contf0 here, so contf0 is also
            # zeroed in place — harmless since contf0 is not reused.
            f0_inp = contf0
            f0_inp[vuv < vuv_threshold] = 0
        wav = vocoder.inference(f0_inp, aux_feats).view(-1).to("cpu").numpy()

    return wav
def post_process(wav, sample_rate):
    """Band-pass filter a synthesized waveform and convert it to int16 PCM.

    The input scale is detected heuristically: peak values above 10 are
    taken to mean the data is already in int16-like [-32768, 32767] scale,
    otherwise float [-1, 1] scale is assumed.

    Args:
        wav (np.ndarray): waveform in either float [-1, 1] or int16-like scale.
        sample_rate (int): sampling rate of ``wav``.

    Returns:
        np.ndarray: int16 waveform suitable for ``scipy.io.wavfile.write``.
    """
    wav = bandpass_filter(wav, sample_rate)

    if np.max(wav) > 10:
        # data is likely already in [-32768, 32767]
        if np.abs(wav).max() > 32767:
            # Rescale to the full int16 range. The previous code only
            # normalized to [-1, 1], which truncated the entire signal to
            # {-1, 0, 1} on the int16 cast below (effectively silence).
            wav = wav / np.abs(wav).max() * 32767
        wav = wav.astype(np.int16)
    else:
        if np.abs(wav).max() > 1.0:
            # Normalize to avoid clipping before the int16 conversion
            wav = wav / np.abs(wav).max()
        wav = (wav * 32767.0).astype(np.int16)

    return wav
@hydra.main(config_path="conf/synthesis", config_name="config")
def my_app(config: DictConfig) -> None:
    """Hydra entry point for analysis-synthesis.

    Loads (optionally) a neural vocoder, then synthesizes a waveform for
    every utterance id in ``config.utt_list`` from pre-computed acoustic
    features ``{utt_id}-feats.npy`` in ``config.in_dir``, writing wav files
    to ``config.out_dir``.
    """
    global logger
    logger = getLogger(config.verbose)
    logger.info(OmegaConf.to_yaml(config))

    if not torch.cuda.is_available():
        device = torch.device("cpu")
    else:
        device = torch.device(config.device)

    acoustic_config = OmegaConf.load(to_absolute_path(config.acoustic.model_yaml))

    # Vocoder
    if config.vocoder.checkpoint is not None and len(config.vocoder.checkpoint) > 0:
        vocoder, vocoder_in_scaler, vocoder_config = load_vocoder(
            to_absolute_path(config.vocoder.checkpoint),
            device,
            acoustic_config,
        )
    else:
        vocoder, vocoder_in_scaler, vocoder_config = None, None, None
        # Without a vocoder checkpoint, fall back to WORLD synthesis
        if config.synthesis.vocoder_type != "world":
            logger.warning("Vocoder checkpoint is not specified")
            logger.info(f"Use world instead of {config.synthesis.vocoder_type}.")
            config.synthesis.vocoder_type = "world"

    # Run synthesis for each utt.
    in_dir = to_absolute_path(config.in_dir)
    out_dir = to_absolute_path(config.out_dir)
    os.makedirs(out_dir, exist_ok=True)
    utt_ids = load_utt_list(to_absolute_path(config.utt_list))
    logger.info("Processes %s utterances...", len(utt_ids))
    for utt_id in tqdm(utt_ids):
        acoustic_features = np.load(join(in_dir, f"{utt_id}-feats.npy"))
        # Fix the seed per utterance for reproducible sampling in vocoders
        init_seed(1234)
        wav = anasyn(
            device=device,
            acoustic_features=acoustic_features,
            acoustic_config=acoustic_config,
            vocoder=vocoder,
            vocoder_config=vocoder_config,
            vocoder_in_scaler=vocoder_in_scaler,
            sample_rate=config.synthesis.sample_rate,
            frame_period=config.synthesis.frame_period,
            use_world_codec=config.synthesis.use_world_codec,
            feature_type=config.synthesis.feature_type,
            vocoder_type=config.synthesis.vocoder_type,
            vuv_threshold=config.synthesis.vuv_threshold,
        )

        # post_process already returns int16; the astype below is redundant
        # but harmless
        wav = post_process(wav, config.synthesis.sample_rate)
        out_wav_path = join(out_dir, f"{utt_id}.wav")
        wavfile.write(
            out_wav_path, rate=config.synthesis.sample_rate, data=wav.astype(np.int16)
        )
def entry():
    """Wrapper around ``my_app`` (presumably installed as a console-script
    hook — confirm against the package's setup configuration)."""
    my_app()  # pylint: disable=no-value-for-parameter


# Allow running this module directly as a script.
if __name__ == "__main__":
    my_app()  # pylint: disable=no-value-for-parameter
| 7,921 | 33.146552 | 86 | py |
nnsvs | nnsvs-master/nnsvs/layers/conv.py | from torch import nn
from torch.nn.utils import weight_norm
def WNConv1d(*args, **kwargs):
    """Build an ``nn.Conv1d`` wrapped with weight normalization.

    All arguments are forwarded to ``nn.Conv1d``.
    """
    layer = nn.Conv1d(*args, **kwargs)
    return weight_norm(layer)
class ResnetBlock(nn.Module):
    """1-D residual block with a weight-normalized 1x1 shortcut.

    The main path is LeakyReLU -> reflection-padded dilated 3x1 conv ->
    LeakyReLU -> 1x1 conv; its output is summed with a learned shortcut.

    Args:
        dim (int): number of channels (kept constant through the block).
        dilation (int): dilation factor of the 3x1 convolution.
    """

    def __init__(self, dim, dilation=1):
        super().__init__()
        main_path = [
            nn.LeakyReLU(0.2),
            nn.ReflectionPad1d(dilation),
            WNConv1d(dim, dim, kernel_size=3, dilation=dilation),
            nn.LeakyReLU(0.2),
            WNConv1d(dim, dim, kernel_size=1),
        ]
        self.block = nn.Sequential(*main_path)
        self.shortcut = WNConv1d(dim, dim, kernel_size=1)

    def forward(self, x):
        """Return ``shortcut(x) + block(x)``."""
        residual = self.shortcut(x)
        return residual + self.block(x)
| 640 | 26.869565 | 65 | py |
nnsvs | nnsvs-master/nnsvs/layers/layer_norm.py | # Adapted from https://github.com/espnet/espnet
# Copyright 2019 Shigeki Karita
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""Layer normalization module."""
import torch
class LayerNorm(torch.nn.LayerNorm):
    """Layer normalization over an arbitrary dimension.

    Args:
        nout (int): size of the normalized dimension.
        dim (int): dimension to normalize (default: the last one).
    """

    def __init__(self, nout, dim=-1):
        """Construct a LayerNorm that normalizes ``dim``."""
        super().__init__(nout, eps=1e-12)
        self.dim = dim

    def forward(self, x):
        """Apply layer normalization along ``self.dim``.

        Args:
            x (torch.Tensor): input tensor.

        Returns:
            torch.Tensor: normalized tensor with the same shape as ``x``.
        """
        if self.dim == -1:
            return super().forward(x)
        # Move the target dim to the end, normalize, then move it back.
        moved = x.transpose(self.dim, -1)
        normalized = super().forward(moved)
        return normalized.transpose(self.dim, -1)
| 953 | 25.5 | 59 | py |
nnsvs | nnsvs-master/nnsvs/wavenet/modules.py | import torch
from nnsvs.wavenet import conv
from torch import nn
def Conv1d(in_channels, out_channels, kernel_size, *args, **kwargs):
    """Weight-normalized Conv1d layer.

    Extra positional/keyword arguments are forwarded to the underlying
    convolution implementation.
    """
    layer = conv.Conv1d(in_channels, out_channels, kernel_size, *args, **kwargs)
    return nn.utils.weight_norm(layer)
def Conv1d1x1(in_channels, out_channels, bias=True):
    """Weight-normalized convolution with a kernel size of one."""
    return Conv1d(in_channels, out_channels, kernel_size=1, bias=bias)
class ResSkipBlock(nn.Module):
    """Convolution block with residual and skip connections.

    Args:
        residual_channels (int): Residual connection channels.
        gate_channels (int): Gated activation channels.
        kernel_size (int): Kernel size of convolution layers.
        skip_out_channels (int): Skip connection channels.
        dilation (int): Dilation factor.
        cin_channels (int): Local conditioning channels.
        args (list): Additional arguments for Conv1d.
        kwargs (dict): Additional arguments for Conv1d.
    """

    def __init__(
        self,
        residual_channels,  # number of residual-connection channels
        gate_channels,  # number of gate channels
        kernel_size,  # kernel size
        skip_out_channels,  # number of skip-connection channels
        dilation=1,  # dilation factor
        cin_channels=80,  # number of conditioning-feature channels
        *args,
        **kwargs,
    ):
        super().__init__()
        self.padding = (kernel_size - 1) * dilation

        # 1-D dilated convolution (an ordinary 1-D convolution when dilation == 1)
        self.conv = Conv1d(
            residual_channels,
            gate_channels,
            kernel_size,
            *args,
            padding=self.padding,
            dilation=dilation,
            **kwargs,
        )

        # 1x1 convolution for local conditioning
        self.conv1x1c = Conv1d1x1(cin_channels, gate_channels, bias=False)

        # NOTE: the convolution output is split into two halves for the
        # gated activation function
        gate_out_channels = gate_channels // 2
        self.conv1x1_out = Conv1d1x1(gate_out_channels, residual_channels)
        self.conv1x1_skip = Conv1d1x1(gate_out_channels, skip_out_channels)

    def forward(self, x, c):
        """Forward step

        Args:
            x (torch.Tensor): Input signal.
            c (torch.Tensor): Local conditioning signal.

        Returns:
            tuple: Tuple of output signal and skip connection signal
        """
        return self._forward(x, c, False)

    def incremental_forward(self, x, c):
        """Incremental forward

        Args:
            x (torch.Tensor): Input signal.
            c (torch.Tensor): Local conditioning signal.

        Returns:
            tuple: Tuple of output signal and skip connection signal
        """
        return self._forward(x, c, True)

    def _forward(self, x, c, is_incremental):
        # Keep the input for the residual connection
        residual = x

        # Main dilated convolution
        # NOTE: the input tensor layout differs between training and
        # incremental inference
        if is_incremental:
            splitdim = -1  # (B, T, C)
            x = self.conv.incremental_forward(x)
        else:
            splitdim = 1  # (B, C, T)
            x = self.conv(x)
            # Trim the tail of the output to keep the convolution causal
            x = x[:, :, : -self.padding]

        # Split the output along the channel dimension
        a, b = x.split(x.size(splitdim) // 2, dim=splitdim)

        # local conditioning
        c = self._conv1x1_forward(self.conv1x1c, c, is_incremental)
        ca, cb = c.split(c.size(splitdim) // 2, dim=splitdim)
        a, b = a + ca, b + cb

        # Gated activation function
        x = torch.tanh(a) * torch.sigmoid(b)

        # Output for the skip connection
        s = self._conv1x1_forward(self.conv1x1_skip, x, is_incremental)

        # Match dimensions before the element-wise residual sum
        x = self._conv1x1_forward(self.conv1x1_out, x, is_incremental)

        x = x + residual

        return x, s

    def _conv1x1_forward(self, conv, x, is_incremental):
        # Dispatch to the buffered incremental path during inference
        if is_incremental:
            x = conv.incremental_forward(x)
        else:
            x = conv(x)
        return x

    def clear_buffer(self):
        """Clear input buffer."""
        for c in [
            self.conv,
            self.conv1x1_out,
            self.conv1x1_skip,
            self.conv1x1c,
        ]:
            if c is not None:
                c.clear_buffer()
nnsvs | nnsvs-master/nnsvs/wavenet/wavenet.py | import torch
from nnsvs.wavenet.modules import Conv1d1x1, ResSkipBlock
from torch import nn
from torch.nn import functional as F
class WaveNet(nn.Module):
    """WaveNet

    Args:
        in_dim (int): the dimension of the input
        out_dim (int): the dimension of the output
        layers (int): the number of layers
        stacks (int): the number of residual stacks
        residual_channels (int): the number of residual channels
        gate_channels (int): the number of channels for the gating function
        skip_out_channels (int): the number of channels in the skip output
        kernel_size (int): the size of the convolutional kernel
    """

    def __init__(
        self,
        in_dim=334,
        out_dim=206,
        layers=10,
        stacks=1,
        residual_channels=64,
        gate_channels=128,
        skip_out_channels=64,
        kernel_size=3,
    ):
        super().__init__()
        self.in_dim = in_dim
        self.out_dim = out_dim
        self.first_conv = Conv1d1x1(out_dim, residual_channels)
        self.main_conv_layers = nn.ModuleList()
        # Dilation resets at each stack: 1, 2, 4, ... per stack
        layers_per_stack = layers // stacks
        for layer in range(layers):
            dilation = 2 ** (layer % layers_per_stack)
            conv = ResSkipBlock(
                residual_channels,
                gate_channels,
                kernel_size,
                skip_out_channels,
                dilation=dilation,
                cin_channels=in_dim,
            )
            self.main_conv_layers.append(conv)

        # Output head applied to the sum of the skip connections
        self.last_conv_layers = nn.ModuleList(
            [
                nn.ReLU(),
                Conv1d1x1(skip_out_channels, skip_out_channels),
                nn.ReLU(),
                Conv1d1x1(skip_out_channels, out_dim),
            ]
        )

    def forward(self, c, x, lengths=None):
        """Forward step

        Args:
            c (torch.Tensor): the conditional features (B, T, C)
            x (torch.Tensor): the target features (B, T, C)

        Returns:
            torch.Tensor: the output waveform
        """
        # (B, T, C) -> (B, C, T) for the convolutions
        x = x.transpose(1, 2)
        c = c.transpose(1, 2)

        x = self.first_conv(x)
        skips = 0
        for f in self.main_conv_layers:
            x, h = f(x, c)
            skips += h

        x = skips
        for f in self.last_conv_layers:
            x = f(x)

        # (B, C, T) -> (B, T, C)
        x = x.transpose(1, 2)

        return x

    def inference(self, c, num_time_steps=100, tqdm=lambda x: x):
        """Inference step

        Args:
            c (torch.Tensor): the local conditioning feature (B, T, C)
            num_time_steps (int): the number of time steps to generate
            tqdm (lambda): a tqdm function to track progress

        Returns:
            torch.Tensor: the output waveform
        """
        self.clear_buffer()

        # Local conditioning
        B = c.shape[0]

        outputs = []

        # Initial input for autoregressive generation
        current_input = torch.zeros(B, 1, self.out_dim).to(c.device)

        if tqdm is None:
            ts = range(num_time_steps)
        else:
            ts = tqdm(range(num_time_steps))

        # Generate one step at a time
        for t in ts:
            # The input at time t is the output at time t-1
            if t > 0:
                current_input = outputs[-1]

            # Conditioning features at time t
            ct = c[:, t, :].unsqueeze(1)

            x = current_input
            x = self.first_conv.incremental_forward(x)
            skips = 0
            for f in self.main_conv_layers:
                x, h = f.incremental_forward(x, ct)
                skips += h
            x = skips
            for f in self.last_conv_layers:
                if hasattr(f, "incremental_forward"):
                    x = f.incremental_forward(x)
                else:
                    x = f(x)
            # Softmax converts the output into categorical-distribution
            # parameters
            x = F.softmax(x.view(B, -1), dim=1)
            # Sample from the categorical distribution
            x = torch.distributions.OneHotCategorical(x).sample()
            outputs += [x.data]

        # T x B x C
        # Stack the per-step outputs
        outputs = torch.stack(outputs)
        # B x T x C
        outputs = outputs.transpose(0, 1).contiguous()

        self.clear_buffer()

        return outputs

    def clear_buffer(self):
        """Clear the internal buffer."""
        self.first_conv.clear_buffer()
        for f in self.main_conv_layers:
            f.clear_buffer()
        for f in self.last_conv_layers:
            try:
                f.clear_buffer()
            except AttributeError:
                # e.g., plain nn.ReLU has no buffer to clear
                pass

    def remove_weight_norm_(self):
        """Remove weight normalization of the model"""

        def _remove_weight_norm(m):
            try:
                torch.nn.utils.remove_weight_norm(m)
            except ValueError:
                # Module without weight norm; skip
                return

        self.apply(_remove_weight_norm)
| 4,862 | 27.273256 | 75 | py |
nnsvs | nnsvs-master/nnsvs/wavenet/conv.py | import torch
from packaging import version
from torch import nn
from torch.nn import functional as F
torch_is_ge_180 = version.parse(torch.__version__) >= version.parse("1.8.0")
class Conv1d(nn.Conv1d):
    """Extended nn.Conv1d for incremental dilated convolutions

    ``incremental_forward`` generates one output frame at a time by keeping
    an internal buffer of the most recent inputs, so autoregressive
    inference does not need to rerun the whole convolution each step.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.clear_buffer()
        self._linearized_weight = None
        # Invalidate the cached linearized weight whenever gradients flow
        # (weights may change); hook name depends on the torch version.
        if torch_is_ge_180:
            self.register_full_backward_hook(self._clear_linearized_weight)
        else:
            self.register_backward_hook(self._clear_linearized_weight)

    def incremental_forward(self, input):
        """Compute one output step; ``input`` is (B, T, C) — only the last
        frame is consumed; returns (B, 1, C_out)."""
        # input: (B, T, C)
        if self.training:
            raise RuntimeError("incremental_forward only supports eval mode")

        # run forward pre hooks (e.g., weight norm)
        for hook in self._forward_pre_hooks.values():
            hook(self, input)

        # reshape weight
        weight = self._get_linearized_weight()
        kw = self.kernel_size[0]
        dilation = self.dilation[0]

        bsz = input.size(0)  # input: bsz x len x dim
        if kw > 1:
            input = input.data
            if self.input_buffer is None:
                # Buffer holds the dilated receptive field of one step
                self.input_buffer = input.new(
                    bsz, kw + (kw - 1) * (dilation - 1), input.size(2)
                )
                self.input_buffer.zero_()
            else:
                # shift buffer
                self.input_buffer[:, :-1, :] = self.input_buffer[:, 1:, :].clone()
            # append next input
            self.input_buffer[:, -1, :] = input[:, -1, :]
            input = self.input_buffer
            if dilation > 1:
                # Pick every dilation-th frame of the buffer
                input = input[:, 0::dilation, :].contiguous()
        with torch.no_grad():
            # The convolution of a single step reduces to a linear layer
            output = F.linear(input.view(bsz, -1), weight, self.bias)
        return output.view(bsz, 1, -1)

    def clear_buffer(self):
        """Reset the incremental input buffer."""
        self.input_buffer = None

    def _get_linearized_weight(self):
        # Cache the (out_channels, kw * in_channels) view of the weight
        if self._linearized_weight is None:
            kw = self.kernel_size[0]
            # nn.Conv1d
            assert self.weight.size() == (self.out_channels, self.in_channels, kw)
            weight = self.weight.transpose(1, 2).contiguous()
            assert weight.size() == (self.out_channels, kw, self.in_channels)
            self._linearized_weight = weight.view(self.out_channels, -1)
        return self._linearized_weight

    def _clear_linearized_weight(self, *args):
        self._linearized_weight = None
| 2,508 | 34.842857 | 82 | py |
nnsvs | nnsvs-master/nnsvs/tacotron/postnet.py | # Acknowledgement: some of the code was adapted from ESPnet
# Copyright 2019 Nagoya University (Tomoki Hayashi)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
from torch import nn
class Postnet(nn.Module):
    """Post-Net of Tacotron 2

    A stack of 1-D convolutions (with batch norm, tanh, and dropout) that
    refines the decoder output; input and output have ``in_dim`` channels.

    Args:
        in_dim (int): dimension of input
        layers (int): number of layers
        channels (int): number of channels
        kernel_size (int): kernel size
        dropout (float): dropout rate
    """

    def __init__(
        self,
        in_dim,
        layers=5,
        channels=512,
        kernel_size=5,
        dropout=0.5,
    ):
        super().__init__()
        modules = []
        for idx in range(layers):
            is_last = idx == layers - 1
            c_in = in_dim if idx == 0 else channels
            c_out = in_dim if is_last else channels
            modules.append(
                nn.Conv1d(
                    c_in,
                    c_out,
                    kernel_size,
                    stride=1,
                    padding=(kernel_size - 1) // 2,
                    bias=False,
                )
            )
            modules.append(nn.BatchNorm1d(c_out))
            # No tanh after the final convolution
            if not is_last:
                modules.append(nn.Tanh())
            modules.append(nn.Dropout(dropout))
        self.postnet = nn.Sequential(*modules)

    def forward(self, xs):
        """Forward step

        Args:
            xs (torch.Tensor): input sequence, (B, in_dim, T)

        Returns:
            torch.Tensor: output sequence with the same shape
        """
        return self.postnet(xs)
| 1,571 | 25.644068 | 70 | py |
nnsvs | nnsvs-master/nnsvs/tacotron/encoder.py | # The code was adapted from ttslearn https://github.com/r9y9/ttslearn
# Acknowledgement: some of the code was adapted from ESPnet
# Copyright 2019 Nagoya University (Tomoki Hayashi)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
from torch import nn
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
def encoder_init(m):
    """Initialize ``nn.Conv1d`` weights with Xavier-uniform (ReLU gain).

    Intended for use with ``nn.Module.apply``; any non-Conv1d module is
    left untouched.
    """
    if not isinstance(m, nn.Conv1d):
        return
    nn.init.xavier_uniform_(m.weight, nn.init.calculate_gain("relu"))
class Encoder(nn.Module):
    """Encoder of Tacotron 2

    A stack of 1-D convolution blocks followed by a single bidirectional
    LSTM layer.

    Args:
        in_dim (int): dimension of embeddings
        hidden_dim (int): dimension of hidden units
        conv_layers (int): number of convolutional layers
        conv_channels (int): number of convolutional channels
        conv_kernel_size (int): size of convolutional kernel
        dropout (float): dropout rate
    """

    def __init__(
        self,
        in_dim=512,
        hidden_dim=512,
        conv_layers=3,
        conv_channels=512,
        conv_kernel_size=5,
        dropout=0.5,
    ):
        super(Encoder, self).__init__()
        modules = nn.ModuleList()
        for idx in range(conv_layers):
            conv_in = in_dim if idx == 0 else conv_channels
            modules += [
                nn.Conv1d(
                    conv_in,
                    conv_channels,
                    conv_kernel_size,
                    padding=(conv_kernel_size - 1) // 2,
                    bias=False,
                ),
                nn.BatchNorm1d(conv_channels),
                nn.ReLU(),
                nn.Dropout(dropout),
            ]
        self.convs = nn.Sequential(*modules)
        # Bidirectional LSTM: each direction gets half of hidden_dim
        self.blstm = nn.LSTM(
            conv_channels, hidden_dim // 2, 1, batch_first=True, bidirectional=True
        )
        self.apply(encoder_init)

    def forward(self, seqs, in_lens):
        """Forward step

        Args:
            seqs (torch.Tensor): input sequences (B, T, C)
            in_lens (torch.Tensor): input sequence lengths

        Returns:
            torch.Tensor: encoded sequences
        """
        conv_out = self.convs(seqs.transpose(1, 2)).transpose(1, 2)
        if not isinstance(in_lens, list):
            # pack_padded_sequence requires lengths on the CPU
            in_lens = in_lens.to("cpu")
        packed = pack_padded_sequence(conv_out, in_lens, batch_first=True)
        packed_out, _ = self.blstm(packed)
        encoded, _ = pad_packed_sequence(packed_out, batch_first=True)
        return encoded
| 2,409 | 29.897436 | 83 | py |
nnsvs | nnsvs-master/nnsvs/tacotron/decoder.py | # The code was adapted from ttslearn https://github.com/r9y9/ttslearn
# NonAttentiveDecoder is added to the original code.
# Acknowledgement: some of the code was adapted from ESPnet
# Copyright 2019 Nagoya University (Tomoki Hayashi)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
import torch
import torch.nn.functional as F
from nnsvs.base import BaseModel, PredictionType
from nnsvs.mdn import MDNLayer, mdn_get_most_probable_sigma_and_mu, mdn_get_sample
from nnsvs.util import init_weights
from torch import nn
def decoder_init(m):
    """Initialize ``nn.Conv1d`` weights with Xavier-uniform (tanh gain).

    Intended for use with ``nn.Module.apply``; any non-Conv1d module is
    left untouched.
    """
    if not isinstance(m, nn.Conv1d):
        return
    nn.init.xavier_uniform_(m.weight, nn.init.calculate_gain("tanh"))
class ZoneOutCell(nn.Module):
    """RNN cell wrapper that applies zoneout regularization.

    During training, each element of the new hidden/cell state is replaced
    by its previous value with probability ``zoneout``; at eval time the
    two states are blended deterministically with the same probability.

    Args:
        cell: wrapped RNN cell (e.g., ``nn.LSTMCell``).
        zoneout (float): zoneout probability.
    """

    def __init__(self, cell, zoneout=0.1):
        super().__init__()
        self.cell = cell
        self.hidden_size = cell.hidden_size
        self.zoneout = zoneout

    def forward(self, inputs, hidden):
        """Run the wrapped cell, then apply zoneout to its new states."""
        new_hidden = self.cell(inputs, hidden)
        return self._zoneout(hidden, new_hidden, self.zoneout)

    def _zoneout(self, h, next_h, prob):
        # Apply zoneout to hidden and cell states independently.
        h_prev, c_prev = h
        h_new, c_new = next_h
        return (
            self._apply_zoneout(h_prev, h_new, prob),
            self._apply_zoneout(c_prev, c_new, prob),
        )

    def _apply_zoneout(self, h, next_h, prob):
        if not self.training:
            # Deterministic blend at eval time.
            return prob * h + (1 - prob) * next_h
        # Bernoulli mask: 1 keeps the previous state, 0 takes the new one.
        mask = h.new(*h.size()).bernoulli_(prob) if prob > 0.0 else 0
        return mask * h + (1 - mask) * next_h
class Prenet(nn.Module):
    """Pre-Net of Tacotron.

    Dropout is applied after every sub-module and, when ``eval_dropout``
    is True (the default), stays active even at inference time.

    Args:
        in_dim (int) : dimension of input
        layers (int) : number of pre-net layers
        hidden_dim (int) : dimension of hidden layer
        dropout (float) : dropout rate
        eval_dropout (bool) : keep dropout enabled at inference time
    """

    def __init__(
        self, in_dim, layers=2, hidden_dim=256, dropout=0.5, eval_dropout=True
    ):
        super().__init__()
        self.dropout = dropout
        self.eval_dropout = eval_dropout
        modules = []
        for idx in range(layers):
            fan_in = in_dim if idx == 0 else hidden_dim
            modules.append(nn.Linear(fan_in, hidden_dim))
            modules.append(nn.ReLU())
        self.prenet = nn.Sequential(*modules)

    def forward(self, x):
        """Apply the pre-net with (possibly always-on) dropout.

        Args:
            x (torch.Tensor) : input tensor

        Returns:
            torch.Tensor : output tensor
        """
        for module in self.prenet:
            out = module(x)
            if self.eval_dropout:
                x = F.dropout(out, self.dropout, training=True)
            else:
                x = F.dropout(out, self.dropout, training=self.training)
        return x
class NonAttentiveDecoder(BaseModel):
"""Decoder of Tacotron w/o attention mechanism
Args:
in_dim (int) : dimension of encoder hidden layer
out_dim (int) : dimension of output
layers (int) : number of LSTM layers
hidden_dim (int) : dimension of hidden layer
prenet_layers (int) : number of pre-net layers
prenet_hidden_dim (int) : dimension of pre-net hidden layer
prenet_dropout (float) : dropout rate of pre-net
zoneout (float) : zoneout rate
reduction_factor (int) : reduction factor
attention_hidden_dim (int) : dimension of attention hidden layer
attention_conv_channel (int) : number of attention convolution channels
attention_conv_kernel_size (int) : kernel size of attention convolution
downsample_by_conv (bool) : if True, downsample by convolution
initial_value (float) : initial value for the autoregressive decoder.
"""
def __init__(
self,
in_dim=512,
out_dim=80,
layers=2,
hidden_dim=1024,
prenet_layers=2,
prenet_hidden_dim=256,
prenet_dropout=0.5,
zoneout=0.1,
reduction_factor=1,
downsample_by_conv=False,
init_type="none",
eval_dropout=True,
prenet_noise_std=0.0,
initial_value=0.0,
):
super().__init__()
self.out_dim = out_dim
self.reduction_factor = reduction_factor
self.prenet_dropout = prenet_dropout
self.prenet_noise_std = prenet_noise_std
self.initial_value = initial_value
if prenet_layers > 0:
self.prenet = Prenet(
out_dim,
prenet_layers,
prenet_hidden_dim,
prenet_dropout,
eval_dropout=eval_dropout,
)
lstm_in_dim = in_dim + prenet_hidden_dim
else:
self.prenet = None
prenet_hidden_dim = 0
lstm_in_dim = in_dim + out_dim
self.lstm = nn.ModuleList()
for layer in range(layers):
lstm = nn.LSTMCell(
lstm_in_dim if layer == 0 else hidden_dim,
hidden_dim,
)
self.lstm += [ZoneOutCell(lstm, zoneout)]
proj_in_dim = in_dim + hidden_dim
self.feat_out = nn.Linear(proj_in_dim, out_dim * reduction_factor, bias=False)
if reduction_factor > 1 and downsample_by_conv:
self.conv_downsample = nn.Conv1d(
in_dim,
in_dim,
kernel_size=reduction_factor,
stride=reduction_factor,
groups=in_dim,
)
else:
self.conv_downsample = None
init_weights(self, init_type)
def _zero_state(self, hs):
init_hs = hs.new_zeros(hs.size(0), self.lstm[0].hidden_size)
return init_hs
def is_autoregressive(self):
return True
    def forward(self, encoder_outs, in_lens, decoder_targets=None):
        """Forward step

        Runs in teacher-forcing mode when ``decoder_targets`` is given, and in
        autoregressive inference mode when it is ``None``.

        Args:
            encoder_outs (torch.Tensor): encoder outputs (B, T, C)
            in_lens (torch.Tensor): input lengths (unused in this implementation)
            decoder_targets (torch.Tensor): decoder targets for teacher-forcing. (B, T, C)

        Returns:
            torch.Tensor: the output (B, T, out_dim)
        """
        is_inference = decoder_targets is None
        if not is_inference:
            assert encoder_outs.shape[1] == decoder_targets.shape[1]
        # Adjust number of frames according to the reduction factor
        # (B, Lmax, out_dim) -> (B, Lmax/r, out_dim)
        if self.reduction_factor > 1 and not is_inference:
            decoder_targets = decoder_targets[
                :, self.reduction_factor - 1 :: self.reduction_factor
            ]
        # Downsample encoder outputs by the reduction factor, either with the
        # depthwise conv or by simple frame subsampling.
        if self.reduction_factor > 1:
            if self.conv_downsample is not None:
                encoder_outs = self.conv_downsample(
                    encoder_outs.transpose(1, 2)
                ).transpose(1, 2)
            else:
                encoder_outs = encoder_outs[
                    :, self.reduction_factor - 1 :: self.reduction_factor
                ]
        # Zero-initialize hidden/cell states for every LSTM layer.
        h_list, c_list = [], []
        for _ in range(len(self.lstm)):
            h_list.append(self._zero_state(encoder_outs))
            c_list.append(self._zero_state(encoder_outs))
        # The first decoder input ("go" frame) is a constant-valued frame.
        go_frame = (
            encoder_outs.new_zeros(encoder_outs.size(0), self.out_dim)
            + self.initial_value
        )
        prev_out = go_frame
        # In teacher-forcing mode the pre-net can be applied to all target
        # frames at once, before the time loop.
        if not is_inference and self.prenet is not None:
            prenet_outs = self.prenet(decoder_targets)
        outs = []
        for t in range(encoder_outs.shape[1]):
            # Pre-Net
            if self.prenet is not None:
                if is_inference:
                    prenet_out = self.prenet(prev_out)
                else:
                    prenet_out = prenet_outs[:, t, :]
            elif self.prenet_noise_std > 0:
                # No pre-net: perturb the feedback frame with Gaussian noise.
                prenet_out = (
                    prev_out + torch.randn_like(prev_out) * self.prenet_noise_std
                )
            else:
                # No pre-net: dropout on the feedback frame. Note training=True
                # keeps dropout active at inference time as well (intentional).
                prenet_out = F.dropout(prev_out, self.prenet_dropout, training=True)
            # LSTM
            xs = torch.cat([encoder_outs[:, t], prenet_out], dim=1)
            h_list[0], c_list[0] = self.lstm[0](xs, (h_list[0], c_list[0]))
            for i in range(1, len(self.lstm)):
                h_list[i], c_list[i] = self.lstm[i](
                    h_list[i - 1], (h_list[i], c_list[i])
                )
            # Output: project [last hidden; encoder frame] to r output frames.
            hcs = torch.cat([h_list[-1], encoder_outs[:, t]], dim=1)
            outs.append(self.feat_out(hcs).view(encoder_outs.size(0), self.out_dim, -1))
            # Update decoder input for the next time step
            if is_inference:
                prev_out = outs[-1][:, :, -1]  # (B, out_dim); last of r frames
            else:
                # Teacher forcing
                prev_out = decoder_targets[:, t, :]
        outs = torch.cat(outs, dim=2)  # (B, out_dim, Lmax)
        if self.reduction_factor > 1:
            outs = outs.view(outs.size(0), self.out_dim, -1)  # (B, out_dim, Lmax)
        # (B, C, T) -> (B, T, C)
        return outs.transpose(1, 2)
class MDNNonAttentiveDecoder(BaseModel):
    """Non-attentive decoder with MDN

    Each decoder step outputs the parameters of MDN.

    Args:
        in_dim (int): input dimension
        out_dim (int): output dimension
        layers (int): number of LSTM layers
        hidden_dim (int): hidden dimension
        prenet_layers (int): number of prenet layers
        prenet_hidden_dim (int): prenet hidden dimension
        prenet_dropout (float): prenet dropout rate
        zoneout (float): zoneout rate
        reduction_factor (int): reduction factor
        downsample_by_conv (bool): if True, use conv1d to downsample the input
        num_gaussians (int): number of Gaussians
        sampling_mode (str): sampling mode ("mean" or "random")
        init_type (str): initialization type
        eval_dropout (bool): if True, use dropout in evaluation
        prenet_noise_std (float): std of Gaussian noise added to the feedback
            frame when no pre-net is used
        initial_value (float) : initial value for the autoregressive decoder.
    """

    def __init__(
        self,
        in_dim=512,
        out_dim=80,
        layers=2,
        hidden_dim=1024,
        prenet_layers=2,
        prenet_hidden_dim=256,
        prenet_dropout=0.5,
        zoneout=0.1,
        reduction_factor=1,
        downsample_by_conv=False,
        num_gaussians=8,
        sampling_mode="mean",
        init_type="none",
        eval_dropout=True,
        prenet_noise_std=0.0,
        initial_value=0.0,
    ):
        super().__init__()
        self.out_dim = out_dim
        self.reduction_factor = reduction_factor
        self.prenet_dropout = prenet_dropout
        self.prenet_noise_std = prenet_noise_std
        self.num_gaussians = num_gaussians
        self.sampling_mode = sampling_mode
        assert sampling_mode in ["mean", "random"]
        self.initial_value = initial_value
        # Pre-net on the previous output frame; without it the raw previous
        # frame is fed back and the LSTM input becomes in_dim + out_dim.
        if prenet_layers > 0:
            self.prenet = Prenet(
                out_dim,
                prenet_layers,
                prenet_hidden_dim,
                prenet_dropout,
                eval_dropout=eval_dropout,
            )
            lstm_in_dim = in_dim + prenet_hidden_dim
        else:
            self.prenet = None
            prenet_hidden_dim = 0
            lstm_in_dim = in_dim + out_dim
        # Stack of unidirectional LSTM cells with zoneout regularization.
        self.lstm = nn.ModuleList()
        for layer in range(layers):
            lstm = nn.LSTMCell(
                lstm_in_dim if layer == 0 else hidden_dim,
                hidden_dim,
            )
            self.lstm += [ZoneOutCell(lstm, zoneout)]
        # MDN head over [last hidden; encoder frame]; dim_wise=True gives
        # per-dimension mixture parameters.
        proj_in_dim = in_dim + hidden_dim
        self.feat_out = MDNLayer(
            proj_in_dim,
            out_dim * reduction_factor,
            num_gaussians=num_gaussians,
            dim_wise=True,
        )
        # Optional learned depthwise-conv downsampling of encoder outputs;
        # otherwise forward() uses plain frame subsampling.
        if reduction_factor > 1 and downsample_by_conv:
            self.conv_downsample = nn.Conv1d(
                in_dim,
                in_dim,
                kernel_size=reduction_factor,
                stride=reduction_factor,
                groups=in_dim,
            )
        else:
            self.conv_downsample = None
        init_weights(self, init_type)

    def _zero_state(self, hs):
        # Zero hidden state matching the batch/device/dtype of ``hs``.
        init_hs = hs.new_zeros(hs.size(0), self.lstm[0].hidden_size)
        return init_hs

    def is_autoregressive(self):
        # The decoder feeds its own (sampled) output back at inference time.
        return True

    def prediction_type(self):
        return PredictionType.PROBABILISTIC

    def forward(self, encoder_outs, in_lens, decoder_targets=None):
        """Forward step

        Teacher-forcing mode when ``decoder_targets`` is given; autoregressive
        inference mode when it is ``None``.

        Args:
            encoder_outs (torch.Tensor): encoder outputs (B, T, C)
            in_lens (torch.Tensor): input lengths (unused in this implementation)
            decoder_targets (torch.Tensor): decoder targets for teacher-forcing (B, T, C)

        Returns:
            tuple: at inference, ``(mu, mu)`` of shape (B, T, out_dim); at
                training, ``(log_pi, log_sigma, mu)`` each of shape
                (B, T, num_gaussians, out_dim).
        """
        is_inference = decoder_targets is None
        if not is_inference:
            assert encoder_outs.shape[1] == decoder_targets.shape[1]
        # Adjust number of frames according to the reduction factor
        # (B, Lmax, out_dim) -> (B, Lmax/r, out_dim)
        if self.reduction_factor > 1 and not is_inference:
            decoder_targets = decoder_targets[
                :, self.reduction_factor - 1 :: self.reduction_factor
            ]
        # Downsample encoder outputs by the reduction factor.
        if self.reduction_factor > 1:
            if self.conv_downsample is not None:
                encoder_outs = self.conv_downsample(
                    encoder_outs.transpose(1, 2)
                ).transpose(1, 2)
            else:
                encoder_outs = encoder_outs[
                    :, self.reduction_factor - 1 :: self.reduction_factor
                ]
        # Zero-initialize hidden/cell states for every LSTM layer.
        h_list, c_list = [], []
        for _ in range(len(self.lstm)):
            h_list.append(self._zero_state(encoder_outs))
            c_list.append(self._zero_state(encoder_outs))
        # The first decoder input ("go" frame) is a constant-valued frame.
        go_frame = (
            encoder_outs.new_zeros(encoder_outs.size(0), self.out_dim)
            + self.initial_value
        )
        prev_out = go_frame
        # In teacher-forcing mode the pre-net runs over all targets at once.
        if not is_inference and self.prenet is not None:
            prenet_outs = self.prenet(decoder_targets)
        mus = []
        log_pis = []
        log_sigmas = []
        # NOTE: only used for inference
        mus_inf = []
        for t in range(encoder_outs.shape[1]):
            # Pre-Net
            if self.prenet is not None:
                if is_inference:
                    prenet_out = self.prenet(prev_out)
                else:
                    prenet_out = prenet_outs[:, t, :]
            elif self.prenet_noise_std > 0:
                # No pre-net: perturb the feedback frame with Gaussian noise.
                prenet_out = (
                    prev_out + torch.randn_like(prev_out) * self.prenet_noise_std
                )
            else:
                # No pre-net: dropout on the feedback frame, also at inference.
                prenet_out = F.dropout(prev_out, self.prenet_dropout, training=True)
            # LSTM
            xs = torch.cat([encoder_outs[:, t], prenet_out], dim=1)
            h_list[0], c_list[0] = self.lstm[0](xs, (h_list[0], c_list[0]))
            for i in range(1, len(self.lstm)):
                h_list[i], c_list[i] = self.lstm[i](
                    h_list[i - 1], (h_list[i], c_list[i])
                )
            # Output
            hcs = torch.cat([h_list[-1], encoder_outs[:, t]], dim=1)
            log_pi, log_sigma, mu = self.feat_out(hcs.unsqueeze(1))
            # (B, 1, num_gaussians, out_dim*reduction_factor)
            # -> (B, reduction_factor, num_gaussians, out_dim)
            log_pi = (
                log_pi.transpose(1, 2)
                .view(encoder_outs.size(0), self.num_gaussians, -1, self.out_dim)
                .transpose(1, 2)
            )
            log_sigma = (
                log_sigma.transpose(1, 2)
                .view(encoder_outs.size(0), self.num_gaussians, -1, self.out_dim)
                .transpose(1, 2)
            )
            mu = (
                mu.transpose(1, 2)
                .view(encoder_outs.size(0), self.num_gaussians, -1, self.out_dim)
                .transpose(1, 2)
            )
            mus.append(mu)
            log_pis.append(log_pi)
            log_sigmas.append(log_sigma)
            # Update decoder input for the next time step
            if is_inference:
                # (B, reduction_factor, out_dim)
                if self.sampling_mode == "mean":
                    _, mu = mdn_get_most_probable_sigma_and_mu(log_pi, log_sigma, mu)
                elif self.sampling_mode == "random":
                    mu = mdn_get_sample(log_pi, log_sigma, mu)
                # Feed last sample for the feedback loop
                prev_out = mu[:, -1]
                mus_inf.append(mu)
            else:
                # Teacher forcing
                prev_out = decoder_targets[:, t, :]
        mus = torch.cat(mus, dim=1)  # (B, Lmax, num_gaussians, out_dim)
        log_pis = torch.cat(log_pis, dim=1)  # (B, Lmax, num_gaussians, out_dim)
        log_sigmas = torch.cat(log_sigmas, dim=1)  # (B, Lmax, num_gaussians, out_dim)
        if is_inference:
            mu = torch.cat(mus_inf, dim=1)
            # TODO: may need to track sigma. For now we only use mu
            return mu, mu
        else:
            return log_pis, log_sigmas, mus
| 16,660 | 33.281893 | 90 | py |
nnsvs | nnsvs-master/nnsvs/usfgan/__init__.py | import numpy as np
import torch
from nnsvs.usfgan.utils import SignalGenerator, dilated_factor
from torch import nn
class USFGANWrapper(nn.Module):
    """Inference wrapper for a trained uSFGAN/SiFiGAN generator.

    Bundles the generator with its training config so that the excitation
    signal and dilation factors can be reconstructed at synthesis time.

    Args:
        config: full vocoder training config (accessed via ``config.data`` /
            ``config.generator``)
        generator (nn.Module): trained uSFGAN or SiFiGAN generator
    """

    def __init__(self, config, generator):
        super().__init__()
        self.generator = generator
        self.config = config

    def inference(self, f0, aux_feats):
        """Inference for USFGAN

        Args:
            f0 (numpy.ndarray): F0 (T, 1)
            aux_feats (Tensor): Auxiliary features (T, C)

        Returns:
            Tensor: generated waveform
        """
        # Source excitation (sine/noise) generator built from the data config.
        # NOTE: rebuilt on every call; cheap, but could be cached.
        signal_generator = SignalGenerator(
            sample_rate=self.config.data.sample_rate,
            hop_size=self.config.data.hop_size,
            sine_amp=self.config.data.sine_amp,
            noise_amp=self.config.data.noise_amp,
            signal_types=self.config.data.signal_types,
        )
        assert self.config.data.sine_f0_type in ["contf0", "cf0", "f0"]
        assert self.config.data.df_f0_type in ["contf0", "cf0", "f0"]
        device = aux_feats.device
        # SiFiGAN configs have no ``aux_context_window`` entry, while uSFGAN
        # configs do; use its absence to tell the two generator types apart.
        is_sifigan = "aux_context_window" not in self.config.generator
        if is_sifigan:
            # SiFiGAN: one dilated-factor sequence per resolution, each
            # upsampled by the cumulative upsampling scale.
            dfs = []
            for df, us in zip(
                self.config.data.dense_factors,
                np.cumprod(self.config.generator.upsample_scales),
            ):
                dfs += [
                    np.repeat(
                        dilated_factor(f0.copy(), self.config.data.sample_rate, df), us
                    )
                ]
            df = [
                torch.FloatTensor(np.array(df)).view(1, 1, -1).to(device) for df in dfs
            ]
            c = aux_feats.unsqueeze(0).transpose(2, 1).to(device)
        else:
            # uSFGAN: a single dilated-factor sequence repeated to sample rate,
            # and auxiliary features padded by the aux context window.
            df = dilated_factor(
                np.squeeze(f0.copy()),
                self.config.data.sample_rate,
                self.config.data.dense_factor,
            )
            df = df.repeat(self.config.data.hop_size, axis=0)
            pad_fn = nn.ReplicationPad1d(self.config.generator.aux_context_window)
            c = pad_fn(aux_feats.unsqueeze(0).transpose(2, 1)).to(device)
            df = torch.FloatTensor(df).view(1, 1, -1).to(device)
        # (T, 1) -> (1, 1, T)
        f0 = torch.FloatTensor(f0).unsqueeze(0).transpose(2, 1).to(device)
        in_signal = signal_generator(f0)
        y = self.generator(in_signal, c, df)[0]
        return y
| 2,281 | 33.575758 | 87 | py |
nnsvs | nnsvs-master/nnsvs/usfgan/models/discriminator.py | # -*- coding: utf-8 -*-
# Copyright 2022 Reo Yoneyama (Nagoya University)
# MIT License (https://opensource.org/licenses/MIT)
"""Discriminator modules.
References:
- https://github.com/bigpon/QPPWG
- https://github.com/jik876/hifi-gan
"""
import copy
from logging import getLogger
from tkinter import W
# A logger for this file
logger = getLogger(__name__)
import torch
import torch.nn as nn
import torch.nn.functional as F
from nnsvs.usfgan.layers import Conv1d, Conv2d
from torchaudio.functional import spectrogram
class PWGDiscriminator(nn.Module):
    """Parallel WaveGAN Discriminator module.

    A stack of non-causal dilated 1D convolutions mapping a waveform to a
    sample-level real/fake score sequence.
    """

    def __init__(
        self,
        in_channels=1,
        out_channels=1,
        kernel_size=3,
        layers=10,
        conv_channels=64,
        dilation_factor=1,
        nonlinear_activation="LeakyReLU",
        nonlinear_activation_params={"negative_slope": 0.2},
        bias=True,
        use_weight_norm=True,
    ):
        """Initialize Parallel WaveGAN Discriminator module.

        Args:
            in_channels (int): Number of input channels.
            out_channels (int): Number of output channels.
            kernel_size (int): Kernel size of the conv layers (must be odd).
            layers (int): Number of conv layers.
            conv_channels (int): Number of channels of hidden conv layers.
            dilation_factor (int): Dilation factor. For example, if dilation_factor = 2,
                the dilation will be 2, 4, 8, ..., and so on.
            nonlinear_activation (str): Nonlinear function after each conv.
            nonlinear_activation_params (dict): Nonlinear function parameters
            bias (bool): Whether to use bias parameter in conv.
            use_weight_norm (bool) Whether to use weight norm.
                If set to true, it will be applied to all of the conv layers.
        """
        super(PWGDiscriminator, self).__init__()
        assert (kernel_size - 1) % 2 == 0, "Not support even number kernel size."
        assert dilation_factor > 0, "Dilation factor must be > 0."
        self.conv_layers = nn.ModuleList()
        conv_in_channels = in_channels
        for i in range(layers - 1):
            if i == 0:
                dilation = 1
            else:
                # linear dilation growth (1, 2, 3, ...) when dilation_factor
                # is 1, exponential growth otherwise
                dilation = i if dilation_factor == 1 else dilation_factor ** i
                conv_in_channels = conv_channels
            # "same" padding for the dilated convolution
            padding = (kernel_size - 1) // 2 * dilation
            conv_layer = [
                Conv1d(
                    conv_in_channels,
                    conv_channels,
                    kernel_size=kernel_size,
                    padding=padding,
                    dilation=dilation,
                    bias=bias,
                ),
                getattr(nn, nonlinear_activation)(
                    inplace=True, **nonlinear_activation_params
                ),
            ]
            self.conv_layers += conv_layer
        padding = (kernel_size - 1) // 2
        # final projection to the score sequence (no activation)
        conv_last_layer = Conv1d(
            conv_in_channels,
            out_channels,
            kernel_size=kernel_size,
            padding=padding,
            bias=bias,
        )
        self.conv_layers += [conv_last_layer]
        # apply weight norm
        if use_weight_norm:
            self.apply_weight_norm()

    def forward(self, x, return_fmaps=False):
        """Calculate forward propagation.

        Args:
            x (Tensor): Input noise signal (B, 1, T).
            return_fmaps (bool): Whether to return feature maps.

        Returns:
            Tensor: Output tensor (B, 1, T)
        """
        fmaps = []
        for f in self.conv_layers:
            x = f(x)
            if return_fmaps:
                fmaps.append(x)

        if return_fmaps:
            return [x], fmaps
        else:
            return [x]

    def apply_weight_norm(self):
        """Apply weight normalization module from all of the layers."""

        def _apply_weight_norm(m):
            if isinstance(m, nn.Conv1d) or isinstance(m, nn.Conv2d):
                nn.utils.weight_norm(m)
                logger.debug(f"Weight norm is applied to {m}.")

        self.apply(_apply_weight_norm)

    def remove_weight_norm(self):
        """Remove weight normalization module from all of the layers."""

        def _remove_weight_norm(m):
            try:
                logger.debug(f"Weight norm is removed from {m}.")
                nn.utils.remove_weight_norm(m)
            except ValueError:  # this module didn't have weight norm
                return

        self.apply(_remove_weight_norm)
class HiFiGANPeriodDiscriminator(nn.Module):
    """HiFiGAN period discriminator module.

    Folds the waveform into a 2D view of shape (T/period, period) and applies
    strided 2D convolutions along the time axis.
    """

    def __init__(
        self,
        in_channels=1,
        out_channels=1,
        period=3,
        kernel_sizes=[5, 3],
        channels=32,
        downsample_scales=[3, 3, 3, 3, 1],
        max_downsample_channels=1024,
        bias=True,
        nonlinear_activation="LeakyReLU",
        nonlinear_activation_params={"negative_slope": 0.1},
        use_weight_norm=True,
        use_spectral_norm=False,
    ):
        """Initialize HiFiGANPeriodDiscriminator module.

        Args:
            in_channels (int): Number of input channels.
            out_channels (int): Number of output channels.
            period (int): Period.
            kernel_sizes (list): Kernel sizes of initial conv layers and the final conv layer.
            channels (int): Number of initial channels.
            downsample_scales (list): List of downsampling scales.
            max_downsample_channels (int): Number of maximum downsampling channels.
            bias (bool): Whether to add bias parameter in convolution layers.
            nonlinear_activation (str): Activation function module name.
            nonlinear_activation_params (dict): Hyperparameters for activation function.
            use_weight_norm (bool): Whether to use weight norm.
                If set to true, it will be applied to all of the conv layers.
            use_spectral_norm (bool): Whether to use spectral norm.
                If set to true, it will be applied to all of the conv layers.
        """
        super().__init__()
        assert len(kernel_sizes) == 2
        assert kernel_sizes[0] % 2 == 1, "Kernel size must be odd number."
        assert kernel_sizes[1] % 2 == 1, "Kernel size must be odd number."

        self.period = period
        self.convs = nn.ModuleList()
        in_chs = in_channels
        out_chs = channels
        # strided convs downsample only the time (T/period) axis
        for downsample_scale in downsample_scales:
            self.convs += [
                nn.Sequential(
                    nn.Conv2d(
                        in_chs,
                        out_chs,
                        (kernel_sizes[0], 1),
                        (downsample_scale, 1),
                        padding=((kernel_sizes[0] - 1) // 2, 0),
                    ),
                    getattr(nn, nonlinear_activation)(**nonlinear_activation_params),
                )
            ]
            in_chs = out_chs
            # NOTE(kan-bayashi): Use downsample_scale + 1?
            out_chs = min(out_chs * 4, max_downsample_channels)
        self.output_conv = nn.Conv2d(
            out_chs,
            out_channels,
            (kernel_sizes[1] - 1, 1),
            1,
            padding=((kernel_sizes[1] - 1) // 2, 0),
        )

        if use_weight_norm and use_spectral_norm:
            raise ValueError("Either use use_weight_norm or use_spectral_norm.")

        # apply weight norm
        if use_weight_norm:
            self.apply_weight_norm()

        # apply spectral norm
        if use_spectral_norm:
            self.apply_spectral_norm()

    def forward(self, x, return_fmaps=False):
        """Calculate forward propagation.

        Args:
            x (Tensor): Input tensor (B, in_channels, T).
            return_fmaps (bool): Whether to return feature maps.

        Returns:
            list: List of each layer's tensors.
        """
        # transform 1d to 2d -> (B, C, T/P, P)
        b, c, t = x.shape
        if t % self.period != 0:
            n_pad = self.period - (t % self.period)
            x = F.pad(x, (0, n_pad), "reflect")
            t += n_pad
        x = x.view(b, c, t // self.period, self.period)

        # forward conv
        fmap = []
        for f in self.convs:
            x = f(x)
            if return_fmaps:
                fmap.append(x)
        x = self.output_conv(x)
        out = torch.flatten(x, 1, -1)

        if return_fmaps:
            return out, fmap
        else:
            return out

    def apply_weight_norm(self):
        """Apply weight normalization module from all of the layers."""

        def _apply_weight_norm(m):
            if isinstance(m, nn.Conv2d):
                nn.utils.weight_norm(m)
                logger.debug(f"Weight norm is applied to {m}.")

        self.apply(_apply_weight_norm)

    def apply_spectral_norm(self):
        """Apply spectral normalization module from all of the layers."""

        def _apply_spectral_norm(m):
            if isinstance(m, nn.Conv2d):
                nn.utils.spectral_norm(m)
                logger.debug(f"Spectral norm is applied to {m}.")

        self.apply(_apply_spectral_norm)
class HiFiGANMultiPeriodDiscriminator(nn.Module):
    """HiFiGAN multi-period discriminator module.

    An ensemble of :class:`HiFiGANPeriodDiscriminator`, one per period.
    """

    def __init__(
        self,
        periods=[2, 3, 5, 7, 11],
        discriminator_params={
            "in_channels": 1,
            "out_channels": 1,
            "kernel_sizes": [5, 3],
            "channels": 32,
            "downsample_scales": [3, 3, 3, 3, 1],
            "max_downsample_channels": 1024,
            "bias": True,
            "nonlinear_activation": "LeakyReLU",
            "nonlinear_activation_params": {"negative_slope": 0.1},
            "use_weight_norm": True,
            "use_spectral_norm": False,
        },
    ):
        """Initialize HiFiGANMultiPeriodDiscriminator module.

        Args:
            periods (list): List of periods.
            discriminator_params (dict): Parameters for hifi-gan period discriminator module.
                The period parameter will be overwritten.
        """
        super().__init__()
        self.discriminators = nn.ModuleList()
        for period in periods:
            # each sub-discriminator gets its own copy of the params with the
            # period entry replaced
            sub_params = copy.deepcopy(discriminator_params)
            sub_params["period"] = period
            self.discriminators.append(HiFiGANPeriodDiscriminator(**sub_params))

    def forward(self, x, return_fmaps=False):
        """Calculate forward propagation.

        Args:
            x (Tensor): Input noise signal (B, 1, T).
            return_fmaps (bool): Whether to return feature maps.

        Returns:
            List: List of each discriminator's outputs (and, optionally, the
                concatenated per-layer feature maps).
        """
        outs = []
        fmaps = []
        for disc in self.discriminators:
            if return_fmaps:
                out, fmap = disc(x, return_fmaps)
                fmaps.extend(fmap)
            else:
                out = disc(x)
            outs.append(out)

        return (outs, fmaps) if return_fmaps else outs
class HiFiGANScaleDiscriminator(nn.Module):
    """HiFi-GAN scale discriminator module.

    A stack of strided/grouped 1D convolutions operating directly on the
    (possibly average-pooled) waveform.
    """

    def __init__(
        self,
        in_channels=1,
        out_channels=1,
        kernel_sizes=[15, 41, 5, 3],
        channels=128,
        max_downsample_channels=1024,
        max_groups=16,
        bias=True,
        downsample_scales=[2, 2, 4, 4, 1],
        nonlinear_activation="LeakyReLU",
        nonlinear_activation_params={"negative_slope": 0.1},
        use_weight_norm=True,
        use_spectral_norm=False,
    ):
        """Initialize HiFiGAN scale discriminator module.

        Args:
            in_channels (int): Number of input channels.
            out_channels (int): Number of output channels.
            kernel_sizes (list): List of four kernel sizes. The first will be used for the first conv layer,
                and the second is for downsampling part, and the remaining two are for output layers.
            channels (int): Initial number of channels for conv layer.
            max_downsample_channels (int): Maximum number of channels for downsampling layers.
            max_groups (int): Maximum number of groups for grouped convolutions.
            bias (bool): Whether to add bias parameter in convolution layers.
            downsample_scales (list): List of downsampling scales.
            nonlinear_activation (str): Activation function module name.
            nonlinear_activation_params (dict): Hyperparameters for activation function.
            use_weight_norm (bool): Whether to use weight norm.
                If set to true, it will be applied to all of the conv layers.
            use_spectral_norm (bool): Whether to use spectral norm.
                If set to true, it will be applied to all of the conv layers.

        Raises:
            ValueError: If both use_weight_norm and use_spectral_norm are True.
        """
        super().__init__()
        self.layers = nn.ModuleList()

        # check kernel size is valid
        assert len(kernel_sizes) == 4
        for ks in kernel_sizes:
            assert ks % 2 == 1

        # add first layer
        self.layers += [
            nn.Sequential(
                nn.Conv1d(
                    in_channels,
                    channels,
                    # NOTE(kan-bayashi): Use always the same kernel size
                    kernel_sizes[0],
                    bias=bias,
                    padding=(kernel_sizes[0] - 1) // 2,
                ),
                getattr(nn, nonlinear_activation)(**nonlinear_activation_params),
            )
        ]

        # add downsample layers
        in_chs = channels
        out_chs = channels
        # NOTE(kan-bayashi): Remove hard coding?
        groups = 4
        for downsample_scale in downsample_scales:
            self.layers += [
                nn.Sequential(
                    nn.Conv1d(
                        in_chs,
                        out_chs,
                        kernel_size=kernel_sizes[1],
                        stride=downsample_scale,
                        padding=(kernel_sizes[1] - 1) // 2,
                        groups=groups,
                        bias=bias,
                    ),
                    getattr(nn, nonlinear_activation)(**nonlinear_activation_params),
                )
            ]
            in_chs = out_chs
            # NOTE(kan-bayashi): Remove hard coding?
            out_chs = min(in_chs * 2, max_downsample_channels)
            # NOTE(kan-bayashi): Remove hard coding?
            groups = min(groups * 4, max_groups)

        # add final layers
        out_chs = min(in_chs * 2, max_downsample_channels)
        self.layers += [
            nn.Sequential(
                nn.Conv1d(
                    in_chs,
                    out_chs,
                    kernel_size=kernel_sizes[2],
                    stride=1,
                    padding=(kernel_sizes[2] - 1) // 2,
                    bias=bias,
                ),
                getattr(nn, nonlinear_activation)(**nonlinear_activation_params),
            )
        ]
        self.last_layer = nn.Conv1d(
            out_chs,
            out_channels,
            kernel_size=kernel_sizes[3],
            stride=1,
            padding=(kernel_sizes[3] - 1) // 2,
            bias=bias,
        )

        if use_weight_norm and use_spectral_norm:
            raise ValueError("Either use use_weight_norm or use_spectral_norm.")

        # apply weight norm
        if use_weight_norm:
            self.apply_weight_norm()

        # apply spectral norm
        if use_spectral_norm:
            self.apply_spectral_norm()

    def forward(self, x, return_fmaps=False):
        """Calculate forward propagation.

        Args:
            x (Tensor): Input noise signal (B, 1, T).
            return_fmaps (bool): Whether to return feature maps.

        Returns:
            List: List of output tensors of each layer.
        """
        fmap = []
        for f in self.layers:
            x = f(x)
            if return_fmaps:
                fmap.append(x)
        out = self.last_layer(x)

        if return_fmaps:
            return out, fmap
        else:
            return out

    def apply_weight_norm(self):
        """Apply weight normalization module from all of the layers."""

        def _apply_weight_norm(m):
            # BUGFIX: this discriminator is built from Conv1d layers; the
            # previous Conv2d check never matched, so weight norm was
            # silently never applied.
            if isinstance(m, nn.Conv1d):
                nn.utils.weight_norm(m)
                logger.debug(f"Weight norm is applied to {m}.")

        self.apply(_apply_weight_norm)

    def apply_spectral_norm(self):
        """Apply spectral normalization module from all of the layers."""

        def _apply_spectral_norm(m):
            # BUGFIX: check Conv1d here as well (see apply_weight_norm).
            if isinstance(m, nn.Conv1d):
                nn.utils.spectral_norm(m)
                logger.debug(f"Spectral norm is applied to {m}.")

        self.apply(_apply_spectral_norm)
class HiFiGANMultiScaleDiscriminator(nn.Module):
    """HiFi-GAN multi-scale discriminator module.

    An ensemble of :class:`HiFiGANScaleDiscriminator`; the input is average
    pooled between successive sub-discriminators.
    """

    def __init__(
        self,
        scales=3,
        downsample_pooling="AvgPool1d",
        # follow the official implementation setting
        downsample_pooling_params={
            "kernel_size": 4,
            "stride": 2,
            "padding": 2,
        },
        discriminator_params={
            "in_channels": 1,
            "out_channels": 1,
            "kernel_sizes": [15, 41, 5, 3],
            "channels": 128,
            "max_downsample_channels": 1024,
            "max_groups": 16,
            "bias": True,
            "downsample_scales": [2, 2, 4, 4, 1],
            "nonlinear_activation": "LeakyReLU",
            "nonlinear_activation_params": {"negative_slope": 0.1},
        },
        follow_official_norm=False,
    ):
        """Initialize HiFiGAN multi-scale discriminator module.

        Args:
            scales (int): Number of multi-scales.
            downsample_pooling (str): Pooling module name for downsampling of the inputs.
            downsample_pooling_params (dict): Parameters for the above pooling module.
            discriminator_params (dict): Parameters for hifi-gan scale discriminator module.
            follow_official_norm (bool): Whether to follow the norm setting of the official
                implementation. The first discriminator uses spectral norm and the other
                discriminators use weight norm.
        """
        super().__init__()
        self.discriminators = nn.ModuleList()
        for idx in range(scales):
            sub_params = copy.deepcopy(discriminator_params)
            if follow_official_norm:
                # official setting: spectral norm for the first scale only
                first = idx == 0
                sub_params["use_weight_norm"] = not first
                sub_params["use_spectral_norm"] = first
            self.discriminators.append(HiFiGANScaleDiscriminator(**sub_params))
        self.pooling = getattr(nn, downsample_pooling)(**downsample_pooling_params)

    def forward(self, x, return_fmaps=False):
        """Calculate forward propagation.

        Args:
            x (Tensor): Input noise signal (B, 1, T).
            return_fmaps (bool): Whether to return feature maps.

        Returns:
            List: List of each discriminator's outputs (and, optionally, the
                concatenated per-layer feature maps).
        """
        outs, fmaps = [], []
        for disc in self.discriminators:
            if return_fmaps:
                out, fmap = disc(x, return_fmaps)
                fmaps.extend(fmap)
            else:
                out = disc(x)
            outs.append(out)
            # downsample before feeding the next scale
            x = self.pooling(x)

        return (outs, fmaps) if return_fmaps else outs
class HiFiGANMultiScaleMultiPeriodDiscriminator(nn.Module):
    """HiFi-GAN multi-scale + multi-period discriminator module.

    Combines a multi-scale discriminator (MSD) and a multi-period
    discriminator (MPD) and concatenates their outputs.
    """

    def __init__(
        self,
        # Multi-scale discriminator related
        scales=3,
        scale_downsample_pooling="AvgPool1d",
        scale_downsample_pooling_params={
            "kernel_size": 4,
            "stride": 2,
            "padding": 2,
        },
        scale_discriminator_params={
            "in_channels": 1,
            "out_channels": 1,
            "kernel_sizes": [15, 41, 5, 3],
            "channels": 128,
            "max_downsample_channels": 1024,
            "max_groups": 16,
            "bias": True,
            "downsample_scales": [2, 2, 4, 4, 1],
            "nonlinear_activation": "LeakyReLU",
            "nonlinear_activation_params": {"negative_slope": 0.1},
        },
        follow_official_norm=True,
        # Multi-period discriminator related
        periods=[2, 3, 5, 7, 11],
        period_discriminator_params={
            "in_channels": 1,
            "out_channels": 1,
            "kernel_sizes": [5, 3],
            "channels": 32,
            "downsample_scales": [3, 3, 3, 3, 1],
            "max_downsample_channels": 1024,
            "bias": True,
            "nonlinear_activation": "LeakyReLU",
            "nonlinear_activation_params": {"negative_slope": 0.1},
            "use_weight_norm": True,
            "use_spectral_norm": False,
        },
    ):
        """Initialize HiFiGAN multi-scale + multi-period discriminator module.

        Args:
            scales (int): Number of multi-scales.
            scale_downsample_pooling (str): Pooling module name for downsampling of the inputs.
            scale_downsample_pooling_params (dict): Parameters for the above pooling module.
            scale_discriminator_params (dict): Parameters for hifi-gan scale discriminator module.
            follow_official_norm (bool): Whether to follow the norm setting of the official
                implementation. The first discriminator uses spectral norm and the other
                discriminators use weight norm.
            periods (list): List of periods.
            period_discriminator_params (dict): Parameters for hifi-gan period discriminator module.
                The period parameter will be overwritten.
        """
        super().__init__()
        self.msd = HiFiGANMultiScaleDiscriminator(
            scales=scales,
            downsample_pooling=scale_downsample_pooling,
            downsample_pooling_params=scale_downsample_pooling_params,
            discriminator_params=scale_discriminator_params,
            follow_official_norm=follow_official_norm,
        )
        self.mpd = HiFiGANMultiPeriodDiscriminator(
            periods=periods,
            discriminator_params=period_discriminator_params,
        )

    def forward(self, x, return_fmaps=False):
        """Calculate forward propagation.

        Args:
            x (Tensor): Input noise signal (B, 1, T).
            return_fmaps (bool): Whether to return feature maps.

        Returns:
            List: List of list of each discriminator outputs,
                which consists of each layer output tensors.
                Multi scale and multi period ones are concatenated.
        """
        if return_fmaps:
            msd_outs, msd_fmaps = self.msd(x, return_fmaps)
            mpd_outs, mpd_fmaps = self.mpd(x, return_fmaps)
            outs = msd_outs + mpd_outs
            fmaps = msd_fmaps + mpd_fmaps
            return outs, fmaps
        else:
            msd_outs = self.msd(x)
            mpd_outs = self.mpd(x)
            # list concatenation: MSD outputs first, then MPD outputs
            outs = msd_outs + mpd_outs
            return outs
class UnivNetSpectralDiscriminator(nn.Module):
    """UnivNet spectral discriminator module.

    Computes a magnitude spectrogram at a fixed STFT resolution and applies a
    stack of 2D convolutions to it.
    """

    def __init__(
        self,
        fft_size,
        hop_size,
        win_length,
        window="hann_window",
        kernel_sizes=[(3, 9), (3, 9), (3, 9), (3, 9), (3, 3), (3, 3)],
        strides=[(1, 1), (1, 2), (1, 2), (1, 2), (1, 1), (1, 1)],
        channels=32,
        bias=True,
        nonlinear_activation="LeakyReLU",
        nonlinear_activation_params={"negative_slope": 0.2},
        use_weight_norm=True,
    ):
        """Initialize UnivNet spectral discriminator module.

        Args:
            fft_size (int): FFT size of the STFT.
            hop_size (int): Hop size of the STFT.
            win_length (int): Window length of the STFT.
            window (str): Name of the torch window function (e.g. "hann_window").
            kernel_sizes (list): List of 2D kernel sizes; the first is used for
                the input layer, the last two for the output layers, and the
                rest for the intermediate layers.
            strides (list): List of 2D strides, one per conv layer.
            channels (int): Number of channels of hidden conv layers.
            bias (bool): Whether to add bias parameter in convolution layers.
            nonlinear_activation (str): Activation function module name.
            nonlinear_activation_params (dict): Hyperparameters for activation function.
            use_weight_norm (bool): Whether to use weight norm.
                If set to true, it will be applied to all of the conv layers.
        """
        super().__init__()
        self.fft_size = fft_size
        self.hop_size = hop_size
        self.win_length = win_length
        # Window registered as a buffer so it follows the module's device.
        self.register_buffer("window", getattr(torch, window)(win_length))
        self.layers = nn.ModuleList()

        # check kernel size is valid
        assert len(kernel_sizes) == len(strides)

        # add first layer
        self.layers += [
            nn.Sequential(
                nn.Conv2d(
                    1,
                    channels,
                    kernel_sizes[0],
                    stride=strides[0],
                    bias=bias,
                ),
                getattr(nn, nonlinear_activation)(**nonlinear_activation_params),
            )
        ]

        # intermediate layers: kernel_sizes[1] .. kernel_sizes[-3]
        for i in range(1, len(kernel_sizes) - 2):
            self.layers += [
                nn.Sequential(
                    nn.Conv2d(
                        channels,
                        channels,
                        kernel_size=kernel_sizes[i],
                        stride=strides[i],
                        bias=bias,
                    ),
                    getattr(nn, nonlinear_activation)(**nonlinear_activation_params),
                )
            ]

        # add final layers
        self.layers += [
            nn.Sequential(
                nn.Conv2d(
                    channels,
                    channels,
                    kernel_size=kernel_sizes[-2],
                    stride=strides[-2],
                    bias=bias,
                ),
                getattr(nn, nonlinear_activation)(**nonlinear_activation_params),
            )
        ]
        self.layers += [
            nn.Conv2d(
                channels,
                1,
                kernel_size=kernel_sizes[-1],
                stride=strides[-1],
                bias=bias,
            )
        ]

        # apply weight norm
        if use_weight_norm:
            self.apply_weight_norm()

    def forward(self, x, return_fmaps=False):
        """Calculate forward propagation.

        Args:
            x (Tensor): Input noise signal (B, 1, T).
            return_fmaps (bool): Whether to return feature maps.

        Returns:
            List: List of output tensors of each layer.
        """
        # magnitude spectrogram (power=1.0), with time and frequency axes
        # swapped by the trailing transpose
        x = spectrogram(
            x,
            pad=self.win_length // 2,
            window=self.window,
            n_fft=self.fft_size,
            hop_length=self.hop_size,
            win_length=self.win_length,
            power=1.0,
            normalized=False,
        ).transpose(-1, -2)

        fmap = []
        for f in self.layers:
            x = f(x)
            if return_fmaps:
                fmap.append(x)

        if return_fmaps:
            return x, fmap
        else:
            return x

    def apply_weight_norm(self):
        """Apply weight normalization module from all of the layers."""

        def _apply_weight_norm(m):
            if isinstance(m, nn.Conv2d):
                nn.utils.weight_norm(m)
                logger.debug(f"Weight norm is applied to {m}.")

        self.apply(_apply_weight_norm)
class UnivNetMultiResolutionSpectralDiscriminator(nn.Module):
    """UnivNet multi-resolution spectral discriminator module.

    An ensemble of :class:`UnivNetSpectralDiscriminator`, one per STFT
    resolution.
    """

    def __init__(
        self,
        fft_sizes=[1024, 2048, 512],
        hop_sizes=[120, 240, 50],
        win_lengths=[600, 1200, 240],
        window="hann_window",
        discriminator_params={
            "channels": 32,
            "kernel_sizes": [(3, 9), (3, 9), (3, 9), (3, 9), (3, 3), (3, 3)],
            "strides": [(1, 1), (1, 2), (1, 2), (1, 2), (1, 1), (1, 1)],
            "bias": True,
            "nonlinear_activation": "LeakyReLU",
            "nonlinear_activation_params": {"negative_slope": 0.2},
        },
    ):
        """Initialize UnivNetMultiResolutionSpectralDiscriminator module.

        Args:
            fft_sizes (list): FFT size for each sub-discriminator.
            hop_sizes (list): Hop size for each sub-discriminator.
            win_lengths (list): Window length for each sub-discriminator.
            window (str): Name of the torch window function.
            discriminator_params (dict): Parameters passed to each
                UnivNet spectral discriminator module.
        """
        super().__init__()
        assert len(fft_sizes) == len(hop_sizes) == len(win_lengths)
        self.discriminators = nn.ModuleList()
        # one sub-discriminator per STFT resolution
        for fft_size, hop_size, win_length in zip(fft_sizes, hop_sizes, win_lengths):
            sub_params = copy.deepcopy(discriminator_params)
            self.discriminators.append(
                UnivNetSpectralDiscriminator(
                    fft_size=fft_size,
                    hop_size=hop_size,
                    win_length=win_length,
                    window=window,
                    **sub_params,
                )
            )

    def forward(self, x, return_fmaps=False):
        """Calculate forward propagation.

        Args:
            x (Tensor): Input noise signal (B, 1, T).
            return_fmaps (bool): Whether to return feature maps.

        Returns:
            List: List of each discriminator's outputs (and, optionally, the
                concatenated per-layer feature maps).
        """
        outs, fmaps = [], []
        for disc in self.discriminators:
            if return_fmaps:
                out, fmap = disc(x, return_fmaps)
                fmaps.extend(fmap)
            else:
                out = disc(x)
            outs.append(out)

        return (outs, fmaps) if return_fmaps else outs
class UnivNetMultiResolutionMultiPeriodDiscriminator(nn.Module):
    """UnivNet multi-resolution + multi-period discriminator module."""

    def __init__(
        self,
        # Multi-resolution discriminator related
        fft_sizes=[1024, 2048, 512],
        hop_sizes=[120, 240, 50],
        win_lengths=[600, 1200, 240],
        window="hann_window",
        spectral_discriminator_params={
            "channels": 32,
            "kernel_sizes": [(3, 9), (3, 9), (3, 9), (3, 9), (3, 3), (3, 3)],
            "strides": [(1, 1), (1, 2), (1, 2), (1, 2), (1, 1), (1, 1)],
            "bias": True,
            "nonlinear_activation": "LeakyReLU",
            "nonlinear_activation_params": {"negative_slope": 0.2},
        },
        # Multi-period discriminator related
        periods=[2, 3, 5, 7, 11],
        period_discriminator_params={
            "in_channels": 1,
            "out_channels": 1,
            "kernel_sizes": [5, 3],
            "channels": 32,
            "downsample_scales": [3, 3, 3, 3, 1],
            "max_downsample_channels": 1024,
            "bias": True,
            "nonlinear_activation": "LeakyReLU",
            "nonlinear_activation_params": {"negative_slope": 0.1},
            "use_weight_norm": True,
            "use_spectral_norm": False,
        },
    ):
        """Initialize UnivNetMultiResolutionMultiPeriodDiscriminator module.

        Args:
            fft_sizes (list): FFT size for each spectral discriminator.
            hop_sizes (list): Hop size for each spectral discriminator.
            win_lengths (list): Window length for each spectral discriminator.
            window (str): Name of the window function.
            spectral_discriminator_params (dict): Parameters for the UnivNet
                spectral discriminator module.
            periods (list): List of periods.
            period_discriminator_params (dict): Parameters for the hifi-gan
                period discriminator module. The period parameter will be
                overwritten.

        """
        super().__init__()
        # multi-resolution spectral branch
        self.mrd = UnivNetMultiResolutionSpectralDiscriminator(
            fft_sizes=fft_sizes,
            hop_sizes=hop_sizes,
            win_lengths=win_lengths,
            window=window,
            discriminator_params=spectral_discriminator_params,
        )
        # multi-period waveform branch
        self.mpd = HiFiGANMultiPeriodDiscriminator(
            periods=periods,
            discriminator_params=period_discriminator_params,
        )

    def forward(self, x, return_fmaps=False):
        """Calculate forward propagation.

        Args:
            x (Tensor): Input signal (B, 1, T).
            return_fmaps (bool): Whether to also return intermediate feature maps.

        Returns:
            List: Concatenated multi-resolution and multi-period discriminator
                outputs; if ``return_fmaps`` is true, a tuple of
                (outputs, feature maps).

        """
        if not return_fmaps:
            # spectral branch first, period branch second
            return self.mrd(x) + self.mpd(x)
        mrd_outs, mrd_fmaps = self.mrd(x, return_fmaps)
        mpd_outs, mpd_fmaps = self.mpd(x, return_fmaps)
        return mrd_outs + mpd_outs, mrd_fmaps + mpd_fmaps
| 33,567 | 33.218145 | 108 | py |
nnsvs | nnsvs-master/nnsvs/usfgan/models/generator.py | # -*- coding: utf-8 -*-
# Copyright 2022 Reo Yoneyama (Nagoya University)
# MIT License (https://opensource.org/licenses/MIT)
"""Unified Source-Filter GAN Generator modules."""
from logging import getLogger
import torch
import torch.nn as nn
from nnsvs.usfgan.layers import Conv1d1x1, ResidualBlocks, upsample
from nnsvs.usfgan.layers.residual_block import PeriodicityEstimator
from nnsvs.usfgan.utils import index_initial
# A logger for this file
logger = getLogger(__name__)
class USFGANGenerator(nn.Module):
    """Unified Source-Filter GAN Generator module."""

    def __init__(
        self,
        source_network_params={
            "blockA": 30,
            "cycleA": 3,
            "blockF": 0,
            "cycleF": 0,
            "cascade_mode": 0,
        },
        filter_network_params={
            "blockA": 0,
            "cycleA": 0,
            "blockF": 30,
            "cycleF": 3,
            "cascade_mode": 0,
        },
        in_channels=1,
        out_channels=1,
        residual_channels=64,
        gate_channels=128,
        skip_channels=64,
        aux_channels=80,
        aux_context_window=2,
        use_weight_norm=True,
        upsample_params={"upsample_scales": [5, 4, 3, 2]},
    ):
        """Initialize USFGANGenerator module.

        Args:
            source_network_params (dict): Source-network parameters.
            filter_network_params (dict): Filter-network parameters.
            in_channels (int): Number of input channels.
            out_channels (int): Number of output channels.
            residual_channels (int): Number of channels in residual conv.
            gate_channels (int): Number of channels in gated conv.
            skip_channels (int): Number of channels in skip conv.
            aux_channels (int): Number of channels for auxiliary feature conv.
            aux_context_window (int): Context window size for auxiliary feature.
            use_weight_norm (bool): Whether to use weight norm.
                If set to true, it will be applied to all of the conv layers.
            upsample_params (dict): Upsampling network parameters.
        """
        super().__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.aux_channels = aux_channels
        self.n_ch = residual_channels

        # first 1x1 convolution: input signal -> hidden representation
        self.conv_first = Conv1d1x1(in_channels, residual_channels)

        # upsampling network for frame-level auxiliary features
        self.upsample_net = upsample.ConvInUpsampleNetwork(
            **upsample_params,
            aux_channels=aux_channels,
            aux_context_window=aux_context_window,
        )

        # BUGFIX: merge the channel settings into *copies* of the network
        # parameter dicts. The previous implementation called params.update()
        # on the caller-supplied dicts, mutating the shared mutable default
        # arguments across instances.
        common_params = {
            "residual_channels": residual_channels,
            "gate_channels": gate_channels,
            "skip_channels": skip_channels,
            "aux_channels": aux_channels,
        }
        self.source_network = ResidualBlocks(
            **{**source_network_params, **common_params}
        )
        self.filter_network = ResidualBlocks(
            **{**filter_network_params, **common_params}
        )

        # convert source signal back to hidden representation
        self.conv_mid = Conv1d1x1(out_channels, skip_channels)

        # convert hidden representation to output signal
        # NOTE(review): this head is shared between the source-excitation
        # output and the final filtered output -- confirm the weight sharing
        # is intended.
        self.conv_last = nn.Sequential(
            nn.ReLU(),
            Conv1d1x1(skip_channels, skip_channels),
            nn.ReLU(),
            Conv1d1x1(skip_channels, out_channels),
        )

        # apply weight norm
        if use_weight_norm:
            self.apply_weight_norm()

    def forward(self, x, c, d):
        """Calculate forward propagation.

        Args:
            x (Tensor): Input noise signal (B, 1, T).
            c (Tensor): Local conditioning auxiliary features (B, C, T').
            d (Tensor): Input pitch-dependent dilated factors (B, 1, T).

        Returns:
            Tensor: Output signal (B, out_channels, T).
            Tensor: Source excitation signal (B, out_channels, T).
        """
        # gather indices for the pitch-dependent dilations
        batch_index, ch_index = index_initial(x.size(0), self.n_ch)
        # perform upsampling of the conditioning to sample resolution
        c = self.upsample_net(c)
        assert c.size(-1) == x.size(-1)
        # encode to hidden representation
        x = self.conv_first(x)
        # source excitation generation
        x = self.source_network(x, c, d, batch_index, ch_index)
        s = self.conv_last(x)
        x = self.conv_mid(s)
        # resonance filtering
        x = self.filter_network(x, c, d, batch_index, ch_index)
        x = self.conv_last(x)
        return x, s

    def remove_weight_norm(self):
        """Remove weight normalization module from all of the layers."""

        def _remove_weight_norm(m):
            try:
                logger.debug(f"Weight norm is removed from {m}.")
                nn.utils.remove_weight_norm(m)
            except ValueError:  # this module didn't have weight norm
                return

        self.apply(_remove_weight_norm)

    def apply_weight_norm(self):
        """Apply weight normalization module from all of the layers."""

        def _apply_weight_norm(m):
            if isinstance(m, (nn.Conv1d, nn.Conv2d)):
                nn.utils.weight_norm(m)
                logger.debug(f"Weight norm is applied to {m}.")

        self.apply(_apply_weight_norm)
class CascadeHnUSFGANGenerator(nn.Module):
    """Cascade hn-uSFGAN Generator module.

    Generates a periodic (harmonic) source, cascades it into an aperiodic
    (noise) source branch, mixes the two with an estimated per-sample
    periodicity weight, and finally applies a resonance filter network.
    """

    def __init__(
        self,
        harmonic_network_params={
            "blockA": 20,
            "cycleA": 4,
            "blockF": 0,
            "cycleF": 0,
            "cascade_mode": 0,
        },
        noise_network_params={
            "blockA": 0,
            "cycleA": 0,
            "blockF": 5,
            "cycleF": 5,
            "cascade_mode": 0,
        },
        filter_network_params={
            "blockA": 0,
            "cycleA": 0,
            "blockF": 30,
            "cycleF": 3,
            "cascade_mode": 0,
        },
        # NOTE(review): PeriodicityEstimator's parameter is named
        # "conv_layers"; passing the "conv_blocks" key below as **kwargs
        # looks like it would raise a TypeError -- confirm.
        periodicity_estimator_params={
            "conv_blocks": 3,
            "kernel_size": 5,
            "dilation": 1,
            "padding_mode": "replicate",
        },
        in_channels=1,
        out_channels=1,
        residual_channels=64,
        gate_channels=128,
        skip_channels=64,
        aux_channels=80,
        aux_context_window=2,
        use_weight_norm=True,
        upsample_params={"upsample_scales": [5, 4, 3, 2]},
    ):
        """Initialize CascadeHnUSFGANGenerator module.

        Args:
            harmonic_network_params (dict): Periodic source generation network parameters.
            noise_network_params (dict): Aperiodic source generation network parameters.
            filter_network_params (dict): Filter network parameters.
            periodicity_estimator_params (dict): Periodicity estimation network parameters.
            in_channels (int): Number of input channels.
            out_channels (int): Number of output channels.
            residual_channels (int): Number of channels in residual conv.
            gate_channels (int): Number of channels in gated conv.
            skip_channels (int): Number of channels in skip conv.
            aux_channels (int): Number of channels for auxiliary feature conv.
            aux_context_window (int): Context window size for auxiliary feature.
            use_weight_norm (bool): Whether to use weight norm.
                If set to true, it will be applied to all of the conv layers.
            upsample_params (dict): Upsampling network parameters.
        """
        super(CascadeHnUSFGANGenerator, self).__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.aux_channels = aux_channels
        self.n_ch = residual_channels
        # define first convolutions (separate encoders for sine/noise inputs)
        self.conv_first_sine = Conv1d1x1(in_channels, residual_channels)
        self.conv_first_noise = Conv1d1x1(in_channels, residual_channels)
        # merges the harmonic latent with the noise latent (cascade connection)
        self.conv_merge = Conv1d1x1(residual_channels * 2, residual_channels)
        # define upsampling network for frame-level auxiliary features
        self.upsample_net = getattr(upsample, "ConvInUpsampleNetwork")(
            **upsample_params,
            aux_channels=aux_channels,
            aux_context_window=aux_context_window,
        )
        # define harmonic/noise/filter networks
        # NOTE(review): update() mutates the caller-supplied dicts, i.e. the
        # shared mutable default arguments -- consider copying instead.
        for params in [
            harmonic_network_params,
            noise_network_params,
            filter_network_params,
        ]:
            params.update(
                {
                    "residual_channels": residual_channels,
                    "gate_channels": gate_channels,
                    "skip_channels": skip_channels,
                    "aux_channels": aux_channels,
                }
            )
        self.harmonic_network = ResidualBlocks(**harmonic_network_params)
        self.noise_network = ResidualBlocks(**noise_network_params)
        self.filter_network = ResidualBlocks(**filter_network_params)
        # define periodicity estimator (conditioned on the auxiliary features)
        self.periodicity_estimator = PeriodicityEstimator(
            **periodicity_estimator_params, in_channels=aux_channels
        )
        # convert hidden representation to output signal
        # NOTE(review): this single head is reused for the final output and
        # the source/harmonic/noise projections in forward() -- the weights
        # are shared across all four; confirm intended.
        self.conv_last = nn.Sequential(
            nn.ReLU(),
            Conv1d1x1(skip_channels, skip_channels),
            nn.ReLU(),
            Conv1d1x1(skip_channels, out_channels),
        )
        # apply weight norm
        if use_weight_norm:
            self.apply_weight_norm()

    def forward(self, x, c, d):
        """Calculate forward propagation.

        Args:
            x (Tensor): Input signal whose channel dim holds the sine and the
                noise excitation halves; it is chunked in two along dim 1.
            c (Tensor): Local conditioning auxiliary features (B, C, T').
            d (Tensor): Input pitch-dependent dilated factors (B, 1, T).

        Returns:
            Tensor: Output signal x (B, out_channels, T).
            Tensor: Mixed source signal s (for the regularization loss).
            Tensor: Harmonic component h (debug only, detached from the graph).
            Tensor: Noise component n (debug only, detached from the graph).
            Tensor: Estimated periodicity a.
        """
        # index initialization for pitch-dependent dilations
        batch_index, ch_index = index_initial(x.size(0), self.n_ch)
        # upsample auxiliary features to sample resolution
        c = self.upsample_net(c)
        assert c.size(-1) == x.size(-1)
        # estimate periodicity a in (0, 1) via the sigmoid-ended estimator
        a = self.periodicity_estimator(c)
        # assume the first channel is sine and the other is noise
        sine, noise = torch.chunk(x, 2, 1)
        # encode to hidden representation
        h = self.conv_first_sine(sine)
        n = self.conv_first_noise(noise)
        # generate periodic source latents, weighted by periodicity
        h = self.harmonic_network(h, c, d, batch_index, ch_index)
        h = a * h
        # cascade: the noise branch also sees the harmonic latent
        n = self.conv_merge(torch.cat([h, n], dim=1))
        n = self.noise_network(n, c, d, batch_index, ch_index)
        n = (1.0 - a) * n
        # merge periodic and aperiodic latent features
        s = h + n
        # resonance filtering
        x = self.filter_network(s, c, d, batch_index, ch_index)
        x = self.conv_last(x)
        # convert to 1d signal for regularization loss
        s = self.conv_last(s)
        # just for debug (not part of the training graph)
        with torch.no_grad():
            h = self.conv_last(h)
            n = self.conv_last(n)
        return x, s, h, n, a

    def remove_weight_norm(self):
        """Remove weight normalization module from all of the layers."""

        def _remove_weight_norm(m):
            try:
                logger.debug(f"Weight norm is removed from {m}.")
                nn.utils.remove_weight_norm(m)
            except ValueError:  # this module didn't have weight norm
                return

        self.apply(_remove_weight_norm)

    def apply_weight_norm(self):
        """Apply weight normalization module from all of the layers."""

        def _apply_weight_norm(m):
            if isinstance(m, nn.Conv1d) or isinstance(m, nn.Conv2d):
                nn.utils.weight_norm(m)
                logger.debug(f"Weight norm is applied to {m}.")

        self.apply(_apply_weight_norm)
class ParallelHnUSFGANGenerator(nn.Module):
    """Parallel hn-uSFGAN Generator module.

    Like the cascade variant, but the harmonic and noise source branches run
    independently (no cascade connection) before being mixed by the
    estimated periodicity weight and filtered.
    """

    def __init__(
        self,
        harmonic_network_params={
            "blockA": 20,
            "cycleA": 4,
            "blockF": 0,
            "cycleF": 0,
            "cascade_mode": 0,
        },
        noise_network_params={
            "blockA": 0,
            "cycleA": 0,
            "blockF": 5,
            "cycleF": 5,
            "cascade_mode": 0,
        },
        filter_network_params={
            "blockA": 0,
            "cycleA": 0,
            "blockF": 30,
            "cycleF": 3,
            "cascade_mode": 0,
        },
        # NOTE(review): PeriodicityEstimator's parameter is named
        # "conv_layers"; passing the "conv_blocks" key below as **kwargs
        # looks like it would raise a TypeError -- confirm.
        periodicity_estimator_params={
            "conv_blocks": 3,
            "kernel_size": 5,
            "dilation": 1,
            "padding_mode": "replicate",
        },
        in_channels=1,
        out_channels=1,
        residual_channels=64,
        gate_channels=128,
        skip_channels=64,
        aux_channels=80,
        aux_context_window=2,
        use_weight_norm=True,
        upsample_params={"upsample_scales": [5, 4, 3, 2]},
    ):
        """Initialize ParallelHnUSFGANGenerator module.

        Args:
            harmonic_network_params (dict): Periodic source generation network parameters.
            noise_network_params (dict): Aperiodic source generation network parameters.
            filter_network_params (dict): Filter network parameters.
            periodicity_estimator_params (dict): Periodicity estimation network parameters.
            in_channels (int): Number of input channels.
            out_channels (int): Number of output channels.
            residual_channels (int): Number of channels in residual conv.
            gate_channels (int): Number of channels in gated conv.
            skip_channels (int): Number of channels in skip conv.
            aux_channels (int): Number of channels for auxiliary feature conv.
            aux_context_window (int): Context window size for auxiliary feature.
            use_weight_norm (bool): Whether to use weight norm.
                If set to true, it will be applied to all of the conv layers.
            upsample_params (dict): Upsampling network parameters.
        """
        super(ParallelHnUSFGANGenerator, self).__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.aux_channels = aux_channels
        self.n_ch = residual_channels
        # define first convolutions (separate encoders for sine/noise inputs)
        self.conv_first_sine = Conv1d1x1(in_channels, residual_channels)
        self.conv_first_noise = Conv1d1x1(in_channels, residual_channels)
        # define upsampling network for frame-level auxiliary features
        self.upsample_net = getattr(upsample, "ConvInUpsampleNetwork")(
            **upsample_params,
            aux_channels=aux_channels,
            aux_context_window=aux_context_window,
        )
        # define harmonic/noise/filter networks
        # NOTE(review): update() mutates the caller-supplied dicts, i.e. the
        # shared mutable default arguments -- consider copying instead.
        for params in [
            harmonic_network_params,
            noise_network_params,
            filter_network_params,
        ]:
            params.update(
                {
                    "residual_channels": residual_channels,
                    "gate_channels": gate_channels,
                    "skip_channels": skip_channels,
                    "aux_channels": aux_channels,
                }
            )
        self.harmonic_network = ResidualBlocks(**harmonic_network_params)
        self.noise_network = ResidualBlocks(**noise_network_params)
        self.filter_network = ResidualBlocks(**filter_network_params)
        # define periodicity estimator (conditioned on the auxiliary features)
        self.periodicity_estimator = PeriodicityEstimator(
            **periodicity_estimator_params, in_channels=aux_channels
        )
        # convert hidden representation to output signal
        # NOTE(review): this single head is reused for the final output and
        # the source/harmonic/noise projections in forward() -- the weights
        # are shared across all four; confirm intended.
        self.conv_last = nn.Sequential(
            nn.ReLU(),
            Conv1d1x1(skip_channels, skip_channels),
            nn.ReLU(),
            Conv1d1x1(skip_channels, out_channels),
        )
        # apply weight norm
        if use_weight_norm:
            self.apply_weight_norm()

    def forward(self, x, c, d):
        """Calculate forward propagation.

        Args:
            x (Tensor): Input signal whose channel dim holds the sine and the
                noise excitation halves; it is chunked in two along dim 1.
            c (Tensor): Local conditioning auxiliary features (B, C, T').
            d (Tensor): Input pitch-dependent dilated factors (B, 1, T).

        Returns:
            Tensor: Output signal x (B, out_channels, T).
            Tensor: Mixed source signal s (for the regularization loss).
            Tensor: Harmonic component h (debug only, detached from the graph).
            Tensor: Noise component n (debug only, detached from the graph).
            Tensor: Estimated periodicity a.
        """
        # index initialization for pitch-dependent dilations
        batch_index, ch_index = index_initial(x.size(0), self.n_ch)
        # upsample auxiliary features to sample resolution
        c = self.upsample_net(c)
        assert c.size(-1) == x.size(-1)
        # estimate periodicity a in (0, 1) via the sigmoid-ended estimator
        a = self.periodicity_estimator(c)
        # assume the first channel is sine and the other is noise
        sine, noise = torch.chunk(x, 2, 1)
        # encode to hidden representation
        h = self.conv_first_sine(sine)
        n = self.conv_first_noise(noise)
        # generate periodic and aperiodic source latents independently
        h = self.harmonic_network(h, c, d, batch_index, ch_index)
        n = self.noise_network(n, c, d, batch_index, ch_index)
        # mix the two branches with the periodicity weight
        h = a * h
        n = (1.0 - a) * n
        s = h + n
        # resonance filtering
        x = self.filter_network(s, c, d, batch_index, ch_index)
        x = self.conv_last(x)
        # convert to 1d signal for regularization loss
        s = self.conv_last(s)
        # just for debug (not part of the training graph)
        with torch.no_grad():
            h = self.conv_last(h)
            n = self.conv_last(n)
        return x, s, h, n, a

    def remove_weight_norm(self):
        """Remove weight normalization module from all of the layers."""

        def _remove_weight_norm(m):
            try:
                logger.debug(f"Weight norm is removed from {m}.")
                nn.utils.remove_weight_norm(m)
            except ValueError:  # this module didn't have weight norm
                return

        self.apply(_remove_weight_norm)

    def apply_weight_norm(self):
        """Apply weight normalization module from all of the layers."""

        def _apply_weight_norm(m):
            if isinstance(m, nn.Conv1d) or isinstance(m, nn.Conv2d):
                nn.utils.weight_norm(m)
                logger.debug(f"Weight norm is applied to {m}.")

        self.apply(_apply_weight_norm)
| 18,402 | 32.766972 | 91 | py |
nnsvs | nnsvs-master/nnsvs/usfgan/layers/residual_block.py | # -*- coding: utf-8 -*-
# Copyright 2022 Reo Yoneyama (Nagoya University)
# MIT License (https://opensource.org/licenses/MIT)
"""Residual block modules.
References:
- https://github.com/bigpon/QPPWG
- https://github.com/kan-bayashi/ParallelWaveGAN
- https://github.com/r9y9/wavenet_vocoder
"""
import math
import sys
from logging import getLogger
import torch
import torch.nn as nn
from nnsvs.usfgan.utils import pd_indexing
# A logger for this file
logger = getLogger(__name__)
class Conv1d(nn.Conv1d):
    """Conv1d module with customized initialization.

    Identical to :class:`torch.nn.Conv1d` except that the weights are
    He (Kaiming) normal initialized and the bias, if present, is zeroed.
    """

    def __init__(self, *args, **kwargs):
        """Initialize Conv1d module."""
        super().__init__(*args, **kwargs)

    def reset_parameters(self):
        """Reset parameters with Kaiming-normal weights and zero bias."""
        nn.init.kaiming_normal_(self.weight, nonlinearity="relu")
        if self.bias is not None:
            nn.init.zeros_(self.bias)
class Conv1d1x1(Conv1d):
    """1x1 Conv1d with customized initialization."""

    def __init__(self, in_channels, out_channels, bias=True):
        """Initialize 1x1 Conv1d module.

        Args:
            in_channels (int): Number of input channels.
            out_channels (int): Number of output channels.
            bias (bool): Whether to add a learnable bias.

        """
        super().__init__(
            in_channels,
            out_channels,
            kernel_size=1,
            padding=0,
            dilation=1,
            bias=bias,
        )
class Conv2d(nn.Conv2d):
    """Conv2d module with customized initialization.

    Identical to :class:`torch.nn.Conv2d` except that the weights are
    He (Kaiming) normal initialized (fan-out mode) and the bias, if
    present, is zeroed.
    """

    def __init__(self, *args, **kwargs):
        """Initialize Conv2d module."""
        super().__init__(*args, **kwargs)

    def reset_parameters(self):
        """Reset parameters with Kaiming-normal weights and zero bias."""
        nn.init.kaiming_normal_(self.weight, mode="fan_out", nonlinearity="relu")
        if self.bias is not None:
            nn.init.zeros_(self.bias)
class Conv2d1x1(Conv2d):
    """1x1 Conv2d with customized initialization."""

    def __init__(self, in_channels, out_channels, bias=True):
        """Initialize 1x1 Conv2d module.

        Args:
            in_channels (int): Number of input channels.
            out_channels (int): Number of output channels.
            bias (bool): Whether to add a learnable bias.

        """
        super().__init__(
            in_channels,
            out_channels,
            kernel_size=1,
            padding=0,
            dilation=1,
            bias=bias,
        )
class FixedBlock(nn.Module):
    """Fixed (non pitch-adaptive) residual block module in QPPWG."""

    def __init__(
        self,
        residual_channels=64,
        gate_channels=128,
        skip_channels=64,
        aux_channels=80,
        kernel_size=3,
        dilation=1,
        bias=True,
    ):
        """Initialize Fixed ResidualBlock module.

        Args:
            residual_channels (int): Number of channels for residual connection.
            gate_channels (int): Number of channels in the gated conv.
            skip_channels (int): Number of channels for skip connection.
            aux_channels (int): Local conditioning channels, i.e. auxiliary
                input dimension (0 disables local conditioning).
            kernel_size (int): Kernel size of the dilated conv.
            dilation (int): Dilation size.
            bias (bool): Whether to add bias parameter in convolution layers.

        """
        super().__init__()
        # "same" padding for the dilated conv
        padding = (kernel_size - 1) // 2 * dilation
        # dilated conv feeding the gated activation
        self.conv = Conv1d(
            residual_channels,
            gate_channels,
            kernel_size,
            padding=padding,
            padding_mode="reflect",
            dilation=dilation,
            bias=bias,
        )
        # optional local-conditioning projection
        if aux_channels > 0:
            self.conv1x1_aux = Conv1d1x1(aux_channels, gate_channels, bias=False)
        else:
            self.conv1x1_aux = None
        # the gated activation halves the channel count
        half_gate = gate_channels // 2
        self.conv1x1_out = Conv1d1x1(half_gate, residual_channels, bias=bias)
        self.conv1x1_skip = Conv1d1x1(half_gate, skip_channels, bias=bias)

    def forward(self, x, c):
        """Calculate forward propagation.

        Args:
            x (Tensor): Input tensor (B, residual_channels, T).
            c (Tensor): Local conditioning auxiliary tensor (B, aux_channels, T).

        Returns:
            Tensor: Output tensor for residual connection (B, residual_channels, T).
            Tensor: Output tensor for skip connection (B, skip_channels, T).

        """
        residual = x
        h = self.conv(x)
        # split into the tanh/sigmoid halves of the gated activation
        ha, hb = h.split(h.size(1) // 2, dim=1)
        # local conditioning
        if c is not None:
            assert self.conv1x1_aux is not None
            cond = self.conv1x1_aux(c)
            ca, cb = cond.split(cond.size(1) // 2, dim=1)
            ha = ha + ca
            hb = hb + cb
        gated = torch.tanh(ha) * torch.sigmoid(hb)
        # skip connection
        s = self.conv1x1_skip(gated)
        # residual connection (scaled to keep the variance stable)
        out = (self.conv1x1_out(gated) + residual) * math.sqrt(0.5)
        return out, s
class AdaptiveBlock(nn.Module):
    """Adaptive (pitch-dependent) residual block module in QPPWG."""

    def __init__(
        self,
        residual_channels=64,
        gate_channels=128,
        skip_channels=64,
        aux_channels=80,
        bias=True,
    ):
        """Initialize Adaptive ResidualBlock module.

        Args:
            residual_channels (int): Number of channels for residual connection.
            gate_channels (int): Number of channels in the gated conv.
            skip_channels (int): Number of channels for skip connection.
            aux_channels (int): Local conditioning channels, i.e. auxiliary
                input dimension (0 disables local conditioning).
            bias (bool): Whether to add bias parameter in convolution layers.

        """
        super().__init__()
        # pitch-dependent dilated taps: past / current / future samples are
        # gathered by the caller and projected with separate 1x1 convs
        self.convP = Conv1d1x1(residual_channels, gate_channels, bias=bias)  # past
        self.convC = Conv1d1x1(residual_channels, gate_channels, bias=bias)  # current
        self.convF = Conv1d1x1(residual_channels, gate_channels, bias=bias)  # future
        # optional local-conditioning projection
        if aux_channels > 0:
            self.conv1x1_aux = Conv1d1x1(aux_channels, gate_channels, bias=False)
        else:
            self.conv1x1_aux = None
        # the gated activation halves the channel count
        half_gate = gate_channels // 2
        self.conv1x1_out = Conv1d1x1(half_gate, residual_channels, bias=bias)
        self.conv1x1_skip = Conv1d1x1(half_gate, skip_channels, bias=bias)

    def forward(self, xC, xP, xF, c):
        """Calculate forward propagation.

        Args:
            xC (Tensor): Current input tensor (B, residual_channels, T).
            xP (Tensor): Past input tensor (B, residual_channels, T).
            xF (Tensor): Future input tensor (B, residual_channels, T).
            c (Tensor): Local conditioning auxiliary tensor (B, aux_channels, T).

        Returns:
            Tensor: Output tensor for residual connection (B, residual_channels, T).
            Tensor: Output tensor for skip connection (B, skip_channels, T).

        """
        residual = xC
        # sum the three pitch-dependent taps
        h = self.convC(xC) + self.convP(xP) + self.convF(xF)
        # split into the tanh/sigmoid halves of the gated activation
        ha, hb = h.split(h.size(1) // 2, dim=1)
        # local conditioning
        if c is not None:
            assert self.conv1x1_aux is not None
            cond = self.conv1x1_aux(c)
            ca, cb = cond.split(cond.size(1) // 2, dim=1)
            ha = ha + ca
            hb = hb + cb
        gated = torch.tanh(ha) * torch.sigmoid(hb)
        # skip connection
        s = self.conv1x1_skip(gated)
        # residual connection (scaled to keep the variance stable)
        out = (self.conv1x1_out(gated) + residual) * math.sqrt(0.5)
        return out, s
class ResidualBlocks(nn.Module):
    """Multiple residual blocks stacking module."""

    def __init__(
        self,
        blockA,
        cycleA,
        blockF,
        cycleF,
        cascade_mode=0,
        residual_channels=64,
        gate_channels=128,
        skip_channels=64,
        aux_channels=80,
    ):
        """Initialize ResidualBlocks module.

        Args:
            blockA (int): Number of adaptive residual blocks.
            cycleA (int): Number of dilation cycles of adaptive residual blocks.
            blockF (int): Number of fixed residual blocks.
            cycleF (int): Number of dilation cycles of fixed residual blocks.
            cascade_mode (int): Cascaded mode (0: Adaptive->Fixed; 1: Fixed->Adaptive).
            residual_channels (int): Number of channels in residual conv.
            gate_channels (int): Number of channels in gated conv.
            skip_channels (int): Number of channels in skip conv.
            aux_channels (int): Number of channels for auxiliary feature conv.

        Raises:
            ValueError: If ``cascade_mode`` is neither 0 nor 1.
        """
        super().__init__()
        # guard against zero cycles, then require the blocks to split evenly
        # into the requested number of dilation cycles
        cycleA = max(cycleA, 1)
        cycleF = max(cycleF, 1)
        assert blockA % cycleA == 0
        self.blockA_per_cycle = blockA // cycleA
        assert blockF % cycleF == 0
        blockF_per_cycle = blockF // cycleF

        # adaptive residual blocks (dilation is applied at forward time via
        # pitch-dependent indexing)
        adaptive_blocks = nn.ModuleList(
            [
                AdaptiveBlock(
                    residual_channels=residual_channels,
                    gate_channels=gate_channels,
                    skip_channels=skip_channels,
                    aux_channels=aux_channels,
                )
                for _ in range(blockA)
            ]
        )
        # fixed residual blocks with exponentially growing dilation per cycle
        fixed_blocks = nn.ModuleList(
            [
                FixedBlock(
                    residual_channels=residual_channels,
                    gate_channels=gate_channels,
                    skip_channels=skip_channels,
                    aux_channels=aux_channels,
                    dilation=2 ** (block % blockF_per_cycle),
                )
                for block in range(blockF)
            ]
        )
        # define cascaded structure (block_modes[i] is True for adaptive)
        if cascade_mode == 0:  # adaptive -> fixed
            self.conv_dilated = adaptive_blocks.extend(fixed_blocks)
            self.block_modes = [True] * blockA + [False] * blockF
        elif cascade_mode == 1:  # fixed -> adaptive
            self.conv_dilated = fixed_blocks.extend(adaptive_blocks)
            self.block_modes = [False] * blockF + [True] * blockA
        else:
            # BUGFIX: the previous implementation logged an error and called
            # sys.exit(0), terminating with a *success* exit code on an
            # invalid configuration. Raise a proper exception instead.
            raise ValueError(f"Cascaded mode {cascade_mode} is not supported!")

    def forward(self, x, c, d, batch_index, ch_index):
        """Calculate forward propagation.

        Args:
            x (Tensor): Input hidden representation (B, residual_channels, T).
            c (Tensor): Local conditioning auxiliary features (B, C, T).
            d (Tensor): Input pitch-dependent dilated factors (B, 1, T).
            batch_index (Tensor): Precomputed batch gather indices.
            ch_index (Tensor): Precomputed channel gather indices.

        Returns:
            Tensor: Output tensor (B, residual_channels, T).
        """
        skips = 0
        blockA_idx = 0
        for f, mode in zip(self.conv_dilated, self.block_modes):
            if mode:  # adaptive block: gather pitch-dependent past/future taps
                dilation = 2 ** (blockA_idx % self.blockA_per_cycle)
                xP, xF = pd_indexing(x, d, dilation, batch_index, ch_index)
                x, h = f(x, xP, xF, c)
                blockA_idx += 1
            else:  # fixed block
                x, h = f(x, c)
            skips = h + skips
        skips *= math.sqrt(1.0 / len(self.conv_dilated))
        # NOTE(review): the accumulated skip connections are computed but not
        # returned; only the residual-path output is used downstream. Confirm
        # this is intentional (QPPWG-style stacks usually return the skip sum).
        return x
class PeriodicityEstimator(nn.Module):
    """Periodicity estimator module.

    A small conv stack ending in a sigmoid, producing per-sample periodicity
    weights in (0, 1) from the auxiliary features.
    """

    def __init__(
        self,
        in_channels,
        residual_channels=64,
        conv_layers=3,
        kernel_size=5,
        dilation=1,
        padding_mode="replicate",
        conv_blocks=None,
    ):
        """Initialize PeriodicityEstimator module.

        Args:
            in_channels (int): Number of input channels.
            residual_channels (int): Number of channels in residual conv.
            conv_layers (int): Number of convolution layers.
            kernel_size (int): Kernel size.
            dilation (int): Dilation size.
            padding_mode (str): Padding mode.
            conv_blocks (int, optional): Backward-compatible alias of
                ``conv_layers``. The generators' default
                ``periodicity_estimator_params`` use the key ``"conv_blocks"``,
                which previously raised a TypeError here; when given, it
                overrides ``conv_layers``.
        """
        super().__init__()
        # BUGFIX: accept the "conv_blocks" key used by the hn-uSFGAN
        # generators' default parameter dicts.
        if conv_blocks is not None:
            conv_layers = conv_blocks
        modules = []
        for idx in range(conv_layers):
            conv1d = Conv1d(
                in_channels,
                residual_channels,
                kernel_size=kernel_size,
                dilation=dilation,
                padding=kernel_size // 2 * dilation,
                padding_mode=padding_mode,
            )
            if idx != conv_layers - 1:
                nonlinear = nn.ReLU(inplace=True)
            else:
                # Initialize the last layer so the initial outputs are close to
                # sigmoid(0) = 0.5, which stabilizes early training.
                # NOTE: zero init induces nan or inf if weight normalization is
                # used, hence the tiny-std normal init instead.
                # nn.init.zeros_(conv1d.weight)
                nn.init.normal_(conv1d.weight, std=1e-4)
                nonlinear = nn.Sigmoid()
            modules += [conv1d, nonlinear]
            in_channels = residual_channels
        self.layers = nn.Sequential(*modules)

    def forward(self, x):
        """Calculate forward propagation.

        Args:
            x (Tensor): Input auxiliary features (B, C, T).

        Returns:
            Tensor: Output tensor in (0, 1) (B, residual_channels, T).
        """
        return self.layers(x)
| 12,956 | 31.3925 | 91 | py |
nnsvs | nnsvs-master/nnsvs/usfgan/layers/cheaptrick.py | # -*- coding: utf-8 -*-
# Copyright 2022 Reo Yoneyama (Nagoya University)
# MIT License (https://opensource.org/licenses/MIT)
"""Spectral envelopes estimation module based on CheapTrick.
References:
- https://www.sciencedirect.com/science/article/pii/S0167639314000697
- https://github.com/mmorise/World
"""
import math
import torch
import torch.fft
import torch.nn as nn
class AdaptiveWindowing(nn.Module):
    """CheapTrick F0-adaptive windowing module.

    Precomputes, for every integer F0 in ``[f0_floor, f0_ceil]``, a
    pitch-adaptive analysis window spanning 3 pitch periods, and applies
    it frame by frame before the FFT.
    """

    def __init__(
        self,
        sampling_rate,
        hop_size,
        fft_size,
        f0_floor,
        f0_ceil,
    ):
        """Initialize AdaptiveWindowing module.

        Args:
            sampling_rate (int): Sampling rate.
            hop_size (int): Hop size.
            fft_size (int): FFT size.
            f0_floor (int): Minimum value of F0.
            f0_ceil (int): Maximum value of F0.

        """
        super(AdaptiveWindowing, self).__init__()
        self.sampling_rate = sampling_rate
        self.hop_size = hop_size
        self.fft_size = fft_size
        # window[f0] holds the precomputed window for integer pitch f0;
        # rows below f0_floor stay all-zero and are never indexed.
        self.register_buffer("window", torch.zeros((f0_ceil + 1, fft_size)))
        # pads half an FFT frame on each side of the time axis
        self.zero_padding = nn.ConstantPad2d((fft_size // 2, fft_size // 2, 0, 0), 0)
        # Pre-calculation of the window functions
        for f0 in range(f0_floor, f0_ceil + 1):
            # half length of the 3-period (1.5 periods per side) window
            half_win_len = round(1.5 * self.sampling_rate / f0)
            base_index = torch.arange(
                -half_win_len, half_win_len + 1, dtype=torch.int64
            )
            position = base_index / 1.5 / self.sampling_rate
            # place the raised-cosine window at the center of the FFT frame
            left = fft_size // 2 - half_win_len
            right = fft_size // 2 + half_win_len + 1
            window = torch.zeros(fft_size)
            window[left:right] = 0.5 * torch.cos(math.pi * position * f0) + 0.5
            # normalize each window to unit energy
            average = torch.sum(window * window).pow(0.5)
            self.window[f0] = window / average

    def forward(self, x, f, power=False):
        """Calculate forward propagation.

        Args:
            x (Tensor): Waveform (B, T).
            f (Tensor): Integer F0 sequence (B, T') used to index the
                precomputed windows.
            power (bool): Whether to return power or magnitude spectrogram.

        Returns:
            Tensor: Power (or magnitude) spectrogram (B, T', fft_size // 2 + 1).

        """
        # Frame the padded waveform: (B, T) -> (B, n_frames, fft_size)
        x = self.zero_padding(x).unfold(1, self.fft_size, self.hop_size)
        # look up the per-frame windows by integer F0
        windows = self.window[f]
        # Adaptive windowing and FFT magnitude.
        # NOTE(review): the last frame is dropped to match the F0 length; the
        # original comment reads "In test, change x[:, : -1, :] to x" --
        # confirm the intended train/test framing.
        x = torch.abs(torch.fft.rfft(x[:, :-1, :] * windows))
        x = x.pow(2) if power else x
        return x
class AdaptiveLiftering(nn.Module):
    """CheapTrick F0-adaptive liftering module.

    Smooths the log spectrum on the cepstrum domain with an F0-dependent
    smoothing lifter and compensates adjacent-harmonic effects with a
    compensation lifter, both precomputed per integer F0.
    """

    def __init__(
        self,
        sampling_rate,
        fft_size,
        f0_floor,
        f0_ceil,
        q1=-0.15,
    ):
        """Initialize AdaptiveLiftering module.

        Args:
            sampling_rate (int): Sampling rate.
            fft_size (int): FFT size.
            f0_floor (int): Minimum value of F0.
            f0_ceil (int): Maximum value of F0.
            q1 (float): Parameter to remove effect of adjacent harmonics.

        """
        super(AdaptiveLiftering, self).__init__()
        self.sampling_rate = sampling_rate
        self.bin_size = fft_size // 2 + 1
        self.q1 = q1
        self.q0 = 1.0 - 2.0 * q1
        # lifter tables indexed by integer F0; rows below f0_floor stay zero
        # and are never indexed
        self.register_buffer(
            "smoothing_lifter", torch.zeros((f0_ceil + 1, self.bin_size))
        )
        self.register_buffer(
            "compensation_lifter", torch.zeros((f0_ceil + 1, self.bin_size))
        )
        # Pre-calculation of the smoothing lifters and compensation lifters
        for f0 in range(f0_floor, f0_ceil + 1):
            smoothing_lifter = torch.zeros(self.bin_size)
            compensation_lifter = torch.zeros(self.bin_size)
            quefrency = torch.arange(1, self.bin_size) / sampling_rate
            # sinc-shaped smoothing lifter (quefrency 0 passes unchanged)
            smoothing_lifter[0] = 1.0
            smoothing_lifter[1:] = torch.sin(math.pi * f0 * quefrency) / (
                math.pi * f0 * quefrency
            )
            # cosine-shaped compensation lifter built from q0/q1
            compensation_lifter[0] = self.q0 + 2.0 * self.q1
            compensation_lifter[1:] = self.q0 + 2.0 * self.q1 * torch.cos(
                2.0 * math.pi * f0 * quefrency
            )
            self.smoothing_lifter[f0] = smoothing_lifter
            self.compensation_lifter[f0] = compensation_lifter

    def forward(self, x, f, elim_0th=False):
        """Calculate forward propagation.

        Args:
            x (Tensor): Power spectrogram (B, T', bin_size).
            f (Tensor): Integer F0 sequence (B, T') used to index the lifters.
            elim_0th (bool): Whether to eliminate the 0th cepstrum component
                (i.e. discard the overall energy term).

        Returns:
            Tensor: Estimated spectral envelope (B, T', bin_size).

        """
        # Setting the smoothing lifter and compensation lifter
        smoothing_lifter = self.smoothing_lifter[f]
        compensation_lifter = self.compensation_lifter[f]
        # Calculating cepstrum: mirror the spectrum to a full FFT frame and
        # take the real cepstrum of the (clamped) log spectrum
        tmp = torch.cat((x, torch.flip(x[:, :, 1:-1], [2])), dim=2)
        cepstrum = torch.fft.rfft(torch.log(torch.clamp(tmp, min=1e-7))).real
        # Set the 0th cepstrum component to 0 (removes the energy term)
        if elim_0th:
            cepstrum[..., 0] = 0
        # Liftering cepstrum with the lifters
        liftered_cepstrum = cepstrum * smoothing_lifter * compensation_lifter
        # Return the result to the spectral domain
        x = torch.fft.irfft(liftered_cepstrum)[:, :, : self.bin_size]
        return x
class CheapTrick(nn.Module):
    """CheapTrick based spectral envelope estimation module."""

    def __init__(
        self,
        sampling_rate,
        hop_size,
        fft_size,
        f0_floor=70,
        f0_ceil=340,
        uv_threshold=0,
        q1=-0.15,
    ):
        """Initialize CheapTrick module.

        Args:
            sampling_rate (int): Sampling rate.
            hop_size (int): Hop size.
            fft_size (int): FFT size.
            f0_floor (int): Minimum value of F0.
            f0_ceil (int): Maximum value of F0.
            uv_threshold (float): V/UV determining threshold.
            q1 (float): Parameter to remove effect of adjacent harmonics.

        """
        super().__init__()
        # CheapTrick analyzes 3 pitch periods per frame, so the FFT size must
        # be able to hold that window at the lowest F0.
        assert fft_size > 3.0 * sampling_rate / f0_floor
        self.f0_floor = f0_floor
        self.f0_ceil = f0_ceil
        self.uv_threshold = uv_threshold
        # F0-adaptive windowing followed by F0-adaptive liftering
        self.ada_wind = AdaptiveWindowing(
            sampling_rate, hop_size, fft_size, f0_floor, f0_ceil
        )
        self.ada_lift = AdaptiveLiftering(
            sampling_rate, fft_size, f0_floor, f0_ceil, q1
        )

    def forward(self, x, f, power=False, elim_0th=False):
        """Calculate forward propagation.

        Args:
            x (Tensor): Waveform (B, T).
            f (Tensor): F0 sequence (B, T').
            power (bool): Whether to use power or magnitude spectrogram.
            elim_0th (bool): Whether to eliminate the 0th cepstrum component.

        Returns:
            Tensor: Estimated spectral envelope (B, T', bin_size).

        """
        # Step 0: replace unvoiced frames (F0 at/below threshold) with f0_ceil
        # and round/clamp to integers so F0 can index the precomputed tables.
        voiced = (f > self.uv_threshold) * torch.ones_like(f)
        f = voiced * f + (1.0 - voiced) * self.f0_ceil
        f = torch.round(torch.clamp(f, min=self.f0_floor, max=self.f0_ceil))
        f = f.to(torch.int64)
        # Step 1: F0-adaptive windowing -> power/magnitude spectrogram.
        x = self.ada_wind(x, f, power)
        # Step 2: cepstral smoothing and compensation liftering.
        x = self.ada_lift(x, f, elim_0th)
        return x
| 7,834 | 30.849593 | 85 | py |
nnsvs | nnsvs-master/nnsvs/usfgan/layers/upsample.py | # -*- coding: utf-8 -*-
"""Upsampling module.
This code is modified from https://github.com/r9y9/wavenet_vocoder.
"""
import numpy as np
import torch
import torch.nn.functional as F
from nnsvs.usfgan.layers import Conv1d
class Stretch2d(torch.nn.Module):
    """Stretch (upsample) a 2-D feature map by interpolation."""

    def __init__(self, x_scale, y_scale, mode="nearest"):
        """Initialize Stretch2d module.

        Args:
            x_scale (int): Stretch factor along the time (last) axis.
            y_scale (int): Stretch factor along the frequency axis.
            mode (str): Interpolation mode passed to ``F.interpolate``.
        """
        super().__init__()
        self.x_scale = x_scale
        self.y_scale = y_scale
        self.mode = mode

    def forward(self, x):
        """Stretch the input.

        Args:
            x (Tensor): Input tensor (B, C, F, T).

        Returns:
            Tensor: Interpolated tensor (B, C, F * y_scale, T * x_scale).
        """
        scale = (self.y_scale, self.x_scale)
        return F.interpolate(x, scale_factor=scale, mode=self.mode)
class Conv2d(torch.nn.Conv2d):
    """Conv2d whose weights initialize to a moving-average kernel."""

    def __init__(self, *args, **kwargs):
        """Initialize Conv2d module (same signature as ``torch.nn.Conv2d``)."""
        super().__init__(*args, **kwargs)

    def reset_parameters(self):
        """Fill every weight with 1/kernel_area and zero the bias."""
        fan = float(np.prod(self.kernel_size))
        self.weight.data.fill_(1.0 / fan)
        if self.bias is not None:
            torch.nn.init.constant_(self.bias, 0.0)
class UpsampleNetwork(torch.nn.Module):
    """Upsampling network module.

    Alternates interpolation (Stretch2d) and smoothing convolutions so the
    total time-axis upsampling equals ``prod(upsample_scales)``.
    """

    def __init__(
        self,
        upsample_scales,
        nonlinear_activation=None,
        nonlinear_activation_params={},
        interpolate_mode="nearest",
        freq_axis_kernel_size=1,
        use_causal_conv=False,
    ):
        """Initialize upsampling network module.
        Args:
            upsample_scales (list): List of upsampling scales.
            nonlinear_activation (str): Activation function name.
            nonlinear_activation_params (dict): Arguments for specified activation function.
            interpolate_mode (str): Interpolation mode.
            freq_axis_kernel_size (int): Kernel size in the direction of frequency axis.
            use_causal_conv (bool): Whether to trim convolutions to be causal.
        """
        super(UpsampleNetwork, self).__init__()
        self.use_causal_conv = use_causal_conv
        self.up_layers = torch.nn.ModuleList()
        # NOTE: layer order inside up_layers defines state_dict keys;
        # do not reorder.
        for scale in upsample_scales:
            # interpolation layer
            stretch = Stretch2d(scale, 1, interpolate_mode)
            self.up_layers += [stretch]
            # conv layer (smooths the interpolated features)
            assert (
                freq_axis_kernel_size - 1
            ) % 2 == 0, "Not support even number freq axis kernel size."
            freq_axis_padding = (freq_axis_kernel_size - 1) // 2
            kernel_size = (freq_axis_kernel_size, scale * 2 + 1)
            # Causal mode pads only the past side twice as much; the extra
            # future samples are trimmed in forward().
            if use_causal_conv:
                padding = (freq_axis_padding, scale * 2)
            else:
                padding = (freq_axis_padding, scale)
            conv = Conv2d(1, 1, kernel_size=kernel_size, padding=padding, bias=False)
            self.up_layers += [conv]
            # nonlinear
            if nonlinear_activation is not None:
                nonlinear = getattr(torch.nn, nonlinear_activation)(
                    **nonlinear_activation_params
                )
                self.up_layers += [nonlinear]

    def forward(self, c):
        """Calculate forward propagation.
        Args:
            c : Input tensor (B, C, T).
        Returns:
            Tensor: Upsampled tensor (B, C, T'), where T' = T * prod(upsample_scales).
        """
        c = c.unsqueeze(1)  # (B, 1, C, T)
        for f in self.up_layers:
            # In causal mode, drop the look-ahead samples the padding added.
            if self.use_causal_conv and isinstance(f, Conv2d):
                c = f(c)[..., : c.size(-1)]
            else:
                c = f(c)
        return c.squeeze(1)  # (B, C, T')
class ConvInUpsampleNetwork(torch.nn.Module):
    """Convolution + upsampling network module.

    A context-gathering 1-D convolution followed by :class:`UpsampleNetwork`.
    """

    def __init__(
        self,
        upsample_scales,
        nonlinear_activation=None,
        nonlinear_activation_params={},
        interpolate_mode="nearest",
        freq_axis_kernel_size=1,
        aux_channels=80,
        aux_context_window=0,
        use_causal_conv=False,
    ):
        """Initialize convolution + upsampling network module.
        Args:
            upsample_scales (list): List of upsampling scales.
            nonlinear_activation (str): Activation function name.
            nonlinear_activation_params (dict): Arguments for specified activation function.
            interpolate_mode (str): Interpolation mode.
            freq_axis_kernel_size (int): Kernel size in the direction of frequency axis.
            aux_channels (int): Number of channels of pre-convolutional layer.
            aux_context_window (int): Context window size of the pre-convolutional layer.
            use_causal_conv (bool): Whether to use causal structure.
        """
        super(ConvInUpsampleNetwork, self).__init__()
        self.aux_context_window = aux_context_window
        # Causal trimming in forward() is only meaningful when the context
        # window actually widens the pre-convolution.
        self.use_causal_conv = use_causal_conv and aux_context_window > 0
        # To capture wide-context information in conditional features
        kernel_size = (
            aux_context_window + 1 if use_causal_conv else 2 * aux_context_window + 1
        )
        # NOTE(kan-bayashi): Here do not use padding because the input is already padded
        self.conv_in = Conv1d(
            aux_channels, aux_channels, kernel_size=kernel_size, bias=False
        )
        self.upsample = UpsampleNetwork(
            upsample_scales=upsample_scales,
            nonlinear_activation=nonlinear_activation,
            nonlinear_activation_params=nonlinear_activation_params,
            interpolate_mode=interpolate_mode,
            freq_axis_kernel_size=freq_axis_kernel_size,
            use_causal_conv=use_causal_conv,
        )

    def forward(self, c):
        """Calculate forward propagation.
        Args:
            c : Input tensor (B, C, T').
        Returns:
            Tensor: Upsampled tensor (B, C, T),
                where T = (T' - aux_context_window * 2) * prod(upsample_scales).
        Note:
            The length of inputs considers the context window size.
        """
        c_ = self.conv_in(c)
        # In the causal case, drop trailing frames so the output depends
        # only on past context.
        c = c_[:, :, : -self.aux_context_window] if self.use_causal_conv else c_
        return self.upsample(c)
| 6,484 | 32.25641 | 92 | py |
nnsvs | nnsvs-master/nnsvs/usfgan/utils/features.py | # -*- coding: utf-8 -*-
# Copyright 2022 Reo Yoneyama (Nagoya University)
# MIT License (https://opensource.org/licenses/MIT)
"""Feature-related functions.
References:
- https://github.com/bigpon/QPPWG
"""
import sys
from logging import getLogger
import numpy as np
import torch
from torch.nn.functional import interpolate
# A logger for this file
logger = getLogger(__name__)
def validate_length(x, y, hop_size=None):
    """Trim two arrays so their lengths are mutually consistent.

    Args:
        x (ndarray): Array compared along its first axis (e.g. waveform).
        y (ndarray): Array compared along its first axis (e.g. features).
        hop_size (int, optional): Upsampling factor between ``y`` and ``x``.
            If given, ``len(x) == len(y) * hop_size`` is enforced;
            otherwise ``len(x) == len(y)``.

    Returns:
        tuple: Length-adjusted ``(x, y)``.
    """
    if hop_size is None:
        # Cut both arrays down to the shorter length.
        min_len = min(x.shape[0], y.shape[0])
        x = x[:min_len]
        y = y[:min_len]
        assert len(x) == len(y)
        return x, y

    target = y.shape[0] * hop_size
    if x.shape[0] > target:
        x = x[:target]
    elif x.shape[0] < target:
        # Drop whole frames from y until x can cover them, then align x.
        shortage = target - x.shape[0]
        y = y[: -(shortage // hop_size + 1)]
        x = x[: y.shape[0] * hop_size]
    assert len(x) == len(y) * hop_size
    return x, y
def dilated_factor(batch_f0, fs, dense_factor):
    """Pitch-dependent dilated factor.

    Args:
        batch_f0 (ndarray): F0 sequence (T). Unvoiced (zero) frames are
            treated as ``fs / dense_factor`` so their factor becomes 1.0.
        fs (int): Sampling rate.
        dense_factor (int): Number of taps in one cycle.

    Returns:
        ndarray: Float array of pitch-dependent dilated factors (T).

    Note:
        Fixed: the previous implementation overwrote zero frames of the
        caller's ``batch_f0`` array in place; this version leaves the
        input untouched.
    """
    # Replace unvoiced frames without mutating the caller's array.
    f0 = np.where(batch_f0 == 0, fs / dense_factor, batch_f0)
    dilated_factors = np.ones(f0.shape) * fs / f0 / dense_factor
    assert np.all(dilated_factors > 0)
    return dilated_factors
class SignalGenerator:
    """Generates excitation input signals (sine / noise / V-UV) from F0."""

    def __init__(
        self,
        sample_rate=24000,
        hop_size=120,
        sine_amp=0.1,
        noise_amp=0.003,
        signal_types=["sine", "noise"],
    ):
        """Initialize SignalGenerator module.

        Args:
            sample_rate (int): Sampling rate.
            hop_size (int): Hop size of input F0.
            sine_amp (float): Sine amplitude for NSF-based sine generation.
            noise_amp (float): Noise amplitude for NSF-based sine generation.
            signal_types (list): List of input signal types for generator.
        """
        self.sample_rate = sample_rate
        self.hop_size = hop_size
        self.signal_types = signal_types
        self.sine_amp = sine_amp
        self.noise_amp = noise_amp

    @torch.no_grad()
    def __call__(self, f0):
        """Generate the requested signals and stack them along channels."""
        builders = {
            "noise": self.random_noise,
            "sine": self.sinusoid,
            "uv": self.vuv_binary,
        }
        # Unknown type names are silently skipped, matching historical use.
        signals = [
            builders[name](f0) for name in self.signal_types if name in builders
        ]
        return torch.cat(signals, dim=1)

    @torch.no_grad()
    def random_noise(self, f0):
        """Gaussian noise signals.

        Args:
            f0 (Tensor): F0 tensor (B, 1, T // hop_size).

        Returns:
            Tensor: Gaussian noise signals (B, 1, T).
        """
        batch, _, frames = f0.size()
        return torch.randn((batch, 1, frames * self.hop_size), device=f0.device)

    @torch.no_grad()
    def sinusoid(self, f0):
        """NSF-style sine signals with additive noise.

        Args:
            f0 (Tensor): F0 tensor (B, 1, T // hop_size).

        Returns:
            Tensor: Sines generated following NSF (B, 1, T).
        """
        batch, _, frames = f0.size()
        n_samples = frames * self.hop_size
        vuv = interpolate((f0 > 0) * torch.ones_like(f0), n_samples)
        # Normalized instantaneous frequency, accumulated into a phase.
        phase_inc = (interpolate(f0, n_samples) / self.sample_rate) % 1
        phase = torch.cumsum(phase_inc, dim=2) * 2 * np.pi
        sine = vuv * torch.sin(phase) * self.sine_amp
        if self.noise_amp > 0:
            # Voiced regions get full noise amplitude, unvoiced one third.
            amp = self.noise_amp * (vuv + (1.0 - vuv) / 3.0)
            noise = torch.randn((batch, 1, n_samples), device=f0.device) * amp
            sine = sine + noise
        return sine

    @torch.no_grad()
    def vuv_binary(self, f0):
        """V/UV binary sequences.

        Args:
            f0 (Tensor): F0 tensor (B, 1, T // hop_size).

        Returns:
            Tensor: V/UV binary sequences (B, 1, T).
        """
        _, _, frames = f0.size()
        return interpolate((f0 > 0) * torch.ones_like(f0), frames * self.hop_size)
| 5,103 | 27.198895 | 90 | py |
nnsvs | nnsvs-master/nnsvs/usfgan/utils/filters.py | # -*- coding: utf-8 -*-
# Copyright 2020 Yi-Chiao Wu (Nagoya University)
# based on a WaveNet script by Tomoki Hayashi (Nagoya University)
# (https://github.com/kan-bayashi/PytorchWaveNetVocoder)
# based on sprocket-vc script by Kazuhiro Kobayashi (Nagoya University)
# (https://github.com/k2kobayashi/sprocket)
# MIT License (https://opensource.org/licenses/MIT)
"""Filters."""
import numpy as np
from scipy.signal import firwin, lfilter
NUMTAPS = 255
def low_cut_filter(x, fs, cutoff=70):
"""Low-cut filter
Args:
x (ndarray): Waveform sequence
fs (int): Sampling frequency
cutoff (float): Cutoff frequency of low cut filter
Return:
(ndarray): Low cut filtered waveform sequence
"""
nyquist = fs // 2
norm_cutoff = cutoff / nyquist
numtaps = NUMTAPS
fil = firwin(numtaps, norm_cutoff, pass_zero=False)
lcf_x = lfilter(fil, 1, x)
return lcf_x
def low_pass_filter(x, fs, cutoff=70):
    """Apply a delay-compensated FIR low-pass filter.

    The input is edge-padded before filtering and the linear-phase group
    delay is compensated when trimming, so the output stays time-aligned
    with (and the same length as) the input.

    Args:
        x (ndarray): Waveform sequence.
        fs (int): Sampling frequency.
        cutoff (float): Cutoff frequency of low pass filter.

    Return:
        ndarray: Low pass filtered waveform sequence.
    """
    norm_cutoff = cutoff / (fs // 2)
    taps = firwin(NUMTAPS, norm_cutoff, pass_zero=True)
    padded = np.pad(x, (NUMTAPS, NUMTAPS), "edge")
    filtered = lfilter(taps, 1, padded)
    # Trim the padding plus the NUMTAPS // 2 sample group delay. Note the
    # floor division: -NUMTAPS // 2 == -128 for NUMTAPS == 255, so front
    # (382) + back (128) trims exactly the 2 * NUMTAPS padded samples.
    start = NUMTAPS + NUMTAPS // 2
    stop = -NUMTAPS // 2
    return filtered[start:stop]
| 1,509 | 24.166667 | 71 | py |
nnsvs | nnsvs-master/nnsvs/usfgan/utils/index.py | # -*- coding: utf-8 -*-
# Copyright 2020 Yi-Chiao Wu (Nagoya University)
# MIT License (https://opensource.org/licenses/MIT)
"""Indexing-related functions."""
import torch
from torch.nn import ConstantPad1d as pad1d
def pd_indexing(x, d, dilation, batch_index, ch_index):
    """Pitch-dependent indexing of past and future samples.
    Args:
        x (Tensor): Input feature map (B, C, T).
        d (Tensor): Input pitch-dependent dilated factors (B, 1, T).
        dilation (Int): Dilation size.
        batch_index (Tensor): Batch index (from ``index_initial``).
        ch_index (Tensor): Channel index (from ``index_initial``).
    Returns:
        Tensor: Past output tensor (B, out_channels, T)
        Tensor: Future output tensor (B, out_channels, T)
    Note:
        NOTE(review): the bare ``.cuda()`` calls assume all inputs live on
        the default CUDA device whenever CUDA is available — confirm.
    """
    (_, _, batch_length) = d.size()
    # Per-sample offsets: factor * dilation samples into the past/future.
    dilations = d * dilation
    # get past index
    idxP = torch.arange(-batch_length, 0).float()
    if torch.cuda.is_available():
        idxP = idxP.cuda()
    idxP = torch.add(-dilations, idxP)
    idxP = idxP.round().long()
    # Left padding needed so the most negative index stays in range.
    maxP = -((torch.min(idxP) + batch_length))
    assert maxP >= 0
    # Advanced-indexing triple broadcasts to (B, C, T).
    idxP = (batch_index, ch_index, idxP)
    # padding past tensor
    xP = pad1d((maxP, 0), 0)(x)
    # get future index
    idxF = torch.arange(0, batch_length).float()
    if torch.cuda.is_available():
        idxF = idxF.cuda()
    idxF = torch.add(dilations, idxF)
    idxF = idxF.round().long()
    # Right padding needed so the largest index stays in range.
    maxF = torch.max(idxF) - (batch_length - 1)
    assert maxF >= 0
    idxF = (batch_index, ch_index, idxF)
    # padding future tensor
    xF = pad1d((0, maxF), 0)(x)
    return xP[idxP], xF[idxF]
def index_initial(n_batch, n_ch, tensor=True):
    """Build batch and channel index grids for pitch-dependent indexing.

    Args:
        n_batch (Int): Number of batch.
        n_ch (Int): Number of channel.
        tensor (bool): Return tensors (moved to CUDA when available)
            instead of nested lists.

    Returns:
        Tensor or list: Batch index, shape (n_batch, n_ch, 1).
        Tensor or list: Channel index, shape (n_batch, n_ch, 1).
    """
    batch_index = [[[b]] * n_ch for b in range(n_batch)]
    ch_index = [[[c] for c in range(n_ch)]] * n_batch
    if tensor:
        batch_index = torch.tensor(batch_index)
        ch_index = torch.tensor(ch_index)
        if torch.cuda.is_available():
            batch_index = batch_index.cuda()
            ch_index = ch_index.cuda()
    return batch_index, ch_index
| 2,349 | 26.647059 | 68 | py |
nnsvs | nnsvs-master/nnsvs/transformer/encoder.py | # The code was adapted from https://github.com/jaywalnut310/vits
import torch
from torch import nn
from torch.nn import functional as F
from .attentions import MultiHeadAttention, convert_pad_shape
class LayerNorm(nn.Module):
    """Layer normalization over the channel axis of (B, C, ...) tensors."""

    def __init__(self, channels, eps=1e-5):
        """Initialize LayerNorm.

        Args:
            channels (int): Number of channels (axis 1 of the input).
            eps (float): Numerical stability constant.
        """
        super().__init__()
        self.channels = channels
        self.eps = eps
        self.gamma = nn.Parameter(torch.ones(channels))
        self.beta = nn.Parameter(torch.zeros(channels))

    def forward(self, x):
        # Move channels to the last axis, normalize, then move them back.
        x_t = x.transpose(1, -1)
        normed = F.layer_norm(x_t, (self.channels,), self.gamma, self.beta, self.eps)
        return normed.transpose(1, -1)
class FFN(nn.Module):
    """Position-wise feed-forward block with optional causal padding."""

    def __init__(
        self,
        in_channels,
        out_channels,
        filter_channels,
        kernel_size,
        p_dropout=0.0,
        activation=None,
        causal=False,
    ):
        """Initialize FFN.

        Args:
            in_channels (int): Input channels.
            out_channels (int): Output channels.
            filter_channels (int): Hidden channels.
            kernel_size (int): Convolution kernel size.
            p_dropout (float): Dropout probability.
            activation (str): ``"gelu"`` for the sigmoid-GELU approximation,
                anything else uses ReLU.
            causal (bool): Pad only on the past side when True.
        """
        super().__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.filter_channels = filter_channels
        self.kernel_size = kernel_size
        self.p_dropout = p_dropout
        self.activation = activation
        self.causal = causal
        # Pick the padding strategy once at construction time.
        self.padding = self._causal_padding if causal else self._same_padding
        self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size)
        self.conv_2 = nn.Conv1d(filter_channels, out_channels, kernel_size)
        self.drop = nn.Dropout(p_dropout)

    def forward(self, x, x_mask):
        h = self.conv_1(self.padding(x * x_mask))
        if self.activation == "gelu":
            # Fast sigmoid-based GELU approximation.
            h = h * torch.sigmoid(1.702 * h)
        else:
            h = torch.relu(h)
        h = self.drop(h)
        h = self.conv_2(self.padding(h * x_mask))
        return h * x_mask

    def _causal_padding(self, x):
        # All padding goes on the past (left) side.
        if self.kernel_size == 1:
            return x
        pad = [[0, 0], [0, 0], [self.kernel_size - 1, 0]]
        return F.pad(x, convert_pad_shape(pad))

    def _same_padding(self, x):
        # Symmetric padding keeps the output length equal to the input.
        if self.kernel_size == 1:
            return x
        left = (self.kernel_size - 1) // 2
        right = self.kernel_size // 2
        return F.pad(x, convert_pad_shape([[0, 0], [0, 0], [left, right]]))
class Encoder(nn.Module):
    """Transformer encoder with windowed relative-position self-attention.

    A stack of ``n_layers`` blocks; each block applies multi-head
    self-attention then an FFN, both with dropout, residual connection and
    post-LayerNorm.

    Args:
        hidden_channels (int): Model (channel) dimension.
        filter_channels (int): Hidden dimension of the FFN blocks.
        n_heads (int): Number of attention heads.
        n_layers (int): Number of encoder blocks.
        kernel_size (int): FFN convolution kernel size.
        p_dropout (float): Dropout probability.
        window_size (int): Relative-attention window size.
    """

    def __init__(
        self,
        hidden_channels,
        filter_channels,
        n_heads,
        n_layers,
        kernel_size=1,
        p_dropout=0.0,
        window_size=4,
        **kwargs,
    ):
        super().__init__()
        self.hidden_channels = hidden_channels
        self.filter_channels = filter_channels
        self.n_heads = n_heads
        self.n_layers = n_layers
        self.kernel_size = kernel_size
        self.p_dropout = p_dropout
        self.window_size = window_size
        self.drop = nn.Dropout(p_dropout)
        self.attn_layers = nn.ModuleList()
        self.norm_layers_1 = nn.ModuleList()
        self.ffn_layers = nn.ModuleList()
        self.norm_layers_2 = nn.ModuleList()
        for _ in range(self.n_layers):
            self.attn_layers.append(
                MultiHeadAttention(
                    hidden_channels,
                    hidden_channels,
                    n_heads,
                    p_dropout=p_dropout,
                    window_size=window_size,
                )
            )
            self.norm_layers_1.append(LayerNorm(hidden_channels))
            self.ffn_layers.append(
                FFN(
                    hidden_channels,
                    hidden_channels,
                    filter_channels,
                    kernel_size,
                    p_dropout=p_dropout,
                )
            )
            self.norm_layers_2.append(LayerNorm(hidden_channels))

    def forward(self, x, x_mask):
        """Encode ``x`` (B, C, T) under mask ``x_mask`` (B, 1, T)."""
        # Outer product of masks -> pairwise attention mask (B, 1, T, T).
        attn_mask = x_mask.unsqueeze(2) * x_mask.unsqueeze(-1)
        x = x * x_mask
        for i in range(self.n_layers):
            # Self-attention sub-block (residual + post-norm).
            y = self.attn_layers[i](x, x, attn_mask)
            y = self.drop(y)
            x = self.norm_layers_1[i](x + y)
            # Feed-forward sub-block (residual + post-norm).
            y = self.ffn_layers[i](x, x_mask)
            y = self.drop(y)
            x = self.norm_layers_2[i](x + y)
        x = x * x_mask
        return x
| 4,282 | 28.951049 | 78 | py |
nnsvs | nnsvs-master/nnsvs/transformer/attentions.py | # The code was adapted from https://github.com/jaywalnut310/vits
import math
import torch
from torch import nn
from torch.nn import functional as F
def convert_pad_shape(pad_shape):
    """Convert a per-axis pad spec into ``F.pad``'s flat, reversed format.

    Args:
        pad_shape (list): ``[[before, after], ...]`` ordered from the first
            axis to the last.

    Returns:
        list: Flat paddings ordered from the last axis to the first, as
            expected by ``torch.nn.functional.pad``.
    """
    return [pad for pair in reversed(pad_shape) for pad in pair]
def sequence_mask(length, max_length=None):
    """Build a boolean mask that is True inside each sequence length.

    Args:
        length (Tensor): Per-sequence lengths (B,).
        max_length (int, optional): Mask width; defaults to ``length.max()``.

    Returns:
        Tensor: Boolean mask of shape (B, max_length).
    """
    if max_length is None:
        max_length = length.max()
    positions = torch.arange(max_length, dtype=length.dtype, device=length.device)
    return positions.unsqueeze(0) < length.unsqueeze(1)
class MultiHeadAttention(nn.Module):
    """Multi-head attention with optional windowed relative positions.

    Inputs/outputs are channel-first: (B, channels, T). Supports relative
    position embeddings within ``window_size`` (self-attention only),
    a proximal bias, and block-local attention via ``block_length``.
    """

    def __init__(
        self,
        channels,
        out_channels,
        n_heads,
        p_dropout=0.0,
        window_size=None,
        heads_share=True,
        block_length=None,
        proximal_bias=False,
        proximal_init=False,
    ):
        super().__init__()
        assert channels % n_heads == 0
        self.channels = channels
        self.out_channels = out_channels
        self.n_heads = n_heads
        self.p_dropout = p_dropout
        self.window_size = window_size
        self.heads_share = heads_share
        self.block_length = block_length
        self.proximal_bias = proximal_bias
        self.proximal_init = proximal_init
        # Stores the last attention map computed by forward() (for viz/debug).
        self.attn = None
        self.k_channels = channels // n_heads
        self.conv_q = nn.Conv1d(channels, channels, 1)
        self.conv_k = nn.Conv1d(channels, channels, 1)
        self.conv_v = nn.Conv1d(channels, channels, 1)
        self.conv_o = nn.Conv1d(channels, out_channels, 1)
        self.drop = nn.Dropout(p_dropout)
        if window_size is not None:
            # Relative position embeddings for offsets in
            # [-window_size, +window_size]; optionally shared across heads.
            n_heads_rel = 1 if heads_share else n_heads
            rel_stddev = self.k_channels ** -0.5
            self.emb_rel_k = nn.Parameter(
                torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels)
                * rel_stddev
            )
            self.emb_rel_v = nn.Parameter(
                torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels)
                * rel_stddev
            )
        nn.init.xavier_uniform_(self.conv_q.weight)
        nn.init.xavier_uniform_(self.conv_k.weight)
        nn.init.xavier_uniform_(self.conv_v.weight)
        if proximal_init:
            # Initialize keys identical to queries (used by some decoders).
            with torch.no_grad():
                self.conv_k.weight.copy_(self.conv_q.weight)
                self.conv_k.bias.copy_(self.conv_q.bias)

    def forward(self, x, c, attn_mask=None):
        """Attend from ``x`` (queries) to ``c`` (keys/values); both (B, C, T)."""
        q = self.conv_q(x)
        k = self.conv_k(c)
        v = self.conv_v(c)
        x, self.attn = self.attention(q, k, v, mask=attn_mask)
        x = self.conv_o(x)
        return x

    def attention(self, query, key, value, mask=None):
        # reshape [b, d, t] -> [b, n_h, t, d_k]
        b, d, t_s, t_t = (*key.size(), query.size(2))
        query = query.view(b, self.n_heads, self.k_channels, t_t).transpose(2, 3)
        key = key.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3)
        value = value.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3)
        # Scaled dot-product scores [b, n_h, t_t, t_s].
        scores = torch.matmul(query / math.sqrt(self.k_channels), key.transpose(-2, -1))
        if self.window_size is not None:
            assert (
                t_s == t_t
            ), "Relative attention is only available for self-attention."
            key_relative_embeddings = self._get_relative_embeddings(self.emb_rel_k, t_s)
            rel_logits = self._matmul_with_relative_keys(
                query / math.sqrt(self.k_channels), key_relative_embeddings
            )
            scores_local = self._relative_position_to_absolute_position(rel_logits)
            scores = scores + scores_local
        if self.proximal_bias:
            assert t_s == t_t, "Proximal bias is only available for self-attention."
            scores = scores + self._attention_bias_proximal(t_s).to(
                device=scores.device, dtype=scores.dtype
            )
        if mask is not None:
            # -1e4 (not -inf) keeps fp16 safe while zeroing after softmax.
            scores = scores.masked_fill(mask == 0, -1e4)
            if self.block_length is not None:
                assert (
                    t_s == t_t
                ), "Local attention is only available for self-attention."
                block_mask = (
                    torch.ones_like(scores)
                    .triu(-self.block_length)
                    .tril(self.block_length)
                )
                scores = scores.masked_fill(block_mask == 0, -1e4)
        p_attn = F.softmax(scores, dim=-1)  # [b, n_h, t_t, t_s]
        p_attn = self.drop(p_attn)
        output = torch.matmul(p_attn, value)
        if self.window_size is not None:
            relative_weights = self._absolute_position_to_relative_position(p_attn)
            value_relative_embeddings = self._get_relative_embeddings(
                self.emb_rel_v, t_s
            )
            output = output + self._matmul_with_relative_values(
                relative_weights, value_relative_embeddings
            )
        output = (
            output.transpose(2, 3).contiguous().view(b, d, t_t)
        )  # [b, n_h, t_t, d_k] -> [b, d, t_t]
        return output, p_attn

    def _matmul_with_relative_values(self, x, y):
        """
        x: [b, h, l, m]
        y: [h or 1, m, d]
        ret: [b, h, l, d]
        """
        ret = torch.matmul(x, y.unsqueeze(0))
        return ret

    def _matmul_with_relative_keys(self, x, y):
        """
        x: [b, h, l, d]
        y: [h or 1, m, d]
        ret: [b, h, l, m]
        """
        ret = torch.matmul(x, y.unsqueeze(0).transpose(-2, -1))
        return ret

    def _get_relative_embeddings(self, relative_embeddings, length):
        """Slice (and pad if needed) the relative embeddings to 2*length-1 offsets."""
        # Pad first before slice to avoid using cond ops.
        pad_length = max(length - (self.window_size + 1), 0)
        slice_start_position = max((self.window_size + 1) - length, 0)
        slice_end_position = slice_start_position + 2 * length - 1
        if pad_length > 0:
            padded_relative_embeddings = F.pad(
                relative_embeddings,
                convert_pad_shape([[0, 0], [pad_length, pad_length], [0, 0]]),
            )
        else:
            padded_relative_embeddings = relative_embeddings
        used_relative_embeddings = padded_relative_embeddings[
            :, slice_start_position:slice_end_position
        ]
        return used_relative_embeddings

    def _relative_position_to_absolute_position(self, x):
        """
        x: [b, h, l, 2*l-1]
        ret: [b, h, l, l]
        """
        batch, heads, length, _ = x.size()
        # Concat columns of pad to shift from relative to absolute indexing.
        x = F.pad(x, convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, 1]]))
        # Concat extra elements so to add up to shape (len+1, 2*len-1).
        x_flat = x.view([batch, heads, length * 2 * length])
        x_flat = F.pad(x_flat, convert_pad_shape([[0, 0], [0, 0], [0, length - 1]]))
        # Reshape and slice out the padded elements.
        x_final = x_flat.view([batch, heads, length + 1, 2 * length - 1])[
            :, :, :length, length - 1 :
        ]
        return x_final

    def _absolute_position_to_relative_position(self, x):
        """
        x: [b, h, l, l]
        ret: [b, h, l, 2*l-1]
        """
        batch, heads, length, _ = x.size()
        # padd along column
        x = F.pad(x, convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, length - 1]]))
        x_flat = x.view([batch, heads, length ** 2 + length * (length - 1)])
        # add 0's in the beginning that will skew the elements after reshape
        x_flat = F.pad(x_flat, convert_pad_shape([[0, 0], [0, 0], [length, 0]]))
        x_final = x_flat.view([batch, heads, length, 2 * length])[:, :, :, 1:]
        return x_final

    def _attention_bias_proximal(self, length):
        """Bias for self-attention to encourage attention to close positions.
        Args:
            length: an integer scalar.
        Returns:
            a Tensor with shape [1, 1, length, length]
        """
        r = torch.arange(length, dtype=torch.float32)
        diff = torch.unsqueeze(r, 0) - torch.unsqueeze(r, 1)
        return torch.unsqueeze(torch.unsqueeze(-torch.log1p(torch.abs(diff)), 0), 0)
| 8,150 | 36.911628 | 88 | py |
nnsvs | nnsvs-master/nnsvs/diffsinger/diffusion.py | from collections import deque
from functools import partial
import numpy as np
import torch
from nnsvs.base import BaseModel, PredictionType
from tqdm import tqdm
def extract(a, t, x_shape):
    """Gather per-timestep coefficients and reshape for broadcasting.

    Args:
        a (Tensor): Coefficient table indexed by diffusion step.
        t (Tensor): Batch of step indices (B,).
        x_shape (tuple): Shape of the tensor the result broadcasts against.

    Returns:
        Tensor: Gathered values of shape (B, 1, ..., 1).
    """
    batch = t.shape[0]
    gathered = a.gather(-1, t)
    return gathered.reshape(batch, *((1,) * (len(x_shape) - 1)))
def noise_like(shape, noise_fn, device, repeat=False):
    """Draw noise of ``shape``; optionally share one sample across the batch.

    Args:
        shape (tuple): Desired output shape (batch first).
        noise_fn (callable): Sampler such as ``torch.randn``.
        device: Target device.
        repeat (bool): When True, sample one batch element and tile it.

    Returns:
        Tensor: Noise tensor of the requested shape.
    """
    if not repeat:
        return noise_fn(*shape, device=device)
    # Sample a single batch element, then tile along the batch axis only.
    single = noise_fn(1, *shape[1:], device=device)
    return single.repeat(shape[0], *((1,) * (len(shape) - 1)))
def linear_beta_schedule(timesteps, min_beta=1e-4, max_beta=0.06):
    """Linearly spaced noise schedule from ``min_beta`` to ``max_beta``.

    Args:
        timesteps (int): Number of diffusion steps.
        min_beta (float): Beta at step 0.
        max_beta (float): Beta at the last step.

    Returns:
        ndarray: Betas of shape (timesteps,).
    """
    return np.linspace(min_beta, max_beta, timesteps)
def cosine_beta_schedule(timesteps, s=0.008):
    """Cosine noise schedule.

    As proposed in https://openreview.net/forum?id=-NEXDKk8gZ: the cumulative
    alpha follows a squared cosine, and betas are derived from consecutive
    ratios, clipped to [0, 0.999].

    Args:
        timesteps (int): Number of diffusion steps.
        s (float): Small offset preventing beta from being 0 at step 0.

    Returns:
        ndarray: Betas of shape (timesteps,).
    """
    steps = timesteps + 1
    grid = np.linspace(0, steps, steps)
    cum_alphas = np.cos(((grid / steps) + s) / (1 + s) * np.pi * 0.5) ** 2
    cum_alphas = cum_alphas / cum_alphas[0]
    betas = 1 - cum_alphas[1:] / cum_alphas[:-1]
    return np.clip(betas, a_min=0, a_max=0.999)
# Registry of supported noise schedules, keyed by the ``schedule_type``
# string accepted by GaussianDiffusion.
beta_schedule = {
    "cosine": cosine_beta_schedule,
    "linear": linear_beta_schedule,
}
class GaussianDiffusion(BaseModel):
def __init__(
self,
in_dim,
out_dim,
denoise_fn,
encoder=None,
K_step=100,
betas=None,
schedule_type="linear",
scheduler_params=None,
norm_scale=10,
pndm_speedup=None,
):
super().__init__()
self.in_dim = in_dim
self.out_dim = out_dim
self.denoise_fn = denoise_fn
self.K_step = K_step
self.pndm_speedup = pndm_speedup
self.encoder = encoder
self.norm_scale = norm_scale
if scheduler_params is None:
if schedule_type == "linear":
scheduler_params = {"max_beta": 0.06}
elif schedule_type == "cosine":
scheduler_params = {"s": 0.008}
if encoder is not None:
assert encoder.in_dim == in_dim, "encoder input dim must match in_dim"
assert out_dim == denoise_fn.in_dim, "denoise_fn input dim must match out_dim"
if pndm_speedup:
raise NotImplementedError("pndm_speedup is not implemented yet")
if betas is not None:
betas = (
betas.detach().cpu().numpy()
if isinstance(betas, torch.Tensor)
else betas
)
else:
betas = beta_schedule[schedule_type](K_step, **scheduler_params)
alphas = 1.0 - betas
alphas_cumprod = np.cumprod(alphas, axis=0)
alphas_cumprod_prev = np.append(1.0, alphas_cumprod[:-1])
self.noise_list = deque(maxlen=4)
to_torch = partial(torch.tensor, dtype=torch.float32)
self.register_buffer("betas", to_torch(betas))
self.register_buffer("alphas_cumprod", to_torch(alphas_cumprod))
self.register_buffer("alphas_cumprod_prev", to_torch(alphas_cumprod_prev))
# calculations for diffusion q(x_t | x_{t-1}) and others
self.register_buffer("sqrt_alphas_cumprod", to_torch(np.sqrt(alphas_cumprod)))
self.register_buffer(
"sqrt_one_minus_alphas_cumprod", to_torch(np.sqrt(1.0 - alphas_cumprod))
)
self.register_buffer(
"log_one_minus_alphas_cumprod", to_torch(np.log(1.0 - alphas_cumprod))
)
self.register_buffer(
"sqrt_recip_alphas_cumprod", to_torch(np.sqrt(1.0 / alphas_cumprod))
)
self.register_buffer(
"sqrt_recipm1_alphas_cumprod", to_torch(np.sqrt(1.0 / alphas_cumprod - 1))
)
# calculations for posterior q(x_{t-1} | x_t, x_0)
posterior_variance = (
betas * (1.0 - alphas_cumprod_prev) / (1.0 - alphas_cumprod)
)
# above: equal to 1. / (1. / (1. - alpha_cumprod_tm1) + alpha_t / beta_t)
self.register_buffer("posterior_variance", to_torch(posterior_variance))
# below: log calculation clipped because the posterior variance is 0
# at the beginning of the diffusion chain
self.register_buffer(
"posterior_log_variance_clipped",
to_torch(np.log(np.maximum(posterior_variance, 1e-20))),
)
self.register_buffer(
"posterior_mean_coef1",
to_torch(betas * np.sqrt(alphas_cumprod_prev) / (1.0 - alphas_cumprod)),
)
self.register_buffer(
"posterior_mean_coef2",
to_torch(
(1.0 - alphas_cumprod_prev) * np.sqrt(alphas) / (1.0 - alphas_cumprod)
),
)
# NOTE: since the original impl. assume the data is distributed in [-1, 1]
# let us (roughly) convert N(0,1) noramlized to data to [-1, 1]
def _norm(self, x, a_max=10):
return x / a_max
def _denorm(self, x, a_max=10):
return x * a_max
def prediction_type(self):
return PredictionType.DIFFUSION
def q_mean_variance(self, x_start, t):
mean = extract(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start
variance = extract(1.0 - self.alphas_cumprod, t, x_start.shape)
log_variance = extract(self.log_one_minus_alphas_cumprod, t, x_start.shape)
return mean, variance, log_variance
def predict_start_from_noise(self, x_t, t, noise):
return (
extract(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t
- extract(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * noise
)
def q_posterior(self, x_start, x_t, t):
posterior_mean = (
extract(self.posterior_mean_coef1, t, x_t.shape) * x_start
+ extract(self.posterior_mean_coef2, t, x_t.shape) * x_t
)
posterior_variance = extract(self.posterior_variance, t, x_t.shape)
posterior_log_variance_clipped = extract(
self.posterior_log_variance_clipped, t, x_t.shape
)
return posterior_mean, posterior_variance, posterior_log_variance_clipped
def p_mean_variance(self, x, t, cond, clip_denoised: bool):
noise_pred = self.denoise_fn(x, t, cond=cond)
x_recon = self.predict_start_from_noise(x, t=t, noise=noise_pred)
if clip_denoised:
x_recon.clamp_(-1.0, 1.0)
model_mean, posterior_variance, posterior_log_variance = self.q_posterior(
x_start=x_recon, x_t=x, t=t
)
return model_mean, posterior_variance, posterior_log_variance
@torch.no_grad()
def p_sample(
self, x, t, cond, noise_fn=torch.randn, clip_denoised=True, repeat_noise=False
):
b, *_, device = *x.shape, x.device
model_mean, _, model_log_variance = self.p_mean_variance(
x=x, t=t, cond=cond, clip_denoised=clip_denoised
)
noise = noise_like(x.shape, noise_fn, device, repeat_noise)
# no noise when t == 0
nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1)))
return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise
@torch.no_grad()
def p_sample_plms(self, x, t, interval, cond):
"""
Use the PLMS method from Pseudo Numerical Methods for Diffusion Models on Manifolds
https://arxiv.org/abs/2202.09778.
"""
def get_x_pred(x, noise_t, t):
a_t = extract(self.alphas_cumprod, t, x.shape)
a_prev = extract(
self.alphas_cumprod,
torch.max(t - interval, torch.zeros_like(t)),
x.shape,
)
a_t_sq, a_prev_sq = a_t.sqrt(), a_prev.sqrt()
x_delta = (a_prev - a_t) * (
(1 / (a_t_sq * (a_t_sq + a_prev_sq))) * x
- 1
/ (a_t_sq * (((1 - a_prev) * a_t).sqrt() + ((1 - a_t) * a_prev).sqrt()))
* noise_t
)
x_pred = x + x_delta
return x_pred
noise_list = self.noise_list
noise_pred = self.denoise_fn(x, t, cond=cond)
if len(noise_list) == 0:
x_pred = get_x_pred(x, noise_pred, t)
# noise_pred_prev = self.denoise_fn(x_pred, max(t - interval, 0), cond=cond)
noise_pred_prev = self.denoise_fn(
x_pred, torch.max(t - interval, torch.zeros_like(t)), cond=cond
)
noise_pred_prime = (noise_pred + noise_pred_prev) / 2
elif len(noise_list) == 1:
noise_pred_prime = (3 * noise_pred - noise_list[-1]) / 2
elif len(noise_list) == 2:
noise_pred_prime = (
23 * noise_pred - 16 * noise_list[-1] + 5 * noise_list[-2]
) / 12
elif len(noise_list) >= 3:
noise_pred_prime = (
55 * noise_pred
- 59 * noise_list[-1]
+ 37 * noise_list[-2]
- 9 * noise_list[-3]
) / 24
x_prev = get_x_pred(x, noise_pred_prime, t)
noise_list.append(noise_pred)
return x_prev
def q_sample(self, x_start, t, noise=None):
if noise is None:
noise = torch.randn_like(x_start)
return (
extract(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start
+ extract(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise
)
def forward(self, cond, lengths=None, y=None):
"""Forward step
Args:
cond (torch.Tensor): conditioning features of shaep (B, T, encoder_hidden_dim)
lengths (torch.Tensor): lengths of each sequence in the batch
y (torch.Tensor): ground truth of shape (B, T, C)
Returns:
tuple of tensors (B, T, in_dim), (B, T, in_dim)
"""
B = cond.shape[0]
device = cond.device
if self.encoder is not None:
cond = self.encoder(cond, lengths)
# (B, M, T)
cond = cond.transpose(1, 2)
t = torch.randint(0, self.K_step, (B,), device=device).long()
x = self._norm(y, self.norm_scale)
x = x.transpose(1, 2)[:, None, :, :] # [B, 1, M, T]
noise = torch.randn_like(x)
x_noisy = self.q_sample(x_start=x, t=t, noise=noise)
x_recon = self.denoise_fn(x_noisy, t, cond)
noise = noise.squeeze(1).transpose(1, 2)
x_recon = x_recon.squeeze(1).transpose(1, 2)
return noise, x_recon
    def inference(self, cond, lengths=None):
        """Generate features by iteratively denoising from pure noise.

        Args:
            cond (torch.Tensor): conditioning features of shape (B, T, encoder_hidden_dim)
            lengths (torch.Tensor): lengths of each sequence in the batch

        Returns:
            torch.Tensor: denormalized output of shape (B, T, out_dim)
        """
        B = cond.shape[0]
        device = cond.device
        if self.encoder is not None:
            cond = self.encoder(cond, lengths)
        # (B, M, T)
        cond = cond.transpose(1, 2)
        t = self.K_step
        shape = (cond.shape[0], 1, self.out_dim, cond.shape[2])
        # Start the reverse process from pure Gaussian noise.
        x = torch.randn(shape, device=device)
        if self.pndm_speedup:
            # PLMS sampling: take every `pndm_speedup`-th timestep, keeping a
            # buffer of recent noise predictions for the multi-step correction.
            self.noise_list = deque(maxlen=4)
            iteration_interval = int(self.pndm_speedup)
            for i in tqdm(
                reversed(range(0, t, iteration_interval)),
                desc="sample time step",
                total=t // iteration_interval,
            ):
                x = self.p_sample_plms(
                    x,
                    torch.full((B,), i, device=device, dtype=torch.long),
                    iteration_interval,
                    cond,
                )
        else:
            # Plain DDPM ancestral sampling over all K_step timesteps.
            for i in tqdm(reversed(range(0, t)), desc="sample time step", total=t):
                x = self.p_sample(
                    x, torch.full((B,), i, device=device, dtype=torch.long), cond
                )
        # (B, 1, M, T) -> (B, T, M), then undo the data normalization.
        x = self._denorm(x[:, 0].transpose(1, 2), self.norm_scale)
        return x
| 11,770 | 33.928783 | 91 | py |
nnsvs | nnsvs-master/nnsvs/diffsinger/fs2.py | import math
import torch
from nnsvs.base import BaseModel
from nnsvs.util import make_pad_mask
from torch import nn
from torch.nn import Parameter
from torch.nn import functional as F
def softmax(x, dim):
    """Softmax along ``dim``, always computed in float32 for numerical stability."""
    return x.softmax(dim=dim, dtype=torch.float32)
class PositionalEncoding(torch.nn.Module):
    """Positional encoding.

    Args:
        d_model (int): Embedding dimension.
        dropout_rate (float): Dropout rate.
        max_len (int): Maximum input length.
        reverse (bool): Whether to reverse the input position.
    """

    def __init__(self, d_model, dropout_rate, max_len=5000, reverse=False):
        """Construct an PositionalEncoding object."""
        super(PositionalEncoding, self).__init__()
        self.d_model = d_model
        self.reverse = reverse
        # Inputs are scaled by sqrt(d_model) before the encoding is added.
        self.xscale = math.sqrt(self.d_model)
        self.dropout = torch.nn.Dropout(p=dropout_rate)
        self.pe = None
        # Pre-compute the table for max_len positions.
        self.extend_pe(torch.tensor(0.0).expand(1, max_len))

    def extend_pe(self, x):
        """Reset the positional encodings."""
        if self.pe is not None:
            # Reuse the cached table when it is long enough; just realign
            # dtype/device with the input.
            if self.pe.size(1) >= x.size(1):
                if self.pe.dtype != x.dtype or self.pe.device != x.device:
                    self.pe = self.pe.to(dtype=x.dtype, device=x.device)
                return
        pe = torch.zeros(x.size(1), self.d_model)
        if self.reverse:
            # Positions counted backwards: T-1, T-2, ..., 0.
            position = torch.arange(
                x.size(1) - 1, -1, -1.0, dtype=torch.float32
            ).unsqueeze(1)
        else:
            position = torch.arange(0, x.size(1), dtype=torch.float32).unsqueeze(1)
        div_term = torch.exp(
            torch.arange(0, self.d_model, 2, dtype=torch.float32)
            * -(math.log(10000.0) / self.d_model)
        )
        # Interleave sin/cos over the feature dimension.
        pe[:, 0::2] = torch.sin(position * div_term)
        pe[:, 1::2] = torch.cos(position * div_term)
        pe = pe.unsqueeze(0)
        self.pe = pe.to(device=x.device, dtype=x.dtype)

    def forward(self, x: torch.Tensor):
        """Add positional encoding.

        Args:
            x (torch.Tensor): Input tensor (batch, time, `*`).

        Returns:
            torch.Tensor: Encoded tensor (batch, time, `*`).
        """
        self.extend_pe(x)
        x = x * self.xscale + self.pe[:, : x.size(1)]
        return self.dropout(x)
class RelPositionalEncoding(PositionalEncoding):
    """Relative positional encoding module.

    See : Appendix B in https://arxiv.org/abs/1901.02860

    Args:
        d_model (int): Embedding dimension.
        dropout_rate (float): Dropout rate.
        max_len (int): Maximum input length.
    """

    def __init__(self, d_model, dropout_rate, max_len=5000):
        """Initialize class."""
        # reverse=True gives positions counted from the end of the sequence.
        super().__init__(d_model, dropout_rate, max_len, reverse=True)

    def forward(self, x):
        """Compute positional encoding.

        Args:
            x (torch.Tensor): Input tensor (batch, time, `*`).

        Returns:
            torch.Tensor: Scaled input plus (dropped-out) positional
            embedding, shape (batch, time, `*`).
        """
        self.extend_pe(x)
        x = x * self.xscale
        pos_emb = self.pe[:, : x.size(1)]
        return self.dropout(x) + self.dropout(pos_emb)
class MultiheadAttention(nn.Module):
    """Multi-head attention (fairseq-style port).

    Supports self-attention and encoder-decoder attention. When possible it
    delegates to ``F.multi_head_attention_forward``; otherwise it falls back
    to the explicit implementation below, which additionally supports
    ``enc_dec_attn_constraint_mask`` and ``reset_attn_weight`` and returns
    raw attention logits alongside the weights.
    """

    def __init__(
        self,
        embed_dim,
        num_heads,
        kdim=None,
        vdim=None,
        dropout=0.0,
        bias=True,
        add_bias_kv=False,
        add_zero_attn=False,
        self_attention=False,
        encoder_decoder_attention=False,
    ):
        super().__init__()
        self.embed_dim = embed_dim
        self.kdim = kdim if kdim is not None else embed_dim
        self.vdim = vdim if vdim is not None else embed_dim
        self.qkv_same_dim = self.kdim == embed_dim and self.vdim == embed_dim
        self.num_heads = num_heads
        self.dropout = dropout
        self.head_dim = embed_dim // num_heads
        assert (
            self.head_dim * num_heads == self.embed_dim
        ), "embed_dim must be divisible by num_heads"
        self.scaling = self.head_dim ** -0.5
        self.self_attention = self_attention
        self.encoder_decoder_attention = encoder_decoder_attention
        assert not self.self_attention or self.qkv_same_dim, (
            "Self-attention requires query, key and " "value to be of the same size"
        )
        # One fused q/k/v projection when the dims agree, else separate weights.
        if self.qkv_same_dim:
            self.in_proj_weight = Parameter(torch.Tensor(3 * embed_dim, embed_dim))
        else:
            self.k_proj_weight = Parameter(torch.Tensor(embed_dim, self.kdim))
            self.v_proj_weight = Parameter(torch.Tensor(embed_dim, self.vdim))
            self.q_proj_weight = Parameter(torch.Tensor(embed_dim, embed_dim))
        if bias:
            self.in_proj_bias = Parameter(torch.Tensor(3 * embed_dim))
        else:
            self.register_parameter("in_proj_bias", None)
        self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
        if add_bias_kv:
            self.bias_k = Parameter(torch.Tensor(1, 1, embed_dim))
            self.bias_v = Parameter(torch.Tensor(1, 1, embed_dim))
        else:
            self.bias_k = self.bias_v = None
        self.add_zero_attn = add_zero_attn
        self.reset_parameters()
        # Use the fused torch kernel when the running torch version has it.
        self.enable_torch_version = False
        if hasattr(F, "multi_head_attention_forward"):
            self.enable_torch_version = True
        else:
            self.enable_torch_version = False
        self.last_attn_probs = None

    def reset_parameters(self):
        """Xavier-initialize projections; zero the biases."""
        if self.qkv_same_dim:
            nn.init.xavier_uniform_(self.in_proj_weight)
        else:
            nn.init.xavier_uniform_(self.k_proj_weight)
            nn.init.xavier_uniform_(self.v_proj_weight)
            nn.init.xavier_uniform_(self.q_proj_weight)
        nn.init.xavier_uniform_(self.out_proj.weight)
        if self.in_proj_bias is not None:
            nn.init.constant_(self.in_proj_bias, 0.0)
            nn.init.constant_(self.out_proj.bias, 0.0)
        if self.bias_k is not None:
            nn.init.xavier_normal_(self.bias_k)
        if self.bias_v is not None:
            nn.init.xavier_normal_(self.bias_v)

    def forward(
        self,
        query,
        key,
        value,
        key_padding_mask=None,
        incremental_state=None,
        need_weights=True,
        static_kv=False,
        attn_mask=None,
        before_softmax=False,
        need_head_weights=False,
        enc_dec_attn_constraint_mask=None,
        reset_attn_weight=None,
    ):
        """Input shape: Time x Batch x Channel

        Args:
            key_padding_mask (ByteTensor, optional): mask to exclude
                keys that are pads, of shape `(batch, src_len)`, where
                padding elements are indicated by 1s.
            need_weights (bool, optional): return the attention weights,
                averaged over heads (default: False).
            attn_mask (ByteTensor, optional): typically used to
                implement causal attention, where the mask prevents the
                attention from looking forward in time (default: None).
            before_softmax (bool, optional): return the raw attention
                weights and values before the attention softmax.
            need_head_weights (bool, optional): return the attention
                weights for each head. Implies *need_weights*. Default:
                return the average attention weights over all heads.
        """
        if need_head_weights:
            need_weights = True
        tgt_len, bsz, embed_dim = query.size()
        assert embed_dim == self.embed_dim
        assert list(query.size()) == [tgt_len, bsz, embed_dim]
        # Fast path: defer to torch's fused implementation when no extra
        # features (incremental state, static kv, attn-weight reuse) are used.
        if (
            self.enable_torch_version
            and incremental_state is None
            and not static_kv
            and reset_attn_weight is None
        ):
            if self.qkv_same_dim:
                return F.multi_head_attention_forward(
                    query,
                    key,
                    value,
                    self.embed_dim,
                    self.num_heads,
                    self.in_proj_weight,
                    self.in_proj_bias,
                    self.bias_k,
                    self.bias_v,
                    self.add_zero_attn,
                    self.dropout,
                    self.out_proj.weight,
                    self.out_proj.bias,
                    self.training,
                    key_padding_mask,
                    need_weights,
                    attn_mask,
                )
            else:
                return F.multi_head_attention_forward(
                    query,
                    key,
                    value,
                    self.embed_dim,
                    self.num_heads,
                    torch.empty([0]),
                    self.in_proj_bias,
                    self.bias_k,
                    self.bias_v,
                    self.add_zero_attn,
                    self.dropout,
                    self.out_proj.weight,
                    self.out_proj.bias,
                    self.training,
                    key_padding_mask,
                    need_weights,
                    attn_mask,
                    use_separate_proj_weight=True,
                    q_proj_weight=self.q_proj_weight,
                    k_proj_weight=self.k_proj_weight,
                    v_proj_weight=self.v_proj_weight,
                )
        # Incremental (step-by-step) decoding is not supported in this port.
        if incremental_state is not None:
            raise NotImplementedError()
        else:
            saved_state = None
        if self.self_attention:
            # self-attention
            q, k, v = self.in_proj_qkv(query)
        elif self.encoder_decoder_attention:
            # encoder-decoder attention
            q = self.in_proj_q(query)
            if key is None:
                assert value is None
                k = v = None
            else:
                k = self.in_proj_k(key)
                v = self.in_proj_v(key)
        else:
            q = self.in_proj_q(query)
            k = self.in_proj_k(key)
            v = self.in_proj_v(value)
        q *= self.scaling
        if self.bias_k is not None:
            assert self.bias_v is not None
            # Append learned bias key/value; extend masks by one column.
            k = torch.cat([k, self.bias_k.repeat(1, bsz, 1)])
            v = torch.cat([v, self.bias_v.repeat(1, bsz, 1)])
            if attn_mask is not None:
                attn_mask = torch.cat(
                    [attn_mask, attn_mask.new_zeros(attn_mask.size(0), 1)], dim=1
                )
            if key_padding_mask is not None:
                key_padding_mask = torch.cat(
                    [
                        key_padding_mask,
                        key_padding_mask.new_zeros(key_padding_mask.size(0), 1),
                    ],
                    dim=1,
                )
        # Fold heads into the batch dimension: (B*H, T, head_dim).
        q = (
            q.contiguous()
            .view(tgt_len, bsz * self.num_heads, self.head_dim)
            .transpose(0, 1)
        )
        if k is not None:
            k = (
                k.contiguous()
                .view(-1, bsz * self.num_heads, self.head_dim)
                .transpose(0, 1)
            )
        if v is not None:
            v = (
                v.contiguous()
                .view(-1, bsz * self.num_heads, self.head_dim)
                .transpose(0, 1)
            )
        if saved_state is not None:
            raise NotImplementedError()
        src_len = k.size(1)
        # This is part of a workaround to get around fork/join parallelism
        # not supporting Optional types.
        if key_padding_mask is not None and key_padding_mask.shape == torch.Size([]):
            key_padding_mask = None
        if key_padding_mask is not None:
            assert key_padding_mask.size(0) == bsz
            assert key_padding_mask.size(1) == src_len
        if self.add_zero_attn:
            # Append an all-zero key/value position; extend masks accordingly.
            src_len += 1
            k = torch.cat([k, k.new_zeros((k.size(0), 1) + k.size()[2:])], dim=1)
            v = torch.cat([v, v.new_zeros((v.size(0), 1) + v.size()[2:])], dim=1)
            if attn_mask is not None:
                attn_mask = torch.cat(
                    [attn_mask, attn_mask.new_zeros(attn_mask.size(0), 1)], dim=1
                )
            if key_padding_mask is not None:
                key_padding_mask = torch.cat(
                    [
                        key_padding_mask,
                        torch.zeros(key_padding_mask.size(0), 1).type_as(
                            key_padding_mask
                        ),
                    ],
                    dim=1,
                )
        attn_weights = torch.bmm(q, k.transpose(1, 2))
        attn_weights = self.apply_sparse_mask(attn_weights, tgt_len, src_len, bsz)
        assert list(attn_weights.size()) == [bsz * self.num_heads, tgt_len, src_len]
        if attn_mask is not None:
            if len(attn_mask.shape) == 2:
                attn_mask = attn_mask.unsqueeze(0)
            elif len(attn_mask.shape) == 3:
                attn_mask = (
                    attn_mask[:, None]
                    .repeat([1, self.num_heads, 1, 1])
                    .reshape(bsz * self.num_heads, tgt_len, src_len)
                )
            attn_weights = attn_weights + attn_mask
        if enc_dec_attn_constraint_mask is not None:  # bs x head x L_kv
            attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
            attn_weights = attn_weights.masked_fill(
                enc_dec_attn_constraint_mask.unsqueeze(2).bool(),
                -1e9,
            )
            attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
        if key_padding_mask is not None:
            # don't attend to padding symbols
            attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
            attn_weights = attn_weights.masked_fill(
                key_padding_mask.unsqueeze(1).unsqueeze(2),
                -1e9,
            )
            attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
        # Keep the pre-softmax logits for callers that want them.
        attn_logits = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
        if before_softmax:
            return attn_weights, v
        attn_weights_float = softmax(attn_weights, dim=-1)
        attn_weights = attn_weights_float.type_as(attn_weights)
        attn_probs = F.dropout(
            attn_weights_float.type_as(attn_weights),
            p=self.dropout,
            training=self.training,
        )
        if reset_attn_weight is not None:
            # True: stash current probs; False: reuse previously stashed probs.
            if reset_attn_weight:
                self.last_attn_probs = attn_probs.detach()
            else:
                assert self.last_attn_probs is not None
                attn_probs = self.last_attn_probs
        attn = torch.bmm(attn_probs, v)
        assert list(attn.size()) == [bsz * self.num_heads, tgt_len, self.head_dim]
        attn = attn.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim)
        attn = self.out_proj(attn)
        if need_weights:
            attn_weights = attn_weights_float.view(
                bsz, self.num_heads, tgt_len, src_len
            ).transpose(1, 0)
            if not need_head_weights:
                # average attention weights over heads
                attn_weights = attn_weights.mean(dim=0)
        else:
            attn_weights = None
        return attn, (attn_weights, attn_logits)

    def in_proj_qkv(self, query):
        # Fused projection split into q, k, v chunks.
        return self._in_proj(query).chunk(3, dim=-1)

    def in_proj_q(self, query):
        if self.qkv_same_dim:
            return self._in_proj(query, end=self.embed_dim)
        else:
            bias = self.in_proj_bias
            if bias is not None:
                bias = bias[: self.embed_dim]
            return F.linear(query, self.q_proj_weight, bias)

    def in_proj_k(self, key):
        if self.qkv_same_dim:
            return self._in_proj(key, start=self.embed_dim, end=2 * self.embed_dim)
        else:
            weight = self.k_proj_weight
            bias = self.in_proj_bias
            if bias is not None:
                bias = bias[self.embed_dim : 2 * self.embed_dim]
            return F.linear(key, weight, bias)

    def in_proj_v(self, value):
        if self.qkv_same_dim:
            return self._in_proj(value, start=2 * self.embed_dim)
        else:
            weight = self.v_proj_weight
            bias = self.in_proj_bias
            if bias is not None:
                bias = bias[2 * self.embed_dim :]
            return F.linear(value, weight, bias)

    def _in_proj(self, input, start=0, end=None):
        # Slice the fused 3*embed_dim projection for the requested range.
        weight = self.in_proj_weight
        bias = self.in_proj_bias
        weight = weight[start:end, :]
        if bias is not None:
            bias = bias[start:end]
        return F.linear(input, weight, bias)

    def apply_sparse_mask(self, attn_weights, tgt_len, src_len, bsz):
        # Hook for subclasses; identity here.
        return attn_weights
def LayerNorm(normalized_shape, eps=1e-5, elementwise_affine=True):
    """Factory for :class:`torch.nn.LayerNorm` with fairseq-style defaults."""
    return nn.LayerNorm(normalized_shape, eps=eps, elementwise_affine=elementwise_affine)
class BatchNorm1dTBC(nn.Module):
    """BatchNorm1d for inputs laid out as [T, B, C]."""

    def __init__(self, c):
        super(BatchNorm1dTBC, self).__init__()
        self.bn = nn.BatchNorm1d(c)

    def forward(self, x):
        """
        :param x: [T, B, C]
        :return: [T, B, C]
        """
        # [T, B, C] -> [B, C, T] for nn.BatchNorm1d, then back.
        return self.bn(x.permute(1, 2, 0)).permute(2, 0, 1)
def Linear(in_features, out_features, bias=True):
    """nn.Linear factory with Xavier-uniform weights and zero-initialized bias."""
    layer = nn.Linear(in_features, out_features, bias)
    nn.init.xavier_uniform_(layer.weight)
    if bias:
        nn.init.constant_(layer.bias, 0.0)
    return layer
class Swish(torch.autograd.Function):
    """Memory-efficient swish activation: f(x) = x * sigmoid(x).

    Implemented as a custom autograd Function so only the input tensor needs
    to be saved for the backward pass.
    """

    @staticmethod
    def forward(ctx, i):
        result = i * torch.sigmoid(i)
        ctx.save_for_backward(i)
        return result

    @staticmethod
    def backward(ctx, grad_output):
        # BUGFIX: ctx.saved_variables is deprecated and removed in modern
        # PyTorch; ctx.saved_tensors is the supported accessor.
        (i,) = ctx.saved_tensors
        sigmoid_i = torch.sigmoid(i)
        # d/dx [x * sigmoid(x)] = sigmoid(x) * (1 + x * (1 - sigmoid(x)))
        return grad_output * (sigmoid_i * (1 + i * (1 - sigmoid_i)))
class CustomSwish(nn.Module):
    """Module wrapper around the custom :class:`Swish` autograd function."""

    def forward(self, input_tensor):
        return Swish.apply(input_tensor)
class TransformerFFNLayer(nn.Module):
    """Position-wise feed-forward layer (conv1d + linear) used in FFT blocks.

    Operates on [T, B, C] inputs; ``padding="SAME"`` gives centered padding,
    ``padding="LEFT"`` gives causal (left-only) padding.
    """

    def __init__(
        self,
        hidden_size,
        filter_size,
        padding="SAME",
        kernel_size=1,
        dropout=0.0,
        act="gelu",
    ):
        super().__init__()
        self.kernel_size = kernel_size
        self.dropout = dropout
        self.act = act
        if padding == "SAME":
            self.ffn_1 = nn.Conv1d(
                hidden_size, filter_size, kernel_size, padding=kernel_size // 2
            )
        elif padding == "LEFT":
            # Causal: pad only on the left so no future frames leak in.
            self.ffn_1 = nn.Sequential(
                nn.ConstantPad1d((kernel_size - 1, 0), 0.0),
                nn.Conv1d(hidden_size, filter_size, kernel_size),
            )
        self.ffn_2 = Linear(filter_size, hidden_size)
        if self.act == "swish":
            self.swish_fn = CustomSwish()

    def forward(self, x, incremental_state=None):
        # x: T x B x C
        # Incremental decoding is unsupported; this assert fires if requested.
        if incremental_state is not None:
            assert incremental_state is None, "Nar-generation does not allow this."
        x = self.ffn_1(x.permute(1, 2, 0)).permute(2, 0, 1)
        # Scale by 1/sqrt(kernel_size) to keep output variance stable.
        x = x * self.kernel_size ** -0.5
        if incremental_state is not None:
            x = x[-1:]
        if self.act == "gelu":
            x = F.gelu(x)
        if self.act == "relu":
            x = F.relu(x)
        if self.act == "swish":
            x = self.swish_fn(x)
        x = F.dropout(x, self.dropout, training=self.training)
        x = self.ffn_2(x)
        return x
class EncSALayer(nn.Module):
    """Encoder layer: optional self-attention sub-layer plus a conv FFN sub-layer.

    Both sub-layers use pre-normalization and residual connections, and padded
    positions are re-zeroed after each sub-layer.
    """

    def __init__(
        self,
        c,
        num_heads,
        dropout,
        attention_dropout=0.1,
        relu_dropout=0.1,
        kernel_size=9,
        padding="SAME",
        norm="ln",
        act="gelu",
    ):
        super().__init__()
        self.c = c
        self.dropout = dropout
        self.num_heads = num_heads
        # The self-attention sub-layer is skipped entirely when num_heads == 0.
        if num_heads > 0:
            if norm == "ln":
                self.layer_norm1 = LayerNorm(c)
            elif norm == "bn":
                self.layer_norm1 = BatchNorm1dTBC(c)
            self.self_attn = MultiheadAttention(
                self.c,
                num_heads,
                self_attention=True,
                dropout=attention_dropout,
                bias=False,
            )
        if norm == "ln":
            self.layer_norm2 = LayerNorm(c)
        elif norm == "bn":
            self.layer_norm2 = BatchNorm1dTBC(c)
        self.ffn = TransformerFFNLayer(
            c,
            4 * c,
            kernel_size=kernel_size,
            dropout=relu_dropout,
            padding=padding,
            act=act,
        )

    def forward(self, x, encoder_padding_mask=None, **kwargs):
        # x: [T, B, C]; encoder_padding_mask: [B, T] with 1s at padded frames.
        layer_norm_training = kwargs.get("layer_norm_training", None)
        if layer_norm_training is not None:
            self.layer_norm1.training = layer_norm_training
            self.layer_norm2.training = layer_norm_training
        if self.num_heads > 0:
            residual = x
            x = self.layer_norm1(x)
            x, _, = self.self_attn(
                query=x, key=x, value=x, key_padding_mask=encoder_padding_mask
            )
            x = F.dropout(x, self.dropout, training=self.training)
            x = residual + x
            # Zero out padded frames after the residual add.
            x = x * (1 - encoder_padding_mask.float()).transpose(0, 1)[..., None]
        residual = x
        x = self.layer_norm2(x)
        x = self.ffn(x)
        x = F.dropout(x, self.dropout, training=self.training)
        x = residual + x
        x = x * (1 - encoder_padding_mask.float()).transpose(0, 1)[..., None]
        return x
class TransformerEncoderLayer(nn.Module):
    """Thin wrapper bundling :class:`EncSALayer` with its hyper-parameters."""

    def __init__(
        self,
        hidden_size,
        dropout,
        kernel_size=None,
        num_heads=2,
        norm="ln",
        padding="SAME",
        act="gelu",
    ):
        super().__init__()
        self.hidden_size = hidden_size
        self.dropout = dropout
        self.num_heads = num_heads
        self.op = EncSALayer(
            hidden_size,
            num_heads,
            dropout=dropout,
            attention_dropout=0.0,
            relu_dropout=dropout,
            kernel_size=kernel_size,
            padding=padding,
            norm=norm,
            act=act,
        )

    def forward(self, x, **kwargs):
        # x: [T, B, C]; kwargs are forwarded (e.g. encoder_padding_mask).
        return self.op(x, **kwargs)
class FFTBlocks(nn.Module):
    """Stack of feed-forward Transformer (FFT) encoder layers.

    Optionally adds a (learnably scaled) relative positional encoding, applies
    ``num_layers`` encoder layers with padding masking, and a final norm.
    """

    def __init__(
        self,
        hidden_size,
        num_layers,
        ffn_kernel_size=9,
        dropout=0.1,
        num_heads=2,
        use_pos_embed=True,
        use_last_norm=True,
        norm="ln",
        use_pos_embed_alpha=True,
    ):
        super().__init__()
        self.num_layers = num_layers
        embed_dim = self.hidden_size = hidden_size
        self.dropout = dropout
        self.use_pos_embed = use_pos_embed
        self.use_last_norm = use_last_norm
        if use_pos_embed:
            self.padding_idx = 0
            # Learnable scale on the positional encoding (constant 1 otherwise).
            self.pos_embed_alpha = (
                nn.Parameter(torch.Tensor([1])) if use_pos_embed_alpha else 1
            )
            self.embed_positions = RelPositionalEncoding(hidden_size, dropout_rate=0.0)
        self.layers = nn.ModuleList([])
        self.layers.extend(
            [
                TransformerEncoderLayer(
                    self.hidden_size,
                    self.dropout,
                    kernel_size=ffn_kernel_size,
                    num_heads=num_heads,
                )
                for _ in range(self.num_layers)
            ]
        )
        if self.use_last_norm:
            if norm == "ln":
                self.layer_norm = nn.LayerNorm(embed_dim)
            elif norm == "bn":
                self.layer_norm = BatchNorm1dTBC(embed_dim)
        else:
            self.layer_norm = None

    def forward(self, x, lengths, padding_mask=None):
        """
        :param x: [B, T, C]
        :param padding_mask: [B, T]
        :return: [B, T, C] or [L, B, T, C]
        """
        if padding_mask is None:
            padding_mask = make_pad_mask(lengths).to(x.device)
        # padding_mask = x.abs().sum(-1).eq(0).data if padding_mask is None else padding_mask
        nonpadding_mask_TB = (
            1 - padding_mask.transpose(0, 1).float()[:, :, None]
        )  # [T, B, 1]
        if self.use_pos_embed:
            # positions = self.pos_embed_alpha * self.embed_positions(x[..., 0])
            positions = self.pos_embed_alpha * self.embed_positions(x)
            x = x + positions
            x = F.dropout(x, p=self.dropout, training=self.training)
        # B x T x C -> T x B x C
        x = x.transpose(0, 1) * nonpadding_mask_TB
        hiddens = []
        for layer in self.layers:
            # Re-mask padded frames after every layer.
            x = layer(x, encoder_padding_mask=padding_mask) * nonpadding_mask_TB
            hiddens.append(x)
        if self.use_last_norm:
            x = self.layer_norm(x) * nonpadding_mask_TB
        x = x.transpose(0, 1)  # [B, T, C]
        return x
class FFTBlocksEncoder(BaseModel):
    """FFT-block encoder with optional phoneme embedding and time reduction.

    The input feature vector may contain a one-hot phoneme sub-vector at
    ``[in_ph_start_idx, in_ph_end_idx)``; when ``embed_dim`` is set, that
    sub-vector is replaced by a learned embedding added to a projection of the
    remaining features. With ``reduction_factor > 1``, frames are downsampled
    in time (by strided depthwise conv or slicing) and the output projection
    upsamples back by predicting ``reduction_factor`` frames per step.
    """

    def __init__(
        self,
        in_dim,
        out_dim,
        hidden_dim,
        num_layers=2,
        ffn_kernel_size=9,
        dropout=0.1,
        num_heads=2,
        use_pos_embed=True,
        use_last_norm=True,
        norm="ln",
        use_pos_embed_alpha=True,
        reduction_factor=1,
        downsample_by_conv=True,
        in_ph_start_idx: int = 1,
        in_ph_end_idx: int = 50,
        embed_dim=None,
    ):
        super().__init__()
        self.in_dim = in_dim
        self.out_dim = out_dim
        self.in_ph_start_idx = in_ph_start_idx
        self.in_ph_end_idx = in_ph_end_idx
        self.num_vocab = in_ph_end_idx - in_ph_start_idx
        self.embed_dim = embed_dim
        self.reduction_factor = reduction_factor
        if self.embed_dim is not None:
            assert in_dim > self.num_vocab
            # Learned phoneme embedding + projection of the non-phoneme features.
            self.emb = nn.Embedding(self.num_vocab, embed_dim)
            self.fc_in = nn.Linear(in_dim - self.num_vocab, embed_dim)
            self.fc = nn.Linear(embed_dim, hidden_dim)
        else:
            self.emb = None
            self.fc_in = None
            self.fc = nn.Linear(in_dim, hidden_dim)
        if reduction_factor > 1 and downsample_by_conv:
            # Depthwise strided conv for learned temporal downsampling.
            self.conv_downsample = nn.Conv1d(
                in_dim,
                in_dim,
                kernel_size=reduction_factor,
                stride=reduction_factor,
                groups=in_dim,
            )
        else:
            self.conv_downsample = None
        self.encoder = FFTBlocks(
            hidden_size=hidden_dim,
            num_layers=num_layers,
            ffn_kernel_size=ffn_kernel_size,
            dropout=dropout,
            num_heads=num_heads,
            use_pos_embed=use_pos_embed,
            use_last_norm=use_last_norm,
            norm=norm,
            use_pos_embed_alpha=use_pos_embed_alpha,
        )
        # Predicts reduction_factor frames at once, reshaped back in forward().
        self.fc_out = nn.Linear(hidden_dim, out_dim * reduction_factor)

    def forward(self, x, lengths, y=None):
        """Encode linguistic features.

        Args:
            x (torch.Tensor): input features of shape (B, T, in_dim)
            lengths (torch.Tensor): lengths of each sequence in the batch
            y: unused (kept for interface compatibility)

        Returns:
            torch.Tensor: output features of shape (B, T, out_dim)
        """
        if self.embed_dim is not None:
            x_first, x_ph_onehot, x_last = torch.split(
                x,
                [
                    self.in_ph_start_idx,
                    self.num_vocab,
                    self.in_dim - self.num_vocab - self.in_ph_start_idx,
                ],
                dim=-1,
            )
            x_ph = torch.argmax(x_ph_onehot, dim=-1)
            # Make sure to have one-hot vector
            assert (x_ph_onehot.sum(-1) <= 1).all()
            x = self.emb(x_ph) + self.fc_in(torch.cat([x_first, x_last], dim=-1))
        # Adjust lengths based on the reduction factor
        if self.reduction_factor > 1:
            lengths = (lengths / self.reduction_factor).long()
            if self.conv_downsample is not None:
                x = self.conv_downsample(x.transpose(1, 2)).transpose(1, 2)
            else:
                # Simple decimation: keep every reduction_factor-th frame.
                x = x[:, self.reduction_factor - 1 :: self.reduction_factor]
        x = self.fc(x)
        x = self.encoder(x, lengths)
        # (B, T_red, out_dim * r) -> (B, T_red * r, out_dim)
        x = self.fc_out(x).view(x.shape[0], -1, self.out_dim)
        return x
| 28,175 | 32.703349 | 93 | py |
nnsvs | nnsvs-master/nnsvs/diffsinger/pe.py | import math
import torch
from torch import nn
def denorm_f0(
    f0,
    uv,
    pitch_padding=None,
    min=None,
    max=None,
    pitch_norm="log",
    use_uv=True,
    f0_std=1.0,
    f0_mean=0.0,
):
    """Map a normalized F0 contour back to Hz.

    ``pitch_norm="log"`` interprets the input as log2(F0); ``"standard"``
    undoes mean/std normalization. Unvoiced frames (``uv > 0``) and padded
    frames are set to 0 in place on the denormalized tensor.
    """
    assert use_uv
    if pitch_norm == "log":
        f0 = 2 ** f0
    elif pitch_norm == "standard":
        f0 = f0 * f0_std + f0_mean
    if min is not None:
        f0 = f0.clamp(min=min)
    if max is not None:
        f0 = f0.clamp(max=max)
    if use_uv and uv is not None:
        f0[uv > 0] = 0
    if pitch_padding is not None:
        f0[pitch_padding] = 0
    return f0
def LayerNorm(normalized_shape, eps=1e-5, elementwise_affine=True, export=False):
    """LayerNorm factory: prefer apex's FusedLayerNorm on CUDA, else torch's."""
    if torch.cuda.is_available() and not export:
        try:
            from apex.normalization import FusedLayerNorm
        except ImportError:
            pass
        else:
            return FusedLayerNorm(normalized_shape, eps, elementwise_affine)
    return torch.nn.LayerNorm(normalized_shape, eps, elementwise_affine)
class ConvNorm(torch.nn.Module):
    """Conv1d with Xavier-uniform weight initialization (gain from *w_init_gain*)."""

    def __init__(
        self,
        in_channels,
        out_channels,
        kernel_size=1,
        stride=1,
        padding=None,
        dilation=1,
        bias=True,
        w_init_gain="linear",
    ):
        super(ConvNorm, self).__init__()
        if padding is None:
            # "same"-style padding requires an odd kernel.
            assert kernel_size % 2 == 1
            padding = int(dilation * (kernel_size - 1) / 2)
        self.conv = torch.nn.Conv1d(
            in_channels,
            out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            dilation=dilation,
            bias=bias,
        )
        gain = torch.nn.init.calculate_gain(w_init_gain)
        torch.nn.init.xavier_uniform_(self.conv.weight, gain=gain)

    def forward(self, signal):
        """Apply the convolution to a (B, C, T) signal."""
        return self.conv(signal)
def Linear(in_features, out_features, bias=True):
    """nn.Linear factory: Xavier-uniform weights, zero bias."""
    module = nn.Linear(in_features, out_features, bias)
    nn.init.xavier_uniform_(module.weight)
    if bias:
        nn.init.constant_(module.bias, 0.0)
    return module
def make_positions(tensor, padding_idx):
    """Replace non-padding symbols with their position numbers.

    Position numbers begin at padding_idx+1. Padding symbols are ignored.
    """
    # The cast dance (int mask, cumsum typed as the mask, long output) is kept
    # deliberately: it works with both ONNX export and XLA.
    non_pad = tensor.ne(padding_idx).int()
    positions = torch.cumsum(non_pad, dim=1).type_as(non_pad) * non_pad
    return positions.long() + padding_idx
class SinusoidalPositionalEmbedding(nn.Module):
    """This module produces sinusoidal positional embeddings of any length.

    Padding symbols are ignored.
    """

    def __init__(self, embedding_dim, padding_idx, init_size=1024):
        super().__init__()
        self.embedding_dim = embedding_dim
        self.padding_idx = padding_idx
        # Table pre-built for init_size positions; extended lazily in forward().
        self.weights = SinusoidalPositionalEmbedding.get_embedding(
            init_size,
            embedding_dim,
            padding_idx,
        )
        # Dummy buffer used only to track the module's device/dtype.
        self.register_buffer("_float_tensor", torch.FloatTensor(1))

    @staticmethod
    def get_embedding(num_embeddings, embedding_dim, padding_idx=None):
        """Build sinusoidal embeddings.

        This matches the implementation in tensor2tensor, but differs slightly
        from the description in Section 3.5 of "Attention Is All You Need".
        """
        half_dim = embedding_dim // 2
        emb = math.log(10000) / (half_dim - 1)
        emb = torch.exp(torch.arange(half_dim, dtype=torch.float) * -emb)
        emb = torch.arange(num_embeddings, dtype=torch.float).unsqueeze(
            1
        ) * emb.unsqueeze(0)
        # All sines first, then all cosines (not interleaved).
        emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1).view(
            num_embeddings, -1
        )
        if embedding_dim % 2 == 1:
            # zero pad
            emb = torch.cat([emb, torch.zeros(num_embeddings, 1)], dim=1)
        if padding_idx is not None:
            emb[padding_idx, :] = 0
        return emb

    def forward(
        self, input, incremental_state=None, timestep=None, positions=None, **kwargs
    ):
        """Input is expected to be of size [bsz x seqlen]."""
        bsz, seq_len = input.shape[:2]
        max_pos = self.padding_idx + 1 + seq_len
        if self.weights is None or max_pos > self.weights.size(0):
            # recompute/expand embeddings if needed
            self.weights = SinusoidalPositionalEmbedding.get_embedding(
                max_pos,
                self.embedding_dim,
                self.padding_idx,
            )
        self.weights = self.weights.to(self._float_tensor)
        if incremental_state is not None:
            # positions is the same for every token when decoding a single step
            pos = timestep.view(-1)[0] + 1 if timestep is not None else seq_len
            return self.weights[self.padding_idx + pos, :].expand(bsz, 1, -1)
        positions = (
            make_positions(input, self.padding_idx) if positions is None else positions
        )
        # Gather per-position rows; detach: the table is not a learned parameter.
        return (
            self.weights.index_select(0, positions.view(-1))
            .view(bsz, seq_len, -1)
            .detach()
        )

    def max_positions(self):
        """Maximum number of supported positions."""
        return int(1e5)  # an arbitrary large number
class FSLayerNorm(torch.nn.LayerNorm):
    """Layer normalization module.

    :param int nout: output dim size
    :param int dim: dimension to be normalized
    """

    def __init__(self, nout, dim=-1):
        """Construct an FSLayerNorm object (eps fixed at 1e-12)."""
        super().__init__(nout, eps=1e-12)
        self.dim = dim

    def forward(self, x):
        """Apply layer normalization along ``self.dim``.

        :param torch.Tensor x: input tensor
        :return: layer normalized tensor
        :rtype torch.Tensor
        """
        if self.dim == -1:
            return super().forward(x)
        # Move the target axis last, normalize, then move it back.
        return super().forward(x.transpose(1, -1)).transpose(1, -1)
class PitchPredictor(torch.nn.Module):
    def __init__(
        self,
        idim,
        n_layers=5,
        n_chans=384,
        odim=2,
        kernel_size=5,
        dropout_rate=0.1,
        padding="SAME",
    ):
        """Initialize pitch predictor module.

        Args:
            idim (int): Input dimension.
            n_layers (int, optional): Number of convolutional layers.
            n_chans (int, optional): Number of channels of convolutional layers.
            odim (int, optional): Output dimension per frame (e.g. 2 for
                [pitch, V/UV]).
            kernel_size (int, optional): Kernel size of convolutional layers.
            dropout_rate (float, optional): Dropout rate.
            padding (str, optional): "SAME" for centered padding, anything
                else gives causal (left-only) padding.
        """
        super(PitchPredictor, self).__init__()
        self.conv = torch.nn.ModuleList()
        self.kernel_size = kernel_size
        self.padding = padding
        for idx in range(n_layers):
            in_chans = idim if idx == 0 else n_chans
            self.conv += [
                torch.nn.Sequential(
                    torch.nn.ConstantPad1d(
                        ((kernel_size - 1) // 2, (kernel_size - 1) // 2)
                        if padding == "SAME"
                        else (kernel_size - 1, 0),
                        0,
                    ),
                    torch.nn.Conv1d(
                        in_chans, n_chans, kernel_size, stride=1, padding=0
                    ),
                    torch.nn.ReLU(),
                    FSLayerNorm(n_chans, dim=1),
                    torch.nn.Dropout(dropout_rate),
                )
            ]
        self.linear = torch.nn.Linear(n_chans, odim)
        self.embed_positions = SinusoidalPositionalEmbedding(idim, 0, init_size=4096)
        # Learnable scale for the positional embedding.
        self.pos_embed_alpha = nn.Parameter(torch.Tensor([1]))

    def forward(self, xs):
        """
        :param xs: [B, T, H]
        :return: [B, T, H]
        """
        # Add (scaled) sinusoidal positional encoding before the conv stack.
        positions = self.pos_embed_alpha * self.embed_positions(xs[..., 0])
        xs = xs + positions
        xs = xs.transpose(1, -1)  # (B, idim, Tmax)
        for f in self.conv:
            xs = f(xs)  # (B, C, Tmax)
        # NOTE: calculate in log domain
        xs = self.linear(xs.transpose(1, -1))  # (B, Tmax, H)
        return xs
class Prenet(nn.Module):
    """Strided conv prenet for mel inputs.

    Returns both the stacked per-layer hidden states and the projected output;
    all-zero frames in the input are treated as padding and masked out.
    """

    def __init__(self, in_dim=80, out_dim=256, kernel=5, n_layers=3, strides=None):
        super(Prenet, self).__init__()
        padding = kernel // 2
        self.layers = []
        self.strides = strides if strides is not None else [1] * n_layers
        for idx in range(n_layers):
            self.layers.append(
                nn.Sequential(
                    nn.Conv1d(
                        in_dim,
                        out_dim,
                        kernel_size=kernel,
                        padding=padding,
                        stride=self.strides[idx],
                    ),
                    nn.ReLU(),
                    nn.BatchNorm1d(out_dim),
                )
            )
            in_dim = out_dim
        self.layers = nn.ModuleList(self.layers)
        self.out_proj = nn.Linear(out_dim, out_dim)

    def forward(self, x):
        """
        :param x: [B, T, 80]
        :return: [L, B, T, H], [B, T, H]
        """
        # Frames whose features are all zero are treated as padding.
        padding_mask = x.abs().sum(-1).eq(0).data  # [B, T]
        nonpadding_mask_TB = 1 - padding_mask.float()[:, None, :]  # [B, 1, T]
        x = x.transpose(1, 2)
        hiddens = []
        for i, l in enumerate(self.layers):
            # Downsample the padding mask in step with the strided conv.
            nonpadding_mask_TB = nonpadding_mask_TB[:, :, :: self.strides[i]]
            x = l(x) * nonpadding_mask_TB
            hiddens.append(x)
        hiddens = torch.stack(hiddens, 0)  # [L, B, H, T]
        hiddens = hiddens.transpose(2, 3)  # [L, B, T, H]
        x = self.out_proj(x.transpose(1, 2))  # [B, T, H]
        x = x * nonpadding_mask_TB.transpose(1, 2)
        return hiddens, x
class ConvBlock(nn.Module):
    """Conv1d + normalization + ReLU + dropout block operating on [B, C, T]."""

    def __init__(
        self, idim=80, n_chans=256, kernel_size=3, stride=1, norm="gn", dropout=0
    ):
        super().__init__()
        self.conv = ConvNorm(idim, n_chans, kernel_size, stride=stride)
        # self.norm starts as the norm-type string and is replaced by a module
        # for "bn"/"in"/"gn"/"ln"; it stays a string for "none" and "wn".
        self.norm = norm
        if self.norm == "bn":
            self.norm = nn.BatchNorm1d(n_chans)
        elif self.norm == "in":
            self.norm = nn.InstanceNorm1d(n_chans, affine=True)
        elif self.norm == "gn":
            self.norm = nn.GroupNorm(n_chans // 16, n_chans)
        elif self.norm == "ln":
            # NOTE(review): LayerNorm here is the local factory with signature
            # (normalized_shape, eps, elementwise_affine), so n_chans lands in
            # the eps slot — looks suspicious; confirm against upstream.
            self.norm = LayerNorm(n_chans // 16, n_chans)
        elif self.norm == "wn":
            self.conv = torch.nn.utils.weight_norm(self.conv.conv)
        self.dropout = nn.Dropout(dropout)
        self.relu = nn.ReLU()

    def forward(self, x):
        """
        :param x: [B, C, T]
        :return: [B, C, T]
        """
        x = self.conv(x)
        if not isinstance(self.norm, str):
            # NOTE(review): inside this branch self.norm is a module, so the
            # string comparisons below can never match; only the final else
            # (x = self.norm(x)) is reachable.
            if self.norm == "none":
                pass
            elif self.norm == "ln":
                x = self.norm(x.transpose(1, 2)).transpose(1, 2)
            else:
                x = self.norm(x)
        x = self.relu(x)
        x = self.dropout(x)
        return x
class ConvStacks(nn.Module):
    """Stack of :class:`ConvBlock`s with optional residual connections.

    Input/output are [B, T, H]; convolutions run on the transposed [B, H, T]
    layout internally.
    """

    def __init__(
        self,
        idim=80,
        n_layers=5,
        n_chans=256,
        odim=32,
        kernel_size=5,
        norm="gn",
        dropout=0,
        strides=None,
        res=True,
    ):
        super().__init__()
        self.conv = torch.nn.ModuleList()
        self.kernel_size = kernel_size
        self.res = res
        self.in_proj = Linear(idim, n_chans)
        if strides is None:
            strides = [1] * n_layers
        else:
            assert len(strides) == n_layers
        for idx in range(n_layers):
            self.conv.append(
                ConvBlock(
                    n_chans,
                    n_chans,
                    kernel_size,
                    stride=strides[idx],
                    norm=norm,
                    dropout=dropout,
                )
            )
        self.out_proj = Linear(n_chans, odim)

    def forward(self, x, return_hiddens=False):
        """
        :param x: [B, T, H]
        :return: [B, T, H]
        """
        x = self.in_proj(x)
        x = x.transpose(1, -1)  # (B, idim, Tmax)
        hiddens = []
        for f in self.conv:
            x_ = f(x)
            # Residual connection around each block when self.res is set.
            x = x + x_ if self.res else x_  # (B, C, Tmax)
            hiddens.append(x)
        x = x.transpose(1, -1)
        x = self.out_proj(x)  # (B, Tmax, H)
        if return_hiddens:
            hiddens = torch.stack(hiddens, 1)  # [B, L, C, T]
            return x, hiddens
        return x
class PitchExtractor(nn.Module):
    """Pitch extractor that predicts log-F0 (and V/UV) from mel-spectrograms.

    The mel input is encoded by a conv prenet (plus optional conv stacks) and
    fed to a pitch predictor that outputs log2(F0) and a V/UV value per frame.

    Args:
        n_mel_bins (int): number of mel bins of the input
        conv_layers (int): number of conv layers applied after the prenet
        hidden_size (int): hidden channel size
        predictor_hidden (int): predictor channel size (-1 to reuse hidden_size)
        ffn_padding (str): padding mode for the predictor convolutions
        predictor_kernel (int): kernel size of the predictor convolutions
        pitch_type (str): pitch parameterization; only "log" is supported
        use_uv (bool): whether a V/UV flag is used
    """

    def __init__(
        self,
        n_mel_bins=80,
        conv_layers=2,
        hidden_size=256,
        predictor_hidden=-1,
        ffn_padding="SAME",
        predictor_kernel=5,
        pitch_type="log",
        use_uv=True,
    ):
        super().__init__()
        self.hidden_size = hidden_size
        self.pitch_type = pitch_type
        # BUGFIX: only log-scale pitch is implemented, but the previous default
        # (pitch_type="frame") contradicted this assert, so constructing the
        # module with default arguments always raised AssertionError.
        assert pitch_type == "log"
        self.use_uv = use_uv
        self.predictor_hidden = (
            predictor_hidden if predictor_hidden > 0 else self.hidden_size
        )
        self.conv_layers = conv_layers
        self.mel_prenet = Prenet(n_mel_bins, self.hidden_size, strides=[1, 1, 1])
        if self.conv_layers > 0:
            self.mel_encoder = ConvStacks(
                idim=self.hidden_size,
                n_chans=self.hidden_size,
                odim=self.hidden_size,
                n_layers=self.conv_layers,
            )
        self.pitch_predictor = PitchPredictor(
            self.hidden_size,
            n_chans=self.predictor_hidden,
            n_layers=5,
            dropout_rate=0.1,
            odim=2,
            padding=ffn_padding,
            kernel_size=predictor_kernel,
        )

    def forward(self, mel_input=None):
        """Predict log-F0 from a mel-spectrogram.

        Args:
            mel_input (torch.Tensor): mel-spectrogram of shape (B, T, n_mel_bins)

        Returns:
            torch.Tensor: natural-log F0 of shape (B, T); frames predicted
            unvoiced (uv > 0) are set to 0.
        """
        mel_hidden = self.mel_prenet(mel_input)[1]
        if self.conv_layers > 0:
            mel_hidden = self.mel_encoder(mel_hidden)
        # Predictor outputs [log2(f0), uv] per frame.
        pitch_pred = self.pitch_predictor(mel_hidden)
        lf0, uv = pitch_pred[:, :, 0], pitch_pred[:, :, 1]
        # f0 in Hz
        f0 = 2 ** lf0
        # natural log(f0)
        lf0 = torch.log(f0)
        lf0[uv > 0] = 0
        return lf0
class PitchExtractorWrapper(nn.Module):
    """Adapter giving :class:`PitchExtractor` the common ``(x, lengths, y)`` interface."""

    def __init__(self, **kwargs):
        super().__init__()
        # All keyword arguments are forwarded to PitchExtractor.
        self.model = PitchExtractor(**kwargs)

    def forward(self, x, lengths=None, y=None):
        """Run the wrapped extractor.

        Args:
            x (torch.Tensor): mel-spectrogram of shape (B, T, n_mel_bins)
            lengths: accepted for interface compatibility; unused
            y: accepted for interface compatibility; unused

        Returns:
            torch.Tensor: log-F0 of shape (B, T)
        """
        # BUGFIX: the attribute set in __init__ is ``self.model``;
        # ``self.pitch_extractor`` never existed, so every call raised
        # AttributeError.
        return self.model(x)
| 14,548 | 30.087607 | 87 | py |
nnsvs | nnsvs-master/nnsvs/diffsinger/denoiser.py | import math
from math import sqrt
import torch
import torch.nn as nn
import torch.nn.functional as F
class Mish(nn.Module):
    """Mish activation: ``x * tanh(softplus(x))``."""

    def forward(self, x):
        return torch.tanh(F.softplus(x)) * x
class SinusoidalPosEmb(nn.Module):
    """Transformer-style sinusoidal embedding of a scalar position/step.

    Maps a batch of scalars (shape [B]) to [B, dim] as
    ``cat(sin(x * f), cos(x * f))`` over a geometric frequency ladder.
    """

    def __init__(self, dim):
        super().__init__()
        self.dim = dim

    def forward(self, x):
        half = self.dim // 2
        # Frequencies decay geometrically from 1 down to ~1/10000.
        scale = math.log(10000) / (half - 1)
        freqs = torch.exp(-scale * torch.arange(half, device=x.device))
        angles = x[:, None] * freqs[None, :]
        return torch.cat((angles.sin(), angles.cos()), dim=-1)
def Conv1d(*args, **kwargs):
    """Factory for ``nn.Conv1d`` with Kaiming-normal weight initialization."""
    conv = nn.Conv1d(*args, **kwargs)
    nn.init.kaiming_normal_(conv.weight)
    return conv
@torch.jit.script
def silu(x):
    """SiLU / Swish activation: ``x * sigmoid(x)`` (TorchScript-compiled)."""
    return torch.sigmoid(x) * x
class ResidualBlock(nn.Module):
    """WaveNet-style gated residual block conditioned on an encoder output
    and a diffusion-step embedding.

    ``forward`` returns ``(residual_out, skip)``; the residual path is
    scaled by 1/sqrt(2) to keep activations stable across stacked blocks.
    """

    def __init__(self, encoder_hidden, residual_channels, dilation):
        super().__init__()
        # padding == dilation keeps the time dimension unchanged for k=3.
        self.dilated_conv = Conv1d(
            residual_channels,
            2 * residual_channels,
            3,
            padding=dilation,
            dilation=dilation,
        )
        self.diffusion_projection = nn.Linear(residual_channels, residual_channels)
        self.conditioner_projection = Conv1d(encoder_hidden, 2 * residual_channels, 1)
        self.output_projection = Conv1d(residual_channels, 2 * residual_channels, 1)

    def forward(self, x, conditioner, diffusion_step):
        # Broadcast the step embedding over time and add it to the input.
        diffusion_step = self.diffusion_projection(diffusion_step).unsqueeze(-1)
        conditioner = self.conditioner_projection(conditioner)
        y = self.dilated_conv(x + diffusion_step) + conditioner
        # Gated activation unit. Renamed `filter` -> `filt` so the builtin
        # is no longer shadowed.
        gate, filt = torch.chunk(y, 2, dim=1)
        y = torch.sigmoid(gate) * torch.tanh(filt)
        residual, skip = torch.chunk(self.output_projection(y), 2, dim=1)
        return (x + residual) / sqrt(2.0), skip
class DiffNet(nn.Module):
    """WaveNet-style denoiser for diffusion-based spectrogram generation.

    Projects the noisy spectrogram into ``residual_channels`` channels,
    injects a sinusoidal diffusion-step embedding through a small MLP,
    runs a stack of dilated gated residual blocks conditioned on the
    encoder output, and projects the averaged skip connections back to
    ``in_dim`` bins.
    """

    def __init__(
        self,
        in_dim=80,
        encoder_hidden_dim=256,
        residual_layers=20,
        residual_channels=256,
        dilation_cycle_length=4,
    ):
        super().__init__()
        self.in_dim = in_dim
        self.input_projection = Conv1d(in_dim, residual_channels, 1)
        self.diffusion_embedding = SinusoidalPosEmb(residual_channels)
        dim = residual_channels
        self.mlp = nn.Sequential(
            nn.Linear(dim, dim * 4), Mish(), nn.Linear(dim * 4, dim)
        )
        # Dilations cycle through 1, 2, 4, ... within each cycle of length
        # `dilation_cycle_length`.
        self.residual_layers = nn.ModuleList(
            [
                ResidualBlock(
                    encoder_hidden_dim,
                    residual_channels,
                    2 ** (i % dilation_cycle_length),
                )
                for i in range(residual_layers)
            ]
        )
        self.skip_projection = Conv1d(residual_channels, residual_channels, 1)
        self.output_projection = Conv1d(residual_channels, in_dim, 1)
        # Zero-init so the untrained network predicts zero noise.
        nn.init.zeros_(self.output_projection.weight)

    def forward(self, spec, diffusion_step, cond):
        """
        :param spec: [B, 1, M, T]
        :param diffusion_step: [B, 1]
        :param cond: [B, M, T]
        :return: [B, 1, M, T]
        """
        x = spec[:, 0]
        x = F.relu(self.input_projection(x))  # [B, residual_channel, T]
        diffusion_step = self.mlp(self.diffusion_embedding(diffusion_step))
        skip = []
        # Idiom fix: the original used `for _, layer in enumerate(...)`,
        # discarding the index; iterate the ModuleList directly.
        for layer in self.residual_layers:
            x, skip_connection = layer(x, cond, diffusion_step)
            skip.append(skip_connection)
        # Average skip connections with 1/sqrt(L) scaling.
        x = torch.sum(torch.stack(skip), dim=0) / sqrt(len(self.residual_layers))
        x = F.relu(self.skip_projection(x))
        x = self.output_projection(x)  # [B, 80, T]
        return x[:, None, :, :]
| 3,818 | 29.552 | 86 | py |
ShapeMOD | ShapeMOD-main/SA_lang/sa_utils.py | import torch
import torch.nn as nn
import numpy as np
import tasks.ShapeAssembly as sa
from copy import deepcopy
MAX_CUBES = 10  # programs with more child cuboids than this are pruned to {}
PREC = 4  # decimal places kept when serializing numeric parameters
TRANS_NORM = 10.  # NOTE(review): unused in this chunk; presumably a translation normalizer -- confirm
SQUARE_THRESH = .1  # relative tolerance for treating two cuboid dimensions as equal
def loadObj(infile):
    """Parse a Wavefront .obj file into ``(vertices, triangles)``.

    Vertices are ``[x, y, z]`` float lists; triangles are 0-indexed vertex
    index triples (only the position index of each ``v//vn`` reference is
    kept).
    """
    verts = []
    tris = []
    with open(infile) as fh:
        for raw in fh:
            tokens = raw.split()
            if not tokens:
                continue
            if tokens[0] == 'v':
                verts.append([
                    float(tokens[1]),
                    float(tokens[2]),
                    float(tokens[3]),
                ])
            elif tokens[0] == 'f':
                tris.append([
                    int(tokens[k].split('//')[0]) - 1 for k in (1, 2, 3)
                ])
    return verts, tris
def make_function(name, args):
    """Render a call expression, e.g. ``make_function('f', [1, 'a'])`` ->
    ``'f(1, a)'``."""
    return f"{name}({', '.join(str(arg) for arg in args)})"
def assign(var_name, value):
    """Render an assignment statement, e.g. ``assign('x', 'f()')`` ->
    ``'x = f()'``."""
    return f"{var_name} = {value}"
def get_aligned_flags(nodes):
    """Per-cube alignment flags: cube i (i >= 1) is 1 only if every node
    marks it aligned; cube 0 (the bounding box) is always 1."""
    num_cubes = len(nodes[0]['cubes'])
    flags = [1] * num_cubes
    for i in range(1, num_cubes):
        if not all(node['aligned'][i] for node in nodes):
            flags[i] = 0
    return flags
def NodeToDirTensor(props):
    """Coerce the direction entries of ``props`` to numpy arrays, mutating
    the dict in place, and return it."""
    for axis in ('xdir', 'ydir', 'zdir'):
        value = props[axis]
        if not isinstance(value, np.ndarray):
            props[axis] = np.array(value)
    return props
def get_flex_aligned_flags(nodes, thresh):
    """Like :func:`get_aligned_flags`, but decides alignment geometrically
    with the flexible axis-alignment test (cube 0 is the reference box)."""
    num_cubes = len(nodes[0]['cubes'])
    flags = [1] * num_cubes
    for node in nodes:
        bbox = NodeToDirTensor(node['cubes'][0])
        for i in range(1, num_cubes):
            cube = NodeToDirTensor(node['cubes'][i])
            if not isFlexAxisAligned(cube, bbox, thresh):
                flags[i] = 0
    return flags
def isAxisAligned(props, bbox, thresh):
    """True if every local axis of ``props`` has cosine similarity of at
    least ``thresh`` with the corresponding axis of ``bbox`` (bbox axes are
    assumed unit-length, as in the original).

    Fixed: the original normalized props['xdir']/['ydir']/['zdir'] with
    in-place ``/=``, silently mutating the caller's arrays; normalization
    is now done on local copies. The returned decision is unchanged.
    """
    for key in ('xdir', 'ydir', 'zdir'):
        unit = props[key] / np.linalg.norm(props[key])
        if unit.dot(bbox[key]) < thresh:
            return False
    return True
def isSquare(a, b, c):
    """True when dims ``a`` and ``b`` differ by less than SQUARE_THRESH
    relative to the largest of the three dims (``c`` only contributes to
    the normalizer, not the comparison)."""
    arr_a = np.array(a)
    arr_b = np.array(b)
    arr_c = np.array(c)
    scale = max(arr_a.item(), arr_b.item(), arr_c.item())
    return np.abs(arr_a - arr_b) / scale < SQUARE_THRESH
def isSpecAxisAligned(cdir, axis, thresh):
    """True if unit-normalized ``cdir`` has cosine similarity >= ``thresh``
    with ``axis``.

    Fixed: the original normalized ``cdir`` with in-place ``/=``, mutating
    the caller's array; normalization now happens on a local copy.
    """
    unit = cdir / np.linalg.norm(cdir)
    return bool(unit.dot(axis) >= thresh)
def isFlexAxisAligned(props, bbox, thresh):
    """Axis-alignment test that tolerates rotation about a square cross
    section: if two of the cuboid's dimensions are (nearly) equal, only the
    remaining axis needs to line up with the bounding box. A fully cubic
    shape is always considered aligned."""
    if isAxisAligned(props, bbox, thresh):
        return True
    square = {
        'x': isSquare(props['zd'], props['yd'], props['xd']),
        'y': isSquare(props['zd'], props['xd'], props['yd']),
        'z': isSquare(props['xd'], props['yd'], props['zd']),
    }
    if square['x'] and square['y'] and square['z']:
        return True
    # Check axes in x, y, z order (same precedence as the original chain).
    for axis in ('x', 'y', 'z'):
        if square[axis]:
            key = axis + 'dir'
            return isSpecAxisAligned(props[key], bbox[key], thresh)
    return False
def vector_cos(norm1, norm2):
    """Cosine of the angle between two vectors; ``0.`` when either has zero
    length (instead of dividing by zero)."""
    v1 = np.asarray(norm1)
    v2 = np.asarray(norm2)
    denom = np.linalg.norm(v1) * np.linalg.norm(v2)
    if denom == 0.:
        return 0.
    return np.dot(v1, v2) / float(denom)
def orientProps(center, xd, yd, zd, xdir, ydir, zdir):
    """Relabel a cuboid's local frame so its axes best match world
    right (+x), up (+y), and forward (+z), in that order.

    Greedy matching: pick the signed axis (out of +/-x, +/-y, +/-z) with
    the highest cosine similarity to world-right, remove it and its
    negation, then repeat for up and forward. Returns the reoriented
    dimensions/directions as float torch tensors.
    """
    rt = np.asarray([1., 0., 0.])
    up = np.asarray([0., 1., 0.])
    fwd = np.asarray([0., 0., 1.])
    # Candidates: (direction, dimension along it, index); indices 3-5 are
    # the negated axes.
    l = [
        (xdir, xd, 0),
        (ydir, yd, 1),
        (zdir, zd, 2),
        (-1 * xdir, xd, 3),
        (-1 * ydir, yd, 4),
        (-1 * zdir, zd, 5)
    ]
    rtdir, rtd, rind = sorted(
        deepcopy(l), key=lambda x: vector_cos(rt, x[0]))[-1]
    # Remove the chosen axis and its negation; pop the larger index first so
    # the smaller index stays valid.
    if rind >= 3:
        l.pop(rind)
        l.pop((rind+3) % 6)
    else:
        l.pop((rind+3) % 6)
        l.pop(rind)
    # Re-index the four survivors to 0..3 after the two removals.
    for i in range(0, 4):
        p_ind = l[i][2]
        if p_ind > max(rind, (rind+3) % 6):
            l[i] = (l[i][0], l[i][1], l[i][2] - 2)
        elif p_ind > min(rind, (rind+3) % 6):
            l[i] = (l[i][0], l[i][1], l[i][2] - 1)
    updir, upd, upind = sorted(
        deepcopy(l), key=lambda x: vector_cos(up, x[0]))[-1]
    if upind >= 2:
        l.pop(upind)
        l.pop((upind+2) % 4)
    else:
        l.pop((upind+2) % 4)
        l.pop(upind)
    # NOTE: `fwd` (the world-forward axis) is reused for the chosen
    # dimension; the sort key evaluates before the reassignment, so this
    # is safe.
    fwdir, fwd, _ = sorted(l, key=lambda x: vector_cos(fwd, x[0]))[-1]
    return {
        'center': torch.tensor(center).float(),
        'xd': torch.tensor(rtd).float(),
        'yd': torch.tensor(upd).float(),
        'zd': torch.tensor(fwd).float(),
        'xdir': torch.tensor(rtdir).float(),
        'ydir': torch.tensor(updir).float(),
        'zdir': torch.tensor(fwdir).float()
    }
def locallyClean(prog):
    """Re-serialize one assembly program's lines with sanitized parameters
    (dims floored at 0.01, attach/squeeze coords clamped to [0, 1]), after
    renaming child ``Program_*`` cuboids to ``cube{i}``.

    Fixed: the Cuboid branch called ``gen.assign`` / ``gen.make_function``,
    but no ``gen`` module is imported in this file (NameError at runtime);
    the module-level ``assign`` and ``make_function`` helpers -- used by
    every other branch and by locallyNormClean -- are what was intended.
    """
    cube_count = -1
    switches = []
    # First pass: map sub-program names to local cube names, counting
    # cuboid declarations in order (the first, the bbox, becomes index 0
    # after the initial increment).
    for line in prog:
        if 'Cuboid' in line:
            if 'Program_' in line:
                switches.append((
                    f'cube{cube_count}', line.split()[0]
                ))
            cube_count += 1
    for a, b in switches:
        prog = [line.replace(b, a) for line in prog]
    clines = []
    P = sa.Program()
    for line in prog:
        if "Cuboid(" in line:
            parse = P.parseCuboid(line)
            name = parse[0]
            # Floor dimensions at 0.01 to avoid degenerate cuboids.
            x = float(max(parse[1].item(), 0.01))
            y = float(max(parse[2].item(), 0.01))
            z = float(max(parse[3].item(), 0.01))
            aligned = str(parse[4])
            clines.append("\t" + assign(
                name, make_function('Cuboid', [x, y, z, aligned])))
        if "attach(" in line:
            parse = P.parseAttach(line)
            clines.append("\t" + make_function(
                "attach",
                [parse[0], parse[1]] + [
                    torch.clamp(co, 0.0, 1.0).item() for co in parse[2:]
                ]
            ))
        if "squeeze(" in line:
            parse = P.parseSqueeze(line)
            clines.append("\t" + make_function(
                "squeeze",
                [parse[0], parse[1], parse[2], parse[3]] +
                [max(min(co, 1.0), 0.0) for co in parse[4:]]
            ))
        if "reflect(" in line:
            parse = P.parseReflect(line)
            clines.append("\t" + make_function(
                "reflect",
                [parse[0], parse[1]]
            ))
        if "translate(" in line:
            parse = P.parseTranslate(line)
            clines.append("\t" + make_function(
                "translate",
                [
                    parse[0],
                    parse[1],
                    max(int(parse[2]), 1),
                    float(min(max(parse[3], 0.0), 1.0))
                ]
            ))
    return clines
def fillHP(name, progs, children, program_line_maps=None):
    """Recursively build the hierarchical-program dict rooted at ``name``.

    Subtrees with more than MAX_CUBES children collapse to ``{}`` (the
    caller treats ``{}`` as a pruned/absent child).
    """
    node = {
        'name': name,
        'prog': locallyClean(progs[name]),
        'children': [
            fillHP(child, progs, children, program_line_maps) if child is not None else {}
            for child in children[name]
        ],
        'prog_global_line_indices': program_line_maps[name] if program_line_maps else None,
    }
    if len(node['children']) > MAX_CUBES:
        return {}
    return node
def loadFromText(text):
    """Parse hierarchical ShapeAssembly text into the nested dict produced
    by :func:`fillHP`, also recording, per sub-program, the global line
    index of every kept statement."""
    progs = {}
    children = {}
    # This maps program line numbers to global line numbers.
    program_line_maps = {}
    prog_num = -1
    cur_prog = []
    cur_children = []
    cur_program_line_map = []
    for index, line in enumerate(text.split('\n')):
        line = line + '\n'
        ls = line.split()
        if len(ls) == 0:
            continue
        if ls[0] == 'Assembly':
            # Header: "Assembly Program_<n> {" starts a new sub-program.
            prog_num = int(ls[1].split('_')[1])
        elif ls[0] == '}':
            # Closing brace: commit and reset the per-program accumulators.
            progs[prog_num] = cur_prog
            program_line_maps[prog_num] = cur_program_line_map
            children[prog_num] = cur_children
            cur_prog = []
            cur_children = []
            cur_program_line_map = []
        elif 'Cuboid' in line:
            # A Program_* cuboid names a sub-assembly child; others are
            # leaf cuboids (recorded as None).
            if 'Program_' in line:
                cur_children.append(int(ls[0].split('_')[1]))
            else:
                cur_children.append(None)
        # For function invocations, append the line's arguments to the current program.
        appendable_commands = ['Cuboid', 'attach', 'reflect', 'translate', 'squeeze']
        for appendable_command in appendable_commands:
            if appendable_command in line:
                cur_prog.append(line[1:-1])
                cur_program_line_map.append(index)
    return fillHP(0, progs, children, program_line_maps)
def loadHPFromFile(progfile):
    """Parse a hierarchical ShapeAssembly program file into the nested dict
    produced by :func:`fillHP` (rooted at Program_0)."""
    progs = {}
    children = {}
    prog_num = -1
    cur_prog = []
    cur_children = []
    with open(progfile) as handle:
        for line in handle:
            tokens = line.split()
            if tokens[0] == 'Assembly':
                prog_num = int(tokens[1].split('_')[1])
            elif tokens[0] == '}':
                # End of a sub-program: commit and reset accumulators.
                progs[prog_num] = cur_prog
                children[prog_num] = cur_children
                cur_prog = []
                cur_children = []
            elif 'Cuboid' in line:
                # A Program_* cuboid is a sub-assembly; others are leaves.
                cur_children.append(
                    int(tokens[0].split('_')[1]) if 'Program_' in line else None
                )
                cur_prog.append(line[1:-1])
            elif any(cmd in line for cmd in ('attach', 'reflect', 'translate', 'squeeze')):
                cur_prog.append(line[1:-1])
    return fillHP(0, progs, children)
def locallyNormClean(prog, max_val):
    """Variant of locallyClean that divides every cuboid dimension by
    ``max_val`` and clamps it to [0.01, 1.0].

    NOTE(review): unlike locallyClean, this variant omits the cuboid
    'aligned' flag and does not emit squeeze lines -- presumably intended
    for the normalized pipeline; confirm.
    """
    cube_count = -1
    switches = []
    # First pass: rename child Program_* cuboids to cube{i}.
    for line in prog:
        if 'Cuboid' in line:
            if 'Program_' in line:
                switches.append((
                    f'cube{cube_count}', line.split()[0]
                ))
            cube_count += 1
    for a, b in switches:
        prog = [line.replace(b, a) for line in prog]
    clines = []
    P = sa.Program()
    for line in prog:
        if "Cuboid(" in line:
            parse = P.parseCuboid(line)
            name = parse[0]
            # Normalize by max_val, then clamp to [0.01, 1.0].
            x = float(min(max(parse[1].item() / max_val, 0.01), 1.0))
            y = float(min(max(parse[2].item() / max_val, 0.01), 1.0))
            z = float(min(max(parse[3].item() / max_val, 0.01), 1.0))
            clines.append("\t" + assign(
                name, make_function('Cuboid', [x, y, z])))
        if "attach(" in line:
            parse = P.parseAttach(line)
            clines.append("\t" + make_function(
                "attach",
                [parse[0], parse[1]] + [
                    torch.clamp(co, 0.0, 1.0).item() for co in parse[2:]
                ]
            ))
        if "reflect(" in line:
            parse = P.parseReflect(line)
            clines.append("\t" + make_function(
                "reflect",
                [parse[0], parse[1]]
            ))
        if "translate(" in line:
            parse = P.parseTranslate(line)
            clines.append("\t" + make_function(
                "translate",
                [
                    parse[0],
                    parse[1],
                    max(int(parse[2]), 1),
                    float(min(max(parse[3], 0.0), 1.0))
                ]
            ))
    return clines
def fillNormHP(name, progs, children, max_val=None):
    """Like :func:`fillHP`, but normalizes cuboid dimensions via
    :func:`locallyNormClean`; ``max_val`` defaults to the largest dimension
    of the root bounding-box Cuboid and is shared down the whole tree."""
    if max_val is None:
        # Root call: the first program line is the bounding-box Cuboid.
        dims = progs[name][0][:-1].split('(')[1].split(',')
        max_val = max(float(d) for d in dims)
    node = {
        'name': name,
        'prog': locallyNormClean(progs[name], max_val),
        'children': [
            fillNormHP(child, progs, children, max_val) if child is not None else {}
            for child in children[name]
        ],
    }
    if len(node['children']) > MAX_CUBES:
        return {}
    return node
def loadNormHPFromFile(progfile):
    """Variant of :func:`loadHPFromFile` that builds the max-dimension
    normalized hierarchy (note: unlike loadHPFromFile, ``squeeze`` lines
    are not collected here)."""
    progs = {}
    children = {}
    prog_num = -1
    cur_prog = []
    cur_children = []
    with open(progfile) as handle:
        for line in handle:
            tokens = line.split()
            if tokens[0] == 'Assembly':
                prog_num = int(tokens[1].split('_')[1])
            elif tokens[0] == '}':
                progs[prog_num] = cur_prog
                children[prog_num] = cur_children
                cur_prog = []
                cur_children = []
            elif 'Cuboid' in line:
                cur_children.append(
                    int(tokens[0].split('_')[1]) if 'Program_' in line else None
                )
                cur_prog.append(line[1:-1])
            elif any(cmd in line for cmd in ('attach', 'reflect', 'translate')):
                cur_prog.append(line[1:-1])
    return fillNormHP(0, progs, children)
def getHierProgLines(root, prog_field):
    """Serialize the hierarchy rooted at ``root`` into flat text lines.

    Each node's program lives under ``prog_field`` as (return_vars,
    [fn, *args]) pairs. Sub-programs are assigned ``Program_<i>`` names in
    breadth-first order; NAME_DICT maps local cuboid names either to
    themselves or to their Program_<i> alias.
    """
    prog_count = 0
    root["prog_num"] = prog_count
    lines = []
    q = [root]
    while(len(q) > 0):
        node = q.pop(0)
        lines.append(f"Assembly Program_{node['prog_num']}" +" {")
        NAME_DICT = {}
        # c indexes this node's children in cuboid-declaration order.
        c = 0
        for ret, line in node[prog_field]:
            params = line[1:]
            fn = line[0]
            for p_ret in ret:
                # A non-empty child dict means this cuboid is itself a
                # sub-assembly: give it a fresh Program_<i> name and queue it.
                if len(node['children'][c]) > 0:
                    prog_count += 1
                    name = f'Program_{prog_count}'
                    node['children'][c]['prog_num'] = prog_count
                else:
                    name = p_ret
                NAME_DICT[p_ret] = name
                c += 1
            clean_ret = [NAME_DICT[r] for r in ret]
            clean_params = [NAME_DICT[p] if p in NAME_DICT else p for p in params]
            if len(ret) > 0:
                lines.append("\t" + assign(','.join(clean_ret), make_function(fn, clean_params)))
            else:
                lines.append("\t" + make_function(fn, clean_params))
        lines.append("}")
        # Only children that were assigned a prog_num above get serialized.
        for c in node["children"]:
            if c is not None and len(c) > 0:
                if "prog_num" in c:
                    q.append(c)
    return lines
def sagetHierProgLines(root):
    """Serialize a hierarchical ShapeAssembly program (raw text lines per
    node, re-parsed with sa.Program) into flat text, rounding all numeric
    parameters to PREC decimals and assigning Program_<i> names in
    breadth-first order."""
    prog_count = 0
    root["prog_num"] = prog_count
    lines = []
    q = [root]
    P = sa.Program()
    while(len(q) > 0):
        node = q.pop(0)
        lines.append(f"Assembly Program_{node['prog_num']}" +" {")
        NAME_DICT = {}
        # c indexes this node's children in cuboid-declaration order.
        c = 0
        for line in node["prog"]:
            if "Cuboid(" in line:
                parse = P.parseCuboid(line)
                # Non-empty child dict => this cuboid is a sub-assembly.
                if len(node["children"][c]) > 0:
                    prog_count += 1
                    name = f"Program_{prog_count}"
                    node["children"][c]["prog_num"] = prog_count
                else:
                    name = parse[0]
                NAME_DICT[parse[0]] = name
                x = round(float(parse[1]), PREC)
                y = round(float(parse[2]), PREC)
                z = round(float(parse[3]), PREC)
                aligned = str(parse[4])
                lines.append("\t" + assign(
                    name, make_function('Cuboid', [x,y,z,aligned]))
                )
                c += 1
            if "attach(" in line:
                parse = P.parseAttach(line)
                lines.append("\t" + make_function(
                    "attach",
                    [NAME_DICT[parse[0]], NAME_DICT[parse[1]]] + [round(co.item(), PREC) for co in parse[2:]]
                ))
            if "reflect(" in line:
                parse = P.parseReflect(line)
                lines.append("\t" + make_function(
                    "reflect",
                    [NAME_DICT[parse[0]], parse[1]]
                ))
            if "translate(" in line:
                parse = P.parseTranslate(line)
                lines.append("\t" + make_function(
                    "translate",
                    [NAME_DICT[parse[0]], parse[1], int(parse[2]), round(float(parse[3]), PREC)]
                ))
            if "rotate(" in line:
                parse = P.parseRotate(line)
                lines.append("\t" + make_function(
                    "rotate",
                    [NAME_DICT[parse[0]], int(parse[1]), round(float(parse[2]), PREC)]
                ))
            if "squeeze(" in line:
                parse = P.parseSqueeze(line)
                lines.append("\t" + make_function(
                    "squeeze",
                    [NAME_DICT[parse[0]], NAME_DICT[parse[1]], NAME_DICT[parse[2]], parse[3]] + [round(co, PREC) for co in parse[4:]]
                ))
        lines.append("}")
        # NOTE: `c` is reused here as the child iterator (the counter above
        # is no longer needed at this point).
        for c in node["children"]:
            if c is not None and len(c) > 0:
                if "prog_num" in c:
                    q.append(c)
    return lines
def sawriteHierProg(hier_prog, outfile):
    """Serialize a hierarchical ShapeAssembly program to ``outfile``, one
    statement per line (via sagetHierProgLines)."""
    serialized = sagetHierProgLines(hier_prog)
    with open(outfile, 'w') as handle:
        for statement in serialized:
            print(statement, file=handle)
def writeHierProg(hier_prog, prog_field, outfile):
    """Serialize a hierarchical program (statements under ``prog_field``)
    to ``outfile``, one statement per line (via getHierProgLines)."""
    serialized = getHierProgLines(hier_prog, prog_field)
    with open(outfile, 'w') as handle:
        for statement in serialized:
            print(statement, file=handle)
def log_print(s, of):
    """Append ``s`` (plus newline) to the log file ``of``, then echo it to
    stdout."""
    with open(of, 'a') as handle:
        print(s, file=handle)
    print(s)
def generateObj(verts, faces):
    """Return Wavefront .obj text for the mesh.

    ``faces`` are 0-indexed and left unmodified; the 1-indexing required
    by the .obj format happens on a clone.
    """
    shifted = faces.clone() + 1
    parts = [f'v {a} {b} {c}\n' for a, b, c in verts.tolist()]
    parts += [f"f {a} {b} {c}\n" for a, b, c in shifted.tolist()]
    return "".join(parts)
def writeObj(verts, faces, outfile):
    """Write the mesh to ``outfile`` in .obj format; ``faces`` are
    0-indexed inputs and left unmodified (1-indexing happens on a clone)."""
    shifted = faces.clone() + 1
    with open(outfile, 'w') as handle:
        handle.writelines(f'v {a} {b} {c}\n' for a, b, c in verts.tolist())
        handle.writelines(f"f {a} {b} {c}\n" for a, b, c in shifted.tolist())
def writePC(pc, fn):
    """Write the xyz part of a 6-D point cloud (position + normal rows) as
    .obj vertex lines; the normal components are discarded."""
    with open(fn, 'w') as handle:
        handle.writelines(
            f'v {a} {b} {c} \n' for a, b, c, _, _, _ in pc
        )
def writeSPC(pc, fn):
    """Write a plain xyz point cloud as .obj vertex lines."""
    with open(fn, 'w') as handle:
        handle.writelines(f'v {a} {b} {c} \n' for a, b, c in pc)
def face_areas_normals(faces, vs):
    """Per-face areas and unit normals for a batch of meshes.

    :param faces: (F, 3) long tensor of vertex indices (shared across batch)
    :param vs: (B, V, 3) vertex positions
    :return: areas (B, F) and unit normals (B, F, 3)
    """
    edge_a = vs[:, faces[:, 1], :] - vs[:, faces[:, 0], :]
    edge_b = vs[:, faces[:, 2], :] - vs[:, faces[:, 1], :]
    normals = torch.cross(edge_a, edge_b, dim=2)
    # The 1e-8 epsilon guards the division for degenerate faces.
    lengths = torch.norm(normals, dim=2) + 1e-8
    unit_normals = normals / lengths[:, :, None]
    return 0.5 * lengths, unit_normals
def sample_surface(faces, vs, count, return_normals=True):
    """
    sample mesh surface
    sample method:
    http://mathworld.wolfram.com/TrianglePointPicking.html
    Args
    ---------
    vs: vertices (batch x nvs x 3d coordinate)
    faces: triangle faces (torch.long) (num_faces x 3)
    count: number of samples
    Return
    ---------
    samples: (batch, count, 3) surface points, or (batch, count, 6) with the
        face normal concatenated when return_normals is True
    """
    if torch.isnan(faces).any() or torch.isnan(vs).any():
        assert False, 'saw nan in sample_surface'
    device = vs.device
    bsize, nvs, _ = vs.shape
    area, normal = face_areas_normals(faces, vs)
    area_sum = torch.sum(area, dim=1)
    # Areas become categorical sampling probabilities, so they must be
    # strictly positive and finite.
    assert not (area <= 0.0).any().item(
    ), "Saw negative probability while sampling"
    assert not (area_sum <= 0.0).any().item(
    ), "Saw negative probability while sampling"
    assert not (area > 1000000.0).any().item(), "Saw inf"
    assert not (area_sum > 1000000.0).any().item(), "Saw inf"
    # Sample faces proportionally to their area.
    dist = torch.distributions.categorical.Categorical(
        probs=area / (area_sum[:, None]))
    face_index = dist.sample((count,))
    # pull triangles into the form of an origin + 2 vectors
    tri_origins = vs[:, faces[:, 0], :]
    tri_vectors = vs[:, faces[:, 1:], :].clone()
    tri_vectors -= tri_origins.repeat(1, 1,
                                      2).reshape((bsize, len(faces), 2, 3))
    # pull the vectors for the faces we are going to sample from
    face_index = face_index.transpose(0, 1)
    face_index = face_index[:, :, None].expand((bsize, count, 3))
    tri_origins = torch.gather(tri_origins, dim=1, index=face_index)
    face_index2 = face_index[:, :, None, :].expand((bsize, count, 2, 3))
    tri_vectors = torch.gather(tri_vectors, dim=1, index=face_index2)
    # randomly generate two 0-1 scalar components to multiply edge vectors by
    random_lengths = torch.rand(
        count, 2, 1, device=vs.device, dtype=tri_vectors.dtype)
    # points will be distributed on a quadrilateral if we use 2x [0-1] samples
    # if the two scalar components sum less than 1.0 the point will be
    # inside the triangle, so we find vectors longer than 1.0 and
    # transform them to be inside the triangle
    random_test = random_lengths.sum(dim=1).reshape(-1) > 1.0
    random_lengths[random_test] -= 1.0
    random_lengths = torch.abs(random_lengths)
    # multiply triangle edge vectors by the random lengths and sum
    sample_vector = (tri_vectors * random_lengths[None, :]).sum(dim=2)
    # finally, offset by the origin to generate
    # (n,3) points in space on the triangle
    samples = sample_vector + tri_origins
    if return_normals:
        # Append the (flat) face normal of each sampled point.
        samples = torch.cat((samples, torch.gather(
            normal, dim=1, index=face_index)), dim=2)
        return samples
    else:
        return samples
| 21,643 | 28.013405 | 133 | py |
ShapeMOD | ShapeMOD-main/SA_lang/parse_files/old_intersect.py | import torch
import itertools
import trimesh
import scipy
import numpy as np
import faiss
from copy import deepcopy
DOING_PARSE = True  # gates the module-level grid/FAISS setup below
DIM = 20  # per-axis resolution of the dense cube sample grid (DIM**3 points)
ATT_DIM = 50  # finer grid resolution used for attachment-point sampling
device = torch.device("cuda")  # NOTE(review): hard-coded CUDA; fails on CPU-only hosts
resource = faiss.StandardGpuResources()  # shared GPU resources for FAISS indices
def robust_norm(var, dim=2):
    """L2 norm along ``dim`` with a 1e-8 epsilon inside the sqrt, keeping
    the value (and gradient) finite at zero."""
    squared = (var ** 2).sum(dim=dim)
    return torch.sqrt(squared + 1e-8)
class collisionEngine():
    """Finds near-coincident points between two point clouds via FAISS
    nearest-neighbor search (k=1)."""
    def __init__(self, device):
        self.dimension = 3
        self.k = 1
        self.device = device
    def search_nn(self, index, query, k):
        # FAISS returns (distances, indices); only the indices are used.
        _, I = index.search(query, k)
        I_var = torch.from_numpy(np.ascontiguousarray(I).astype(np.int64))
        I_var = I_var.to(self.device)
        return I_var
    def findPCInter(self, predict_pc, gt_pc, index_predict, index_gt, thresh):
        """Return (indices of predict_pc points within `thresh` of gt_pc,
        indices of gt_pc points within `thresh` of predict_pc).

        predict_pc / gt_pc are (B, 3, M) / (B, 3, N) tensors;
        index_predict / index_gt are the matching FAISS indices.
        """
        predict_pc = predict_pc.to(self.device)
        gt_pc = gt_pc.to(self.device)
        predict_pc_size = predict_pc.size()
        gt_pc_size = gt_pc.size()
        predict_pc_np = np.ascontiguousarray(
            torch.transpose(predict_pc.data.clone(), 1, 2).cpu().numpy()
        ) # BxMx3
        gt_pc_np = np.ascontiguousarray(
            torch.transpose(gt_pc.data.clone(), 1, 2).cpu().numpy()
        ) # BxNx3
        # selected_gt: Bxkx3xM
        selected_gt_by_predict = torch.FloatTensor(
            predict_pc_size[0], self.k, predict_pc_size[1], predict_pc_size[2]
        )
        # selected_predict: Bxkx3xN
        selected_predict_by_gt = torch.FloatTensor(
            gt_pc_size[0], self.k, gt_pc_size[1], gt_pc_size[2]
        )
        selected_gt_by_predict = selected_gt_by_predict.to(self.device)
        selected_predict_by_gt = selected_predict_by_gt.to(self.device)
        # process each batch independently.
        for i in range(predict_pc_np.shape[0]):
            # database is gt_pc, predict_pc -> gt_pc -----------------------------------------------------------
            I_var = self.search_nn(index_gt, predict_pc_np[i], self.k)
            # process nearest k neighbors
            for k in range(self.k):
                selected_gt_by_predict[i, k, ...] = gt_pc[i].index_select(
                    1, I_var[:, k]
                )
            # database is predict_pc, gt_pc -> predict_pc -------------------------------------------------------
            I_var = self.search_nn(index_predict, gt_pc_np[i], self.k)
            # process nearest k neighbors
            for k in range(self.k):
                selected_predict_by_gt[i, k, ...] = predict_pc[i].index_select(
                    1, I_var[:, k]
                )
        # compute loss ===================================================
        # selected_gt(Bxkx3xM) vs predict_pc(Bx3xM)
        # NOTE(review): these two difference tensors are dead code -- they
        # are immediately overwritten by the robust_norm results below.
        r_to_gt = predict_pc.unsqueeze(1).expand_as(selected_gt_by_predict) - selected_gt_by_predict
        gt_to_r = selected_predict_by_gt - gt_pc.unsqueeze(1).expand_as(selected_predict_by_gt)
        r_to_gt = robust_norm(
            selected_gt_by_predict
            - predict_pc.unsqueeze(1).expand_as(selected_gt_by_predict),
            dim=2
        )
        gt_to_r = robust_norm(
            selected_predict_by_gt
            - gt_pc.unsqueeze(1).expand_as(selected_predict_by_gt),
            dim=2
        )
        r_to_gt = r_to_gt.flatten()
        gt_to_r = gt_to_r.flatten()
        return (r_to_gt < thresh).nonzero().squeeze(), (gt_to_r < thresh).nonzero().squeeze()
if DOING_PARSE:
    # Build a regular DIM x DIM x DIM grid of unit-cube coordinates, used to
    # densely sample cuboid interiors/surfaces in parameter space.
    a = (torch.arange(DIM).float()/(DIM-1))
    b = a.unsqueeze(0).unsqueeze(0).repeat(DIM, DIM, 1)
    c = a.unsqueeze(0).unsqueeze(2).repeat(DIM, 1, DIM)
    d = a.unsqueeze(1).unsqueeze(2).repeat(1, DIM, DIM)
    g = torch.stack((b,c,d), dim=3).view(-1, 3).to(device)
    # Flattened indices of the grid points lying on each cube face:
    # right/left (x), top/bottom (y), front/back (z).
    fr = (g[:,0] == 1.).nonzero().squeeze().to(device)
    fl = (g[:,0] == 0.).nonzero().squeeze().to(device)
    ft = (g[:,1] == 1.).nonzero().squeeze().to(device)
    fbo = (g[:,1] == 0.).nonzero().squeeze().to(device)
    ff = (g[:,2] == 1.).nonzero().squeeze().to(device)
    fba = (g[:,2] == 0.).nonzero().squeeze().to(device)
    # Masks: adding these pushes every point NOT on the kept face(s) far
    # away (x100), so nearest-neighbor queries effectively ignore them.
    bb_mask = torch.ones((DIM**3), 3).float().to(device) * 100
    bb_mask[torch.cat((ft, fbo)), :] *= 0.
    top_mask = torch.ones((DIM**3), 3).float().to(device) * 100
    bot_mask = torch.ones((DIM**3), 3).float().to(device) * 100
    top_mask[ft] *= 0.
    bot_mask[fbo] *= 0.
    s_xyz = g.unsqueeze(0).to(device)
    # The 8 corners of the unit cube, in parameter space.
    c_xyz = torch.tensor([
        [0,0,0],
        [0,0,1],
        [0,1,0],
        [0,1,1],
        [1,0,0],
        [1,0,1],
        [1,1,0],
        [1,1,1],
    ]).unsqueeze(0).float()
    colEngine = collisionEngine(device)
    # Finer ATT_DIM grid used when locating attachment points.
    atta = (torch.arange(ATT_DIM).float()/(ATT_DIM-1))
    attb = atta.unsqueeze(0).unsqueeze(0).repeat(ATT_DIM, ATT_DIM, 1)
    attc = atta.unsqueeze(0).unsqueeze(2).repeat(ATT_DIM, 1, ATT_DIM)
    attd = atta.unsqueeze(1).unsqueeze(2).repeat(1, ATT_DIM, ATT_DIM)
    attg = torch.stack((attb,attc,attd), dim=3).view(-1, 3)
    attxyz = attg.unsqueeze(0).to(device)
def smp_pt(geom, pt):
    """Map a point from a cuboid's unit parameter space ([0,1]^3) to world
    coordinates.

    ``geom`` layout: [xd, yd, zd, cx, cy, cz, xdir(3), ydir(3)]; the local
    z-axis is recovered as the cross product of the normalized x/y axes.
    """
    x_axis = geom[6:9] / (geom[6:9].norm() + 1e-8)
    y_axis = geom[9:12] / (geom[9:12].norm() + 1e-8)
    z_axis = torch.cross(x_axis, y_axis)
    rotation = torch.stack((x_axis, y_axis, z_axis)).T
    local = (pt - .5) * geom[:3]
    return rotation @ local + geom[3:6]
def findHiddenCubes(cubes):
    """Return indices of cubes that are entirely contained inside some other
    cube: all 8 world-space corners fall within the other cube's local
    coordinate box (with 1% slack)."""
    cube_geom = []
    for c in cubes:
        # Pack each cube as [xd, yd, zd, center(3), xdir(3), ydir(3)].
        cube_geom.append(torch.cat((
            c['xd'].unsqueeze(0),
            c['yd'].unsqueeze(0),
            c['zd'].unsqueeze(0),
            c['center'],
            c['xdir'],
            c['ydir']
        )))
    scene_geom = torch.stack([c for c in cube_geom]).to(device)
    scene_corners = sampleCorners(cubes).view(-1,8,3)
    bad_inds = []
    for ind in range(len(cubes)):
        points = scene_corners[ind,:,:]
        all_inside = False
        for nind in range(len(cubes)):
            if nind == ind:
                continue
            if all_inside:
                break
            # Solve A p = (points - O) for the corners' coordinates in
            # cube nind's local frame (O = its min corner, A = its scaled
            # axis matrix).
            O = smp_pt(scene_geom[nind], torch.zeros(3).to(device))
            A = torch.stack([
                scene_geom[nind][0] * scene_geom[nind][6:9],
                scene_geom[nind][1] * scene_geom[nind][9:12],
                scene_geom[nind][2] * torch.cross(
                    scene_geom[nind][6:9], scene_geom[nind][9:12]
                )
            ]).T
            B = (points.T - O.unsqueeze(1)).cpu()
            p = torch.tensor(np.linalg.solve(A.cpu(), B.cpu())).T
            # All local coords in [0,1] (with 1% slack) => fully contained.
            if p.min() >= -0.01 and p.max() <= 1.01:
                all_inside = True
        if all_inside:
            bad_inds.append(ind)
    return bad_inds
def findOverlapCubes(cubes, CTHRESH):
    """Return the set of cube indices whose interior grid samples are
    covered by some other (not already covered) cube at a fraction above
    ``CTHRESH``."""
    cube_geom = []
    for c in cubes:
        # Pack each cube as [xd, yd, zd, center(3), xdir(3), ydir(3)].
        cube_geom.append(torch.cat((
            c['xd'].unsqueeze(0),
            c['yd'].unsqueeze(0),
            c['zd'].unsqueeze(0),
            c['center'],
            c['xdir'],
            c['ydir']
        )))
    scene_geom = torch.stack([c for c in cube_geom]).to(device)
    scene_corners = sampleCube(cubes).view(-1,DIM**3,3)
    bad_inds = set()
    for ind in range(len(cubes)):
        points = scene_corners[ind,:,:]
        covered = False
        for nind in range(len(cubes)):
            if nind == ind or nind in bad_inds:
                continue
            if covered:
                break
            # Express this cube's DIM**3 sample points in cube nind's local
            # frame by solving A p = (points - O).
            O = smp_pt(scene_geom[nind], torch.zeros(3).to(device))
            A = torch.stack([
                scene_geom[nind][0] * scene_geom[nind][6:9],
                scene_geom[nind][1] * scene_geom[nind][9:12],
                scene_geom[nind][2] * torch.cross(
                    scene_geom[nind][6:9], scene_geom[nind][9:12]
                )
            ]).T
            B = (points.T - O.unsqueeze(1)).cpu()
            p = torch.tensor(np.linalg.solve(A.cpu(), B.cpu())).T
            # Count points outside [0,1]^3 via the clamp residual; covered
            # when the inside fraction exceeds CTHRESH.
            # NOTE(review): when exactly one point is outside, squeeze()
            # yields a 0-d shape and len(...) == 0 marks the cube covered
            # regardless of CTHRESH -- confirm this is intended.
            num_outside_s = (p - torch.clamp(p, 0.0, 1.0)).abs().sum(dim=1).nonzero().squeeze().shape
            if len(num_outside_s) == 0 or (((DIM**3 - num_outside_s[0])*1.) / DIM**3) > CTHRESH:
                covered = True
        if covered:
            bad_inds.add(ind)
    return bad_inds
def samplePC(cubes, flip_bbox=False, split_bbox=False):
    """Sample the module-level DIM**3 grid inside every cuboid (in world
    space) and build a FAISS GPU index per cube.

    flip_bbox: for cube 0 (the bounding box), push all non-top/bottom
    points far away (bb_mask) and swap the top and bottom face samples.
    split_bbox: replace cube 0 with pseudo-cubes keyed -1 (only the top
    face kept) and -2 (only the bottom face kept).

    Returns ({key: (points, faiss_index)}, scene_geom).
    """
    cube_geom = []
    for c in cubes:
        cube_geom.append(torch.cat((
            c['xd'].unsqueeze(0),
            c['yd'].unsqueeze(0),
            c['zd'].unsqueeze(0),
            c['center'],
            c['xdir'],
            c['ydir']
        )))
    scene_geom = torch.stack([c for c in cube_geom]).to(device)
    ind_to_pc = {}
    for i in range(0, scene_geom.shape[0]):
        xyz = s_xyz
        s_inds = (torch.ones(1,xyz.shape[1]) * i).long().to(device)
        # Rotation matrices from the normalized x/y dirs and their cross
        # product (the local z axis).
        s_r = torch.cat(
            (
                (scene_geom[s_inds][:, :, 6:9] / (scene_geom[s_inds][:, :, 6:9].norm(dim=2).unsqueeze(2) + 1e-8)).unsqueeze(3),
                (scene_geom[s_inds][:, :, 9:12] / (scene_geom[s_inds][:, :, 9:12].norm(dim=2).unsqueeze(2) + 1e-8)).unsqueeze(3),
                torch.cross(
                    scene_geom[s_inds][:, :, 6:9] / (scene_geom[s_inds][:, :, 6:9].norm(dim=2).unsqueeze(2) + 1e-8),
                    scene_geom[s_inds][:, :, 9:12] / (scene_geom[s_inds][:, :, 9:12].norm(dim=2).unsqueeze(2) + 1e-8)
                ).unsqueeze(3)
            ), dim = 3)
        # World points: rotate the scaled, centered grid and translate.
        s_out = ((s_r @ (((xyz - .5) * scene_geom[s_inds][:, :, :3]).unsqueeze(-1))).squeeze() + scene_geom[s_inds][:, :, 3:6]).squeeze()
        ind_to_pc[i] = s_out
    if flip_bbox:
        ind_to_pc[0] += bb_mask
        temp = ind_to_pc[0].clone()
        ind_to_pc[0][ft] = temp[fbo]
        ind_to_pc[0][fbo] = temp[ft]
    if split_bbox:
        bbox_pc = ind_to_pc.pop(0)
        ind_to_pc[-2] = bbox_pc.clone() + bot_mask
        ind_to_pc[-1] = bbox_pc.clone() + top_mask
    res = {}
    for key in ind_to_pc:
        # Build one flat-L2 FAISS index (on GPU) per cube's point set.
        index_cpu = faiss.IndexFlatL2(3)
        index = faiss.index_cpu_to_gpu(
            resource,
            torch.cuda.current_device(),
            index_cpu
        )
        index.add(
            np.ascontiguousarray(ind_to_pc[key].cpu().numpy())
        )
        res[key] = (ind_to_pc[key], index)
    return res, scene_geom
def sampleCorners(cubes):
    """Return the world-space positions of the 8 corners of every cuboid,
    flattened to shape (len(cubes) * 8, 3)."""
    cube_geom = []
    for c in cubes:
        # Pack each cube as [xd, yd, zd, center(3), xdir(3), ydir(3)].
        cube_geom.append(torch.cat((
            c['xd'].unsqueeze(0),
            c['yd'].unsqueeze(0),
            c['zd'].unsqueeze(0),
            c['center'],
            c['xdir'],
            c['ydir']
        )))
    scene_geom = torch.stack([c for c in cube_geom]).to(device)
    # Repeat each cube index 8 times, once per unit-cube corner (c_xyz).
    s_inds = torch.arange(scene_geom.shape[0]).unsqueeze(1).repeat(1,8).view(1, -1).to(device)
    xyz = c_xyz.repeat(1,scene_geom.shape[0],1).to(device)
    # Per-corner rotation matrices from normalized x/y dirs and their cross.
    s_r = torch.cat(
        (
            (scene_geom[s_inds][:, :, 6:9] / (scene_geom[s_inds][:, :, 6:9].norm(dim=2).unsqueeze(2) + 1e-8)).unsqueeze(3),
            (scene_geom[s_inds][:, :, 9:12] / (scene_geom[s_inds][:, :, 9:12].norm(dim=2).unsqueeze(2) + 1e-8)).unsqueeze(3),
            torch.cross(
                scene_geom[s_inds][:, :, 6:9] / (scene_geom[s_inds][:, :, 6:9].norm(dim=2).unsqueeze(2) + 1e-8),
                scene_geom[s_inds][:, :, 9:12] / (scene_geom[s_inds][:, :, 9:12].norm(dim=2).unsqueeze(2) + 1e-8)
            ).unsqueeze(3)
        ), dim = 3)
    # Rotate the scaled, centered corners and translate to world space.
    s_out = ((s_r @ (((xyz - .5) * scene_geom[s_inds][:, :, :3]).unsqueeze(-1))).squeeze() + scene_geom[s_inds][:, :, 3:6]).squeeze()
    return s_out
def sampleCube(cubes):
    """Return world-space samples of the module-level DIM**3 grid inside
    every cuboid, flattened to shape (len(cubes) * DIM**3, 3)."""
    cube_geom = []
    for c in cubes:
        # Pack each cube as [xd, yd, zd, center(3), xdir(3), ydir(3)].
        cube_geom.append(torch.cat((
            c['xd'].unsqueeze(0),
            c['yd'].unsqueeze(0),
            c['zd'].unsqueeze(0),
            c['center'],
            c['xdir'],
            c['ydir']
        )))
    scene_geom = torch.stack([c for c in cube_geom]).to(device)
    # Repeat each cube index DIM**3 times, once per grid point (s_xyz).
    s_inds = torch.arange(scene_geom.shape[0]).unsqueeze(1).repeat(1,DIM**3).view(1, -1).to(device)
    xyz = s_xyz.repeat(1,scene_geom.shape[0],1).to(device)
    # Per-point rotation matrices from normalized x/y dirs and their cross.
    s_r = torch.cat(
        (
            (scene_geom[s_inds][:, :, 6:9] / (scene_geom[s_inds][:, :, 6:9].norm(dim=2).unsqueeze(2) + 1e-8)).unsqueeze(3),
            (scene_geom[s_inds][:, :, 9:12] / (scene_geom[s_inds][:, :, 9:12].norm(dim=2).unsqueeze(2) + 1e-8)).unsqueeze(3),
            torch.cross(
                scene_geom[s_inds][:, :, 6:9] / (scene_geom[s_inds][:, :, 6:9].norm(dim=2).unsqueeze(2) + 1e-8),
                scene_geom[s_inds][:, :, 9:12] / (scene_geom[s_inds][:, :, 9:12].norm(dim=2).unsqueeze(2) + 1e-8)
            ).unsqueeze(3)
        ), dim = 3)
    # Rotate the scaled, centered grid and translate to world space.
    s_out = ((s_r @ (((xyz - .5) * scene_geom[s_inds][:, :, :3]).unsqueeze(-1))).squeeze() + scene_geom[s_inds][:, :, 3:6]).squeeze()
    return s_out
def findInters(ind_to_pc, scene_geom, ind_pairs=None):
    """For each candidate cube pair, find grid points of one cube lying
    within a size-scaled threshold of the other cube's points.

    ind_to_pc: {key: (points, faiss_index)} as produced by samplePC.
    Returns {"<i>_<j>": (indices_in_i, indices_in_j)} for pairs with at
    least one intersection point on both sides.
    """
    inters = {}
    if ind_pairs is None:
        # Default: every unordered pair of keys (a < b).
        l = list(ind_to_pc.keys())
        l.sort()
        ind_pairs = [(a,b) for a,b in list(
            itertools.product(l,l)
        ) if a < b ]
    for ind1, ind2 in ind_pairs:
        # This makes us catch more bbox intersections
        # (negative keys are bbox pseudo-cubes; use cube 0's geometry).
        thresh = scene_geom[[0 if ind1 < 0 else ind1, ind2]][:,:3].max() / DIM
        c1_inds, c2_inds = colEngine.findPCInter(
            ind_to_pc[ind1][0].T.unsqueeze(0),
            ind_to_pc[ind2][0].T.unsqueeze(0),
            ind_to_pc[ind1][1],
            ind_to_pc[ind2][1],
            thresh,
        )
        # Skip pairs where either side has no intersection points.
        if len(c1_inds.shape) == 0 or len(c2_inds.shape) == 0 or c1_inds.shape[0] == 0 or c2_inds.shape[0] == 0:
            continue
        inters[f"{ind1}_{ind2}"] = (c1_inds, c2_inds)
    return inters
def vector_cos(norm1, norm2):
    """Cosine similarity of two vectors; ``0.`` when either is zero-length
    (avoids a division by zero)."""
    u = np.asarray(norm1)
    v = np.asarray(norm2)
    scale = np.linalg.norm(u) * np.linalg.norm(v)
    if scale == 0.:
        return 0.
    return np.dot(u, v) / float(scale)
def orientProps(center, xd, yd, zd, xdir, ydir, zdir):
    """Relabel a cuboid's local frame so its axes best match world
    right (+x), up (+y), and forward (+z), in that order (duplicate of the
    helper in sa_utils).

    Greedy matching: pick the signed axis with the highest cosine
    similarity to world-right, remove it and its negation, then repeat for
    up and forward. Returns float torch tensors.
    """
    rt = np.asarray([1., 0., 0.])
    up = np.asarray([0., 1., 0.])
    fwd = np.asarray([0., 0., 1.])
    # Candidates: (direction, dimension along it, index); indices 3-5 are
    # the negated axes.
    l = [
        (xdir, xd, 0),
        (ydir, yd, 1),
        (zdir, zd, 2),
        (-1 * xdir, xd, 3),
        (-1 * ydir, yd, 4),
        (-1 * zdir, zd, 5)
    ]
    rtdir, rtd, rind = sorted(
        deepcopy(l), key=lambda x: vector_cos(rt, x[0]))[-1]
    # Remove the chosen axis and its negation; pop the larger index first
    # so the smaller index stays valid.
    if rind >= 3:
        l.pop(rind)
        l.pop((rind+3) % 6)
    else:
        l.pop((rind+3) % 6)
        l.pop(rind)
    # Re-index the four survivors to 0..3 after the two removals.
    for i in range(0, 4):
        p_ind = l[i][2]
        if p_ind > max(rind, (rind+3) % 6):
            l[i] = (l[i][0], l[i][1], l[i][2] - 2)
        elif p_ind > min(rind, (rind+3) % 6):
            l[i] = (l[i][0], l[i][1], l[i][2] - 1)
    updir, upd, upind = sorted(
        deepcopy(l), key=lambda x: vector_cos(up, x[0]))[-1]
    if upind >= 2:
        l.pop(upind)
        l.pop((upind+2) % 4)
    else:
        l.pop((upind+2) % 4)
        l.pop(upind)
    # NOTE: `fwd` (world-forward) is reused for the chosen dimension; the
    # sort key evaluates before the reassignment, so this is safe.
    fwdir, fwd, _ = sorted(l, key=lambda x: vector_cos(fwd, x[0]))[-1]
    return {
        'center': torch.tensor(center).float(),
        'xd': torch.tensor(rtd).float(),
        'yd': torch.tensor(upd).float(),
        'zd': torch.tensor(fwd).float(),
        'xdir': torch.tensor(rtdir).float(),
        'ydir': torch.tensor(updir).float(),
        'zdir': torch.tensor(fwdir).float()
    }
def points_obb(points, precision):
    """Fit an oriented bounding box to an (N, 3) point array and return it
    as reoriented cuboid properties (see orientProps).

    Tries trimesh's minimum-volume OBB first; on a Qhull failure it falls
    back to a PCA box. Fixed: the fallback referenced an unimported ``PCA``
    class (NameError whenever it ran); principal axes are now computed
    directly with numpy's covariance eigendecomposition.
    """
    try:
        to_origin, size = trimesh.bounds.oriented_bounds(points, angle_digits=precision)
        center = to_origin[:3, :3].transpose().dot(-to_origin[:3, 3])
        xdir = to_origin[0, :3]
        ydir = to_origin[1, :3]
    # NOTE(review): newer scipy exposes this as scipy.spatial.QhullError.
    except scipy.spatial.qhull.QhullError:
        print('WARNING: falling back to PCA OBB computation since the more accurate minimum OBB computation failed.')
        center = points.mean(axis=0, keepdims=True)
        points = points - center
        center = center[0, :]
        # Principal axes = eigenvectors of the covariance, in descending
        # eigenvalue order (eigh returns ascending, hence the flip).
        _, eigvecs = np.linalg.eigh(np.cov(points.T))
        pcomps = eigvecs[:, ::-1].T
        points_local = np.matmul(pcomps, points.transpose()).transpose()
        size = points_local.max(axis=0) - points_local.min(axis=0)
        xdir = pcomps[0, :]
        ydir = pcomps[1, :]
    box = torch.from_numpy(np.hstack([center, size, xdir, ydir]).reshape(1, -1)).to(torch.float32)
    box = box.cpu().numpy().squeeze()
    center = box[:3]
    size = box[3:6]
    xdir = box[6:9]
    xdir /= np.linalg.norm(xdir)
    ydir = box[9:]
    ydir /= np.linalg.norm(ydir)
    # Complete a right-handed orthonormal frame.
    zdir = np.cross(xdir, ydir)
    zdir /= np.linalg.norm(zdir)
    return orientProps(center, size[0], size[1], size[2], xdir, ydir, zdir)
# Checks if any cube1 face midpoints are inside pc2
def checkFaces(cube1, cube2, scene_geom):
    """Return the world-space midpoint of the cube1 face that lies inside
    cube2 and is closest (in cube2's local coordinates) to one of cube2's
    faces, or None if no face midpoint is inside.

    Negative cube ids denote bbox pseudo-cubes (-1: top only, -2: bottom
    only) and restrict which of cube2's faces are candidates.
    """
    ind1 = 0 if cube1 < 0 else cube1
    ind2 = 0 if cube2 < 0 else cube2
    # face -> (midpoint in unit parameter space, local axis index,
    #          target coordinate on that axis).
    faces = {
        'right': (torch.tensor([1.0,0.5,0.5],device=device), 0, 0),
        'left': (torch.tensor([0.0,0.5,0.5],device=device), 0, 1),
        'top': (torch.tensor([0.5,1.0,0.5],device=device), 1, 0),
        'bot': (torch.tensor([0.5,0.0,0.5],device=device), 1, 1),
        'front': (torch.tensor([0.5,0.5,1.0],device=device), 2, 0),
        'back': (torch.tensor([0.5,0.5,0.0],device=device), 2, 1),
    }
    if cube2 == -1:
        faces.pop('bot')
        faces.pop('left')
        faces.pop('right')
        faces.pop('front')
        faces.pop('back')
    if cube2 == -2:
        faces.pop('top')
        faces.pop('left')
        faces.pop('right')
        faces.pop('front')
        faces.pop('back')
    best_score = 1e8
    best_face= None
    best_pt = None
    # Local frame of cube2: origin O (its min corner) and scaled axes A.
    O = smp_pt(scene_geom[ind2], torch.zeros(3).to(device))
    A = torch.stack([
        scene_geom[ind2][0] * scene_geom[ind2][6:9],
        scene_geom[ind2][1] * scene_geom[ind2][9:12],
        scene_geom[ind2][2] * torch.cross(
            scene_geom[ind2][6:9], scene_geom[ind2][9:12]
        )
    ]).T
    for face in faces:
        # NOTE: local `ft` shadows the module-level top-face index tensor
        # within this loop.
        smpt, fi, ft = faces[face]
        pt = smp_pt(scene_geom[ind1], smpt)
        B = pt - O
        p = torch.tensor(np.linalg.solve(A.cpu(), B.cpu()))
        # Midpoint must be inside cube2 (1% slack).
        if p.min() < -0.01 or p.max() > 1.01:
            continue
        # Against the bounding box, only accept points near its top/bottom.
        if ind2 == 0 and (p[1] >= .05 and p[1] <= .95):
            continue
        # Prefer the face whose midpoint sits closest to the target face.
        score = (p[fi] - ft).abs()
        if score < best_score:
            best_face = face
            best_score = score
            best_pt = pt
    return best_pt
def calcAttPoint(pair, pcs, scene_geom, ind_to_pc):
    """Find an attachment point between two cuboids and express it in both
    cuboids' local [0,1]^3 coordinate frames.

    pair is an 'i_j' string of cube indices (non-positive values alias the
    bounding box, index 0). Returns [ind1, ind2, p1, p2] with p1/p2 as local
    coordinate lists, or None when no attachment point can be derived.
    """
    cube1 = int(pair.split('_')[0])
    cube2 = int(pair.split('_')[1])
    ind1 = cube1 if cube1 > 0 else 0
    ind2 = cube2 if cube2 > 0 else 0
    if ind1 == ind2:
        return None
    pc1, pc2 = pcs
    att_point = None
    # Prefer the exact face-midpoint containment test; fall back to the
    # point-cloud search, trying the higher-indexed cube first.
    if cube1 > 0:
        att_point = checkFaces(cube1, cube2, scene_geom)
    if att_point is None and cube2 > 0:
        att_point = checkFaces(cube2, cube1, scene_geom)
    if att_point is None:
        if ind1 > ind2:
            att_point = getBestATTPoint(pc1, pc2, cube1, cube2, ind_to_pc, ind1, ind2, scene_geom)
        else:
            att_point = getBestATTPoint(pc2, pc1, cube2, cube1, ind_to_pc, ind2, ind1, scene_geom)
    if att_point is None:
        return
    # att_point is a point in 3D space; transform it into each cuboid's local
    # coordinate frame and return both parameterizations.
    collision = [ind1, ind2, None, None]
    for i, ind in ((2, ind1), (3, ind2)):
        O = smp_pt(scene_geom[ind], torch.zeros(3).to(device))
        # Columns of A are the cuboid's scaled local axes.
        A = torch.stack([
            scene_geom[ind][0] * scene_geom[ind][6:9],
            scene_geom[ind][1] * scene_geom[ind][9:12],
            scene_geom[ind][2] * torch.cross(
                scene_geom[ind][6:9], scene_geom[ind][9:12]
            )
        ]).T
        B = att_point - O
        p = np.linalg.solve(A.cpu(), B.cpu())
        p = np.clip(p, 0.0, 1.0)
        collision[i] = p.tolist()
    return collision
def calcAnyPoint(pair, pcs, scene_geom, ind_to_pc):
    """Like calcAttPoint, but skips the exact face-midpoint test and goes
    straight to the looser point-cloud search (getanATTPoint).

    Returns [ind1, ind2, p1, p2] with p1/p2 in the cuboids' local [0,1]^3
    frames, or None when no point is found.
    """
    cube1 = int(pair.split('_')[0])
    cube2 = int(pair.split('_')[1])
    ind1 = cube1 if cube1 > 0 else 0
    ind2 = cube2 if cube2 > 0 else 0
    if ind1 == ind2:
        return None
    pc1, pc2 = pcs
    att_point = getanATTPoint(pc1, pc2, cube1, cube2, ind_to_pc, ind1, ind2, scene_geom)
    if att_point is None:
        att_point = getanATTPoint(pc2, pc1, cube2, cube1, ind_to_pc, ind2, ind1, scene_geom)
    if att_point is None:
        return
    # att_point is a point in 3D space; transform it into each cuboid's local
    # coordinate frame and return both parameterizations.
    collision = [ind1, ind2, None, None]
    for i, ind in ((2, ind1), (3, ind2)):
        O = smp_pt(scene_geom[ind], torch.zeros(3).to(device))
        # Columns of A are the cuboid's scaled local axes.
        A = torch.stack([
            scene_geom[ind][0] * scene_geom[ind][6:9],
            scene_geom[ind][1] * scene_geom[ind][9:12],
            scene_geom[ind][2] * torch.cross(
                scene_geom[ind][6:9], scene_geom[ind][9:12]
            )
        ]).T
        B = att_point - O
        p = np.linalg.solve(A.cpu(), B.cpu())
        p = np.clip(p, 0.0, 1.0)
        collision[i] = p.tolist()
    return collision
def calcAttachments(inters, scene_geom, ind_to_pc):
    """Compute an attachment record for every intersecting cube pair,
    dropping pairs for which no attachment point could be derived."""
    candidates = (calcAttPoint(pair, inters[pair], scene_geom, ind_to_pc)
                  for pair in inters)
    return [att for att in candidates if att is not None]
# Takes in intersections and an ind.
# Calculates the union of all points of cube `ind` covered by intersections,
# then probes how far each face can be pushed inward before it would leave
# every intersecting neighbor, and shrinks parts[ind] accordingly.
# Returns None if the cube has no covered points; otherwise returns the list
# of intersection keys involving `ind` (so the caller can recalculate them).
def shorten_cube(inters, parts, ind, scene_geom):
    # Mark which of the DIM^3 sample points of cube `ind` are covered by an
    # intersection, and collect the neighboring cube indices.
    cov_inds = torch.zeros((DIM**3)).float().to(device)
    cov_inters = []
    nparts = []
    for pair in inters:
        ind1 = int(pair.split('_')[0])
        ind2 = int(pair.split('_')[1])
        if ind1 == ind:
            cov_inters.append(pair)
            cov_inds[inters[pair][0]] = 1.
            nparts.append(ind2)
        elif ind2 == ind:
            cov_inters.append(pair)
            cov_inds[inters[pair][1]] = 1.
            nparts.append(ind1)
    if cov_inds.sum().item() == 0:
        return None
    face_shorten = {}
    # Per face: (sample-point indices on that face, per-step inward offset in
    # unit-cube coords repeated for 400 face samples, axis index).
    dirs = {
        'right': (fr, torch.tensor([-.01, 0., 0.]).unsqueeze(0).repeat(400,1), 0),
        'left': (fl, torch.tensor([.01, 0., 0.]).unsqueeze(0).repeat(400,1), 0),
        'top': (ft, torch.tensor([0., -.01, 0.]).unsqueeze(0).repeat(400,1), 1),
        'bot': (fbo, torch.tensor([0., .01, 0.]).unsqueeze(0).repeat(400,1), 1),
        'front': (ff, torch.tensor([0., 0., -.01]).unsqueeze(0).repeat(400,1), 2),
        'back': (fba, torch.tensor([0., 0., .01]).unsqueeze(0).repeat(400,1), 2)
    }
    cube_geom = scene_geom[ind]
    # Rotation whose columns are the cube's (normalized) local axes.
    p_r = torch.cat(
        (
            (cube_geom[6:9] / (cube_geom[6:9].norm(dim=0).unsqueeze(0) + 1e-8)).unsqueeze(1),
            (cube_geom[9:12] / (cube_geom[9:12].norm(dim=0).unsqueeze(0) + 1e-8)).unsqueeze(1),
            torch.cross(
                cube_geom[6:9] / (cube_geom[6:9].norm(dim=0).unsqueeze(0) + 1e-8),
                cube_geom[9:12] / (cube_geom[9:12].norm(dim=0).unsqueeze(0) + 1e-8)
            ).unsqueeze(1)
        ), dim = 1)
    for d in dirs:
        dinds, inc, ki = dirs[d]
        inc = inc.to(device)
        # Step the whole face inward in 1% increments until its samples no
        # longer lie inside any intersecting neighbor.
        for nd in range(101):
            points = ((p_r @ ((((g[dinds] + (inc * nd)).unsqueeze(0) - .5) * cube_geom[:3]).unsqueeze(-1))).squeeze() + cube_geom[3:6]).squeeze()
            inside = False
            for nind in nparts:
                if inside:
                    break
                # Solve for the face samples in the neighbor's local frame.
                O = smp_pt(scene_geom[nind], torch.zeros(3).to(device))
                A = torch.stack([
                    scene_geom[nind][0] * scene_geom[nind][6:9],
                    scene_geom[nind][1] * scene_geom[nind][9:12],
                    scene_geom[nind][2] * torch.cross(
                        scene_geom[nind][6:9], scene_geom[nind][9:12]
                    )
                ]).T
                B = (points.T - O.unsqueeze(1)).cpu()
                p = torch.tensor(np.linalg.solve(A.cpu(), B.cpu())).T
                if p.min() >= -0.01 and p.max() <= 1.01:
                    inside = True
            if not inside:
                break
        # Keep the last step that was still inside (fraction of the extent).
        face_shorten[d] = max((nd-1) / 100, 0)
    dims = [
        ('right', 'left', 'xd', 'xdir'),
        ('top', 'bot', 'yd', 'ydir'),
        ('front', 'back', 'zd', 'zdir')
    ]
    # Apply the shortening: reduce the extent and shift the center so the
    # untouched face stays in place. Mutates parts[ind] in place.
    for d1, d2, key, dn in dims:
        if face_shorten[d1] > 0 or face_shorten[d2] > 0:
            x = face_shorten[d1] * parts[ind][key]
            y = face_shorten[d2] * parts[ind][key]
            parts[ind][key] -= x + y
            parts[ind]['center'] += parts[ind][dn] * ((y-x) / 2)
    return cov_inters
def getBestATTPoint(pc1, pc2, cube1, cube2, ind_to_pc, ind1, ind2, scene_geom):
    """Search for an attachment point inside the joint AABB of two cubes'
    intersecting point clouds.

    Samples the axis-aligned box spanned by both clouds at the global attxyz
    grid, solves each sample into both cubes' local [0,1]^3 frames, and
    returns the mean of the samples that lie inside both (containment slack
    +-0.01). Against the bbox (ind2 == 0) samples must additionally sit near
    the bbox's top/bottom, with a progressively loosened offset. Returns
    None if no sample qualifies.
    """
    a = ind_to_pc[cube1][0][pc1]
    b = ind_to_pc[cube2][0][pc2]
    joint = torch.cat((a, b), dim=0)
    jmin = joint.min(dim=0).values
    jmax = joint.max(dim=0).values
    xd = (jmax[0] - jmin[0]).abs()
    yd = (jmax[1] - jmin[1]).abs()
    zd = (jmax[2] - jmin[2]).abs()
    center = (jmax + jmin) / 2
    # Axis-aligned search box packed in the standard 12-float geom layout.
    bbox_geom = torch.cat((
        xd.unsqueeze(0),
        yd.unsqueeze(0),
        zd.unsqueeze(0),
        center,
        torch.tensor([1.0,0.0,0.0]).to(device),
        torch.tensor([0.0,1.0,0.0]).to(device)
    ))
    p_r = torch.cat(
        (
            (bbox_geom[6:9] / (bbox_geom[6:9].norm(dim=0).unsqueeze(0) + 1e-8)).unsqueeze(1),
            (bbox_geom[9:12] / (bbox_geom[9:12].norm(dim=0).unsqueeze(0) + 1e-8)).unsqueeze(1),
            torch.cross(
                bbox_geom[6:9] / (bbox_geom[6:9].norm(dim=0).unsqueeze(0) + 1e-8),
                bbox_geom[9:12] / (bbox_geom[9:12].norm(dim=0).unsqueeze(0) + 1e-8)
            ).unsqueeze(1)
        ), dim = 1)
    points = ((p_r @ (((attxyz - .5) * bbox_geom[:3]).unsqueeze(-1))).squeeze() + bbox_geom[3:6]).squeeze()
    # for each point find atp1 and atp2 (local coords in cube ind1 / ind2)
    O1 = smp_pt(scene_geom[ind1], torch.zeros(3).to(device))
    A1 = torch.stack([
        scene_geom[ind1][0] * scene_geom[ind1][6:9],
        scene_geom[ind1][1] * scene_geom[ind1][9:12],
        scene_geom[ind1][2] * torch.cross(
            scene_geom[ind1][6:9], scene_geom[ind1][9:12]
        )
    ]).T
    O2 = smp_pt(scene_geom[ind2], torch.zeros(3).to(device))
    A2 = torch.stack([
        scene_geom[ind2][0] * scene_geom[ind2][6:9],
        scene_geom[ind2][1] * scene_geom[ind2][9:12],
        scene_geom[ind2][2] * torch.cross(
            scene_geom[ind2][6:9], scene_geom[ind2][9:12]
        )
    ]).T
    B1 = (points.T - O1.unsqueeze(1)).cpu()
    B2 = (points.T - O2.unsqueeze(1)).cpu()
    atp1 = torch.tensor(np.linalg.lstsq(A1.cpu(), B1, rcond=None)[0]).T
    atp2 = torch.tensor(np.linalg.lstsq(A2.cpu(), B2, rcond=None)[0]).T
    atps = torch.cat((atp1, atp2), dim = 1)
    # ne_inds == 2 iff all six local coords satisfy both bounds.
    ne_inds = (((atps >= -0.01).sum(dim=1) == 6).int() + ((atps <= 1.01).sum(dim=1) == 6)).int()
    if ind2 == 0:
        for i in range(1, 6):
            offset = 0.01 * i
            # Require the sample to sit near the bbox's bottom or top face.
            bb_ne_inds = (((atps[:, 4] <= offset).int() + (atps[:, 4] >= (1-offset)).int()) == 1).int()
            bb_ne_inds += ne_inds.int()
            bb_ne_inds = (bb_ne_inds == 3).nonzero().squeeze()
            # NOTE(review): `.sum() > 0` misclassifies the case where the only
            # qualifying sample is index 0 — confirm whether that is intended.
            if bb_ne_inds.sum() > 0:
                return points[bb_ne_inds].mean(dim=0)
        return None
    ne_inds = (ne_inds == 2).nonzero().squeeze()
    if ne_inds.sum() > 0:
        return points[ne_inds].mean(dim=0)
    return None
def getanATTPoint(pc1, pc2, cube1, cube2, ind_to_pc, ind1, ind2, scene_geom):
    """Looser variant of getBestATTPoint.

    NOTE(review): this is a near line-for-line duplicate of getBestATTPoint;
    the only difference is the containment slack (+-0.05 here vs +-0.01).
    Consider factoring out the shared body.
    """
    a = ind_to_pc[cube1][0][pc1]
    b = ind_to_pc[cube2][0][pc2]
    joint = torch.cat((a, b), dim=0)
    jmin = joint.min(dim=0).values
    jmax = joint.max(dim=0).values
    xd = (jmax[0] - jmin[0]).abs()
    yd = (jmax[1] - jmin[1]).abs()
    zd = (jmax[2] - jmin[2]).abs()
    center = (jmax + jmin) / 2
    # Axis-aligned search box packed in the standard 12-float geom layout.
    bbox_geom = torch.cat((
        xd.unsqueeze(0),
        yd.unsqueeze(0),
        zd.unsqueeze(0),
        center,
        torch.tensor([1.0,0.0,0.0]).to(device),
        torch.tensor([0.0,1.0,0.0]).to(device)
    ))
    p_r = torch.cat(
        (
            (bbox_geom[6:9] / (bbox_geom[6:9].norm(dim=0).unsqueeze(0) + 1e-8)).unsqueeze(1),
            (bbox_geom[9:12] / (bbox_geom[9:12].norm(dim=0).unsqueeze(0) + 1e-8)).unsqueeze(1),
            torch.cross(
                bbox_geom[6:9] / (bbox_geom[6:9].norm(dim=0).unsqueeze(0) + 1e-8),
                bbox_geom[9:12] / (bbox_geom[9:12].norm(dim=0).unsqueeze(0) + 1e-8)
            ).unsqueeze(1)
        ), dim = 1)
    points = ((p_r @ (((attxyz - .5) * bbox_geom[:3]).unsqueeze(-1))).squeeze() + bbox_geom[3:6]).squeeze()
    # for each point find atp1 and atp2 (local coords in cube ind1 / ind2)
    O1 = smp_pt(scene_geom[ind1], torch.zeros(3).to(device))
    A1 = torch.stack([
        scene_geom[ind1][0] * scene_geom[ind1][6:9],
        scene_geom[ind1][1] * scene_geom[ind1][9:12],
        scene_geom[ind1][2] * torch.cross(
            scene_geom[ind1][6:9], scene_geom[ind1][9:12]
        )
    ]).T
    O2 = smp_pt(scene_geom[ind2], torch.zeros(3).to(device))
    A2 = torch.stack([
        scene_geom[ind2][0] * scene_geom[ind2][6:9],
        scene_geom[ind2][1] * scene_geom[ind2][9:12],
        scene_geom[ind2][2] * torch.cross(
            scene_geom[ind2][6:9], scene_geom[ind2][9:12]
        )
    ]).T
    B1 = (points.T - O1.unsqueeze(1)).cpu()
    B2 = (points.T - O2.unsqueeze(1)).cpu()
    atp1 = torch.tensor(np.linalg.lstsq(A1.cpu(), B1, rcond=None)[0]).T
    atp2 = torch.tensor(np.linalg.lstsq(A2.cpu(), B2, rcond=None)[0]).T
    atps = torch.cat((atp1, atp2), dim = 1)
    # ne_inds == 2 iff all six local coords satisfy both (loose) bounds.
    ne_inds = (((atps >= -0.05).sum(dim=1) == 6).int() + ((atps <= 1.05).sum(dim=1) == 6)).int()
    if ind2 == 0:
        for i in range(1, 6):
            offset = 0.01 * i
            # Require the sample to sit near the bbox's bottom or top face.
            bb_ne_inds = (((atps[:, 4] <= offset).int() + (atps[:, 4] >= (1-offset)).int()) == 1).int()
            bb_ne_inds += ne_inds.int()
            bb_ne_inds = (bb_ne_inds == 3).nonzero().squeeze()
            # NOTE(review): `.sum() > 0` misclassifies the case where the only
            # qualifying sample is index 0 — confirm whether that is intended.
            if bb_ne_inds.sum() > 0:
                return points[bb_ne_inds].mean(dim=0)
        return None
    ne_inds = (ne_inds == 2).nonzero().squeeze()
    if ne_inds.sum() > 0:
        return points[ne_inds].mean(dim=0)
    return None
| 31,198 | 31.163918 | 149 | py |
ShapeMOD | ShapeMOD-main/SA_lang/parse_files/symmetry.py | import torch
import numpy as np
import json_parse as jp
import old_intersect as inter
from copy import deepcopy
import math
# Tolerances used by the symmetry-detection heuristics below.
SDIM_THRESH = .15   # max relative extent difference for "same dims" (approx_same_dims)
SANG_THRESH = .1    # direction dot product must exceed 1 - SANG_THRESH to count as aligned
SPOS_THRESH = .1    # max relative distance for "same point" (approx_same_point)
VATT_THRESH = .05   # slack when validating an attachment location (isValidAtt)
CATT_THRESH = .3    # max local-frame distance for a "close" attachment (isCloseAtt)
GROUP_THRESH = .1   # mean spacing deviation allowed when grouping translational syms
CPT_THRESH = .1     # max world-space distance for a "close" point (isClosePt)
def smp_pt(geom, pt):
    """Map a point given in a cuboid's unit cube [0,1]^3 into world space.

    geom packs (dims[3], center[3], xdir[3], ydir[3]); the z axis is derived
    as the cross product of the normalized x and y axes.
    """
    x_axis = geom[6:9] / (geom[6:9].norm() + 1e-8)
    y_axis = geom[9:12] / (geom[9:12].norm() + 1e-8)
    z_axis = torch.cross(x_axis, y_axis)
    rot = torch.stack((x_axis, y_axis, z_axis), dim=1)
    local = (pt - .5) * geom[:3]
    return (rot @ local) + geom[3:6]
def smp_rel_pos(geom, gpt):
    """Express world point gpt in the cuboid's local [0,1]^3 frame.

    Solves A p = gpt - O where O is the cuboid's (0,0,0) corner and the
    columns of A are its extent-scaled local axes.
    """
    origin = smp_pt(geom, torch.zeros(3))
    axes = torch.stack([
        geom[0] * geom[6:9],
        geom[1] * geom[9:12],
        geom[2] * torch.cross(geom[6:9], geom[9:12]),
    ]).T
    delta = gpt - origin
    return torch.tensor(np.linalg.solve(axes.cpu(), delta.cpu()))
def approx_same_dims(a, b, m):
    """True when cuboids a and b have matching extents on every axis,
    relative to the scale m (the bbox's largest dimension)."""
    return not any(
        ((a[key] - b[key]).abs() / m) > SDIM_THRESH
        for key in ('xd', 'yd', 'zd')
    )
def approx_same_point(a, b, m, thresh=SPOS_THRESH):
    """True when points a and b are within thresh of each other,
    relative to the scale m."""
    return not ((a - b).norm() / m > thresh)
# Checks if pt is a valid place to be for cuboid c
def isValidAtt(node, pt, cind):
    """Return True when world point pt is a plausible attachment location on
    cube cind: inside the cube (with VATT_THRESH slack), and — for the bbox
    (cind == 0) — near its top or bottom face."""
    ct = cubeToTensor(node['cubes'][cind])
    # Solve pt into the cube's local [0,1]^3 frame.
    O = smp_pt(ct, torch.zeros(3))
    A = torch.stack([
        ct[0] * ct[6:9],
        ct[1] * ct[9:12],
        ct[2] * torch.cross(
            ct[6:9], ct[9:12]
        )
    ]).T
    B = pt - O
    p = torch.tensor(np.linalg.solve(A.cpu(), B.cpu()))
    # For the bbox, reject points that are well inside vertically.
    if cind == 0 and (p[1] - .5).abs() < .5 - VATT_THRESH:
        return False
    # Reject points outside the cube beyond the slack.
    if (p - .5).abs().max() >= .5 + VATT_THRESH:
        return False
    return True
# checks if pt location in c is close to att
def isCloseAtt(node, pt, cind, att):
    """Return True when world point pt, expressed in cube cind's local
    [0,1]^3 frame, lies within CATT_THRESH of the local coordinates att."""
    ct = cubeToTensor(node['cubes'][cind])
    # Solve pt into the cube's local frame.
    O = smp_pt(ct, torch.zeros(3))
    A = torch.stack([
        ct[0] * ct[6:9],
        ct[1] * ct[9:12],
        ct[2] * torch.cross(
            ct[6:9], ct[9:12]
        )
    ]).T
    B = pt - O
    p = torch.tensor(np.linalg.solve(A.cpu(), B.cpu()))
    if (p - torch.tensor(att)).norm() > CATT_THRESH:
        return False
    return True
# checks if pt location in c is close to att (compared in world space)
def isClosePt(node, pt, cind, att):
    """True when world point pt lies within CPT_THRESH of the world position
    of local coordinates att on cube cind."""
    geom = cubeToTensor(node['cubes'][cind])
    target = smp_pt(geom, torch.tensor(att))
    return not ((pt - target).norm() > CPT_THRESH)
def cubeToTensor(c):
    """Flatten a cuboid dict into the 12-float (xd, yd, zd, center, xdir,
    ydir) tensor layout used by smp_pt and friends."""
    pieces = [c[key].unsqueeze(0) for key in ('xd', 'yd', 'zd')]
    pieces.extend([c['center'], c['xdir'], c['ydir']])
    return torch.cat(pieces)
# Is there a translational symmetry from i -> o
# if so return (axis, scale in terms of bbox dimension)
def checkTranSym(node, i, o):
    """Detect a translational symmetry mapping cube i onto cube o.

    Requires matching extents, a displacement aligned with one of the bbox
    axes, and that every attachment of i maps onto a valid/close location on
    o. Returns ('tr_X'|'tr_Y'|'tr_Z', displacement / bbox extent) or None.
    """
    c_i = node['cubes'][i]
    c_o = node['cubes'][o]
    if not approx_same_dims(c_i, c_o, max(node['cubes'][0]['xd'], node['cubes'][0]['yd'], node['cubes'][0]['zd'])):
        return None
    cdir = c_o['center'] - c_i['center']
    scale = cdir.norm()
    cdir /= cdir.norm()
    # Classify the displacement direction against the bbox axes.
    if cdir.dot(node['cubes'][0]['xdir']) > 1 - SANG_THRESH:
        tn = 'tr_X'
        td = node['cubes'][0]['xd']
        tdir = node['cubes'][0]['xdir']
    elif cdir.dot(node['cubes'][0]['ydir']) > 1 - SANG_THRESH:
        tn = 'tr_Y'
        td = node['cubes'][0]['yd']
        tdir = node['cubes'][0]['ydir']
    elif cdir.dot(node['cubes'][0]['zdir']) > 1 - SANG_THRESH:
        tn = 'tr_Z'
        td = node['cubes'][0]['zd']
        tdir = node['cubes'][0]['zdir']
    else:
        return None
    # Every attachment of i, translated by the same offset, must remain a
    # valid attachment and land close to the matching location on o.
    for n, at1 in node['pc_atts'][i]:
        opt = smp_pt(
            cubeToTensor(c_i),
            torch.tensor(at1)
        )
        tpt = opt + (tdir * scale)
        #if not isValidAtt(node, tpt, n) or not isCloseAtt(node, tpt, o, at1):
        if (isValidAtt(node, opt, n) and not isValidAtt(node, tpt, n)) or not isClosePt(node, tpt, o, at1):
            return None
    return tn, (scale/td).item()
def getRefMatrixHomo(axis, center):
    """Build the 3x4 homogeneous reflection matrix across the plane through
    `center` with normal `axis` (Householder form [I - 2dd^T | 2(d.m)d])."""
    d = axis / axis.norm()
    rot = torch.eye(3) - 2.0 * d.unsqueeze(1) * d.unsqueeze(0)
    trans = (2.0 * d.dot(center) * d).unsqueeze(1)
    return torch.cat((rot, trans), dim=1)
def reflect_cube(c, center, ndir):
    """Reflect a cuboid across the plane through `center` with normal `ndir`.

    Returns a dict with only 'center', 'xdir', 'ydir', 'zdir' (extents are
    unchanged by reflection and not copied). Directions use a zero
    homogeneous coordinate so the translation does not apply to them.
    """
    pad = torch.nn.ConstantPad1d((0, 1), 1.0)
    reflection = getRefMatrixHomo(ndir, center)
    dir_reflection = torch.cat((reflection[:, :3], torch.zeros(3, 1)), dim=1)
    mirrored = {'center': reflection @ pad(c['center'])}
    for key in ('xdir', 'ydir', 'zdir'):
        mirrored[key] = dir_reflection @ pad(c[key])
    return mirrored
def reflect_point(p, center, ndir):
    """Reflect world point p across the plane through `center` with
    normal `ndir`."""
    homo = torch.nn.ConstantPad1d((0, 1), 1.0)(p)
    return getRefMatrixHomo(ndir, center) @ homo
# Is there reflectional symmetry from i->o
# If so return (plane)
def checkRefSym(node, i, o):
    """Detect a reflectional symmetry mapping cube i onto cube o across a
    bbox-centered axis plane.

    Requires matching extents, a center displacement aligned with a bbox
    axis, the reflected center landing on o, and every attachment of i
    mapping to a valid/close location. Returns 'ref_X'|'ref_Y'|'ref_Z'
    or None.
    """
    c_i = node['cubes'][i]
    c_o = node['cubes'][o]
    if not approx_same_dims(c_i, c_o, max(node['cubes'][0]['xd'], node['cubes'][0]['yd'], node['cubes'][0]['zd'])):
        return None
    cdir = c_o['center'] - c_i['center']
    cdir /= cdir.norm()
    # Classify the displacement direction against the bbox axes.
    if cdir.dot(node['cubes'][0]['xdir']) > 1 - SANG_THRESH:
        rn = 'ref_X'
        rdir = node['cubes'][0]['xdir']
    elif cdir.dot(node['cubes'][0]['ydir']) > 1 - SANG_THRESH:
        rn = 'ref_Y'
        rdir = node['cubes'][0]['ydir']
    elif cdir.dot(node['cubes'][0]['zdir']) > 1 - SANG_THRESH:
        rn = 'ref_Z'
        rdir = node['cubes'][0]['zdir']
    else:
        return None
    # Reflection plane passes through the bbox center.
    center = smp_pt(cubeToTensor(node['cubes'][0]), torch.tensor([.5, .5, .5]))
    c_ref = reflect_cube(c_i, center, rdir)
    if not approx_same_point(
        c_ref['center'],
        c_o['center'],
        max(node['cubes'][0]['xd'], node['cubes'][0]['yd'], node['cubes'][0]['zd'])
    ):
        return None
    for n, at1 in node['pc_atts'][i]:
        opt = smp_pt(
            cubeToTensor(c_i),
            torch.tensor(at1)
        )
        rpt = reflect_point(opt, center, rdir)
        # Local attachment coordinates mirror along the reflected axis.
        if rn == 'ref_X':
            rat1 = [1-at1[0], at1[1], at1[2]]
        elif rn == 'ref_Y':
            rat1 = [at1[0], 1-at1[1], at1[2]]
        elif rn == 'ref_Z':
            rat1 = [at1[0], at1[1], 1-at1[2]]
        #if not isValidAtt(node, rpt, n) or not isCloseAtt(node, rpt, o, rat1):
        if (isValidAtt(node, opt, n) and not isValidAtt(node, rpt, n)) or not isClosePt(node, rpt, o, rat1):
            return None
    return rn
# takes in a dict of symmetries, if any of the translational symmetries have more than one element try to group them
# returned group symmetries, and the longest symmetry seen
def groupSyms(syms):
    """Merge translational symmetry candidates into evenly spaced chains.

    Non-translational entries pass through unchanged. For each 'tr_*' list
    the scales are sorted and the longest prefix whose spacings are near
    integer multiples of the shortest (mean deviation <= GROUP_THRESH) is
    kept. Returns (grouped syms, length of the longest chain + 1).
    """
    gsyms = {}
    longest = 2
    for st in syms:
        if 'tr' not in st:
            gsyms[st] = syms[st]
        elif len(syms[st]) == 1:
            gsyms[st] = (syms[st][0][0], [syms[st][0][1]])
        else:
            best_l = 0
            syms[st].sort()
            for l in range(0, len(syms[st])):
                md = syms[st][l][0]
                # Expected scales for an even chain are (1..l+1) * md/(l+1);
                # measure the mean deviation from that pattern.
                exps = (torch.tensor([(l+1)*s[0] / md for s in syms[st][:l+1]]) - torch.arange(1, l+2)).abs().mean()
                if exps > GROUP_THRESH:
                    break
                best_l = l
            group = [s[1] for s in syms[st][:best_l+1]]
            longest = max(longest, len(group) + 1)
            gsyms[st] = (syms[st][best_l][0], group)
    return gsyms, longest
# returns list of lists of indices, where each sub-list is in the same semantic group
def getSemanticGroups(node):
    """Group child indices for symmetry search. Per-semantic-label grouping
    is disabled: every child goes into the single group 'all'."""
    groups = {}
    for idx in range(len(node['children_names'])):
        groups.setdefault('all', []).append(idx)
    return groups
def spec_add(d, k, v):
    """Append v to the list stored at d[k], creating the list on first use."""
    d.setdefault(k, []).append(v)
def checkSameAtts(node, ind, oind):
    """True when cubes ind and oind attach to the same multiset of
    neighboring cube indices."""
    pc_atts = node['pc_atts']
    neighbors_a = sorted(entry[0] for entry in pc_atts[ind])
    neighbors_b = sorted(entry[0] for entry in pc_atts[oind])
    return neighbors_a == neighbors_b
# For the indices in group, returns all symmetries where index is canonical member
def getSymsForGroup(node, group):
    """Collect candidate symmetries inside one semantic group.

    For every ordered pair (ind, oind) with matching attachment topology,
    records any translational/reflectional symmetry with ind as canonical
    member. Returns {ind: (longest chain length, grouped syms)}; cubes
    already consumed by a previous symmetry pass are skipped.
    """
    if len(group) == 1:
        return {}
    if 'syms' in node:
        prev_syms = set([s[0] for s in node['syms']])
    else:
        prev_syms = set([])
    syms = {}
    for ind in group:
        nsyms = {}
        for oind in group:
            if ind == oind or ind in prev_syms or oind in prev_syms:
                continue
            if not checkSameAtts(node, ind, oind):
                continue
            trans_sym = checkTranSym(node, ind, oind)
            ref_sym = checkRefSym(node, ind, oind)
            if trans_sym is not None:
                spec_add(nsyms, trans_sym[0], (trans_sym[1], oind))
            if ref_sym is not None:
                spec_add(nsyms, ref_sym, oind)
        ngsyms, longest = groupSyms(nsyms)
        if len(ngsyms) > 0:
            syms[ind] = (longest, ngsyms)
    return syms
def getLongestTranSym(ind, sg, mv):
    """Return (sym_record, member_indices) for the translational chain of
    cube `ind` whose length matches mv - 1, checking axes X, Y, Z in order.

    sym_record is [ind, axis, chain length, scale, members]. Falls through
    (returning None implicitly) if no axis matches — callers guarantee one
    does.
    """
    for axis in ('tr_X', 'tr_Y', 'tr_Z'):
        entry = sg[1].get(axis)
        if entry is not None and len(entry[1]) == (mv - 1):
            members = entry[1]
            return [ind, axis, len(members), entry[0], members], members
# Any node that shows up in ntr, remove all symmetries that they are involved in in pot_syms
def removePotSyms(pot_syms, ntr):
    """Mutate pot_syms in place: drop every candidate symmetry that involves
    a cube in ntr, shrinking translational chains (and their recorded
    lengths) and deleting entries that become empty."""
    for n in ntr:
        if n in pot_syms:
            pot_syms.pop(n)
    ntr = set(ntr)
    ps_inds = []
    for i in pot_syms:
        sts_to_pop = []
        for st in pot_syms[i][1]:
            if 'ref' in st:
                # Reflection entries store the single mirrored index first.
                if pot_syms[i][1][st][0] in ntr:
                    sts_to_pop.append(st)
            elif 'tr' in st:
                # Collect members to drop in reverse order so pops are safe.
                js_to_pop = []
                for j in range(len(pot_syms[i][1][st][1])):
                    if pot_syms[i][1][st][1][j] in ntr:
                        js_to_pop = [j] + js_to_pop
                for j in js_to_pop:
                    pot_syms[i][1][st][1].pop(j)
                if len(pot_syms[i][1][st][1]) == 0:
                    sts_to_pop.append(st)
                elif len(js_to_pop) > 0:
                    # Recorded longest-chain length shrinks with the chain.
                    pot_syms[i] = (pot_syms[i][0] - len(js_to_pop), pot_syms[i][1])
        for st in sts_to_pop:
            pot_syms[i][1].pop(st)
        ps_inds = [i] + ps_inds
    # Finally remove canonical members left with no symmetries at all.
    for i in ps_inds:
        if len(pot_syms[i][1]) == 0:
            pot_syms.pop(i)
# Takes in all of the symmetries in a group, chooses 'best' one, removes all other ones from the other nodes
def getMaxGroupSyms(pot_syms, syms=None, tr=None):
    """Greedily resolve the candidate symmetries of one group.

    Preference order: longest translational chain, then reflections
    (X, Y, Z), then single-step translations. pot_syms is consumed
    (mutated) as choices are made. Returns (sym_records, covered_indices).

    Note: the accumulators previously used mutable defaults ([], []) that
    were returned directly to the caller, so external mutation could leak
    into later invocations; None sentinels create fresh lists per call.
    """
    syms = [] if syms is None else syms
    tr = [] if tr is None else tr
    if len(pot_syms) == 0:
        return syms, tr
    # Find the canonical member with the longest chain (length > 2).
    ml = 2
    mi = None
    for ind in pot_syms:
        if pot_syms[ind][0] > ml:
            ml = pot_syms[ind][0]
            mi = ind
    if mi is not None:
        ns, ntr = getLongestTranSym(mi, pot_syms[mi], ml)
        removePotSyms(pot_syms, [mi] + ntr)
        return getMaxGroupSyms(pot_syms, syms + [ns], tr + ntr)
    for st in ['ref_X', 'ref_Y', 'ref_Z']:
        for ind in pot_syms:
            if st in pot_syms[ind][1]:
                ntr = pot_syms[ind][1][st]
                ns = [ind, st, ntr]
                removePotSyms(pot_syms, [ind] + ntr)
                return getMaxGroupSyms(pot_syms, syms + [ns], tr + ntr)
    for st in ['tr_X', 'tr_Y', 'tr_Z']:
        for ind in pot_syms:
            if st in pot_syms[ind][1]:
                # Single-step translation: keep only the first chain member.
                ntr = [pot_syms[ind][1][st][1][0]]
                ns = [ind, st, 1, pot_syms[ind][1][st][0], ntr]
                removePotSyms(pot_syms, [ind] + ntr)
                return getMaxGroupSyms(pot_syms, syms + [ns], tr + ntr)
# remove all nodes to remove from the list, from children, remove all attachments that mention these nodes
def cleanNode(node, ntr, syms):
    """Apply chosen symmetries to a node in place.

    Removes every cube index in ntr from 'children'/'children_names'/'cubes',
    remaps attachment indices to the compacted numbering, stashes the removed
    member cubes inside each sym record while processing, and appends the
    remapped sym records to node['syms'].
    """
    if len(syms) == 0:
        if 'syms' not in node:
            node['syms'] = []
        return
    sntr = []
    num_cubes_per_sym = {}
    # Replace each sym's member-index list with the member cubes themselves
    # (so sorting the records below doesn't depend on stale indices).
    for sym in syms:
        sym_cubes = []
        num_cubes_per_sym[sym[0]] = len(sym[-1])
        for itr in sym[-1]:
            sntr.append(itr)
            sym_cubes.append(node['cubes'][itr])
        sym[-1] = sym_cubes
    syms.sort()
    sym_cubes = []
    for sym in syms:
        scubes = sym.pop(-1)
        sym_cubes += scubes
    # Pop higher indices first so earlier positions stay valid.
    sntr.sort(reverse=True)
    # Map old cube indices to their positions after removal.
    cube_map = {}
    count = 0
    for i in range(len(node['cubes'])):
        if i not in ntr:
            cube_map[i] = count
            count += 1
    for n in sntr:
        for key in ['children', 'children_names', 'cubes']:
            node[key].pop(n)
    # Keep only attachments between surviving cubes, with remapped indices.
    atts = []
    temp_atts = node.pop('attachments')
    for att in temp_atts:
        if att[0] not in ntr and att[1] not in ntr:
            atts.append((cube_map[att[0]], cube_map[att[1]], att[2], att[3]))
    if 'syms' in node:
        node['syms'] += syms
    else:
        node['syms'] = syms
    node['syms'] = [[cube_map[s[0]]] + s[1:] for s in node['syms']]
    node['attachments'] = atts
def getMaxSyms(pot_group_syms):
    """Resolve the chosen symmetries for every group.

    Returns (all symmetry records, set of cube indices covered by some
    symmetry and therefore due for removal).
    """
    chosen = []
    covered = set()
    for group in pot_group_syms:
        if not group:
            continue
        group_syms, group_covered = getMaxGroupSyms(group)
        chosen.extend(group_syms)
        covered.update(group_covered)
    return chosen, covered
def getPerCubeAtts(node):
    """Index attachments by cube: for each (c1, c2, at1, at2) record
    (neighbor, own local attachment point) under both endpoints."""
    pc_atts = {}
    for c1, c2, at1, at2 in node['attachments']:
        pc_atts.setdefault(c1, []).append((c2, at1))
        pc_atts.setdefault(c2, []).append((c1, at2))
    return pc_atts
def addSimpSymmetries(node):
    """Recursively detect simple translational/reflectional symmetries in a
    hierarchy and fold them into each node (pruning the covered cubes)."""
    for sub in node['children']:
        if sub:
            addSimpSymmetries(sub)
    node['pc_atts'] = getPerCubeAtts(node)
    group_map = getSemanticGroups(node)
    candidates = [getSymsForGroup(node, group_map[g]) for g in group_map]
    chosen, removed = getMaxSyms(candidates)
    cleanNode(node, removed, chosen)
##########################
def checkXRefSym(node, i, o):
    """Boolean variant of checkRefSym restricted to the bbox X axis.

    True when cube i reflects onto cube o across the bbox-centered X plane
    and every attachment of i lands close to its mirrored counterpart.
    """
    c_i = node['cubes'][i]
    c_o = node['cubes'][o]
    if not approx_same_dims(c_i, c_o, max(node['cubes'][0]['xd'], node['cubes'][0]['yd'], node['cubes'][0]['zd'])):
        return False
    cdir = c_o['center'] - c_i['center']
    cdir /= cdir.norm()
    # Displacement must be aligned with the bbox X axis.
    if cdir.dot(node['cubes'][0]['xdir']) < 1 - SANG_THRESH:
        return False
    center = smp_pt(cubeToTensor(node['cubes'][0]), torch.tensor([.5, .5, .5]))
    rdir = node['cubes'][0]['xdir']
    c_ref = reflect_cube(c_i, center, rdir)
    if not approx_same_point(
        c_ref['center'],
        c_o['center'],
        max(node['cubes'][0]['xd'], node['cubes'][0]['yd'], node['cubes'][0]['zd'])
    ):
        return False
    for n, at1 in node['pc_atts'][i]:
        opt = smp_pt(
            cubeToTensor(c_i),
            torch.tensor(at1)
        )
        rpt = reflect_point(opt, center, rdir)
        # Local attachment x coordinate mirrors under the X reflection.
        rat1 = [1-at1[0], at1[1], at1[2]]
        #if not isCloseAtt(node, rpt, o, rat1):
        if not isClosePt(node, rpt, o, rat1):
            return False
    return True
# Get all nodes that have a reflectional symmetry
# For now just do this over x-axis
# Check that all attachments are close in the opposite member
def getRefSyms(node):
    """Return {cube index: [x-reflection partners]} over all leaf children
    (cubes with non-empty sub-hierarchies are skipped)."""
    count = len(node['cubes'])
    ref_syms = {}
    for ind in range(1, count):
        if node['children'][ind]:
            continue
        for oind in range(1, count):
            if oind == ind or node['children'][oind]:
                continue
            if checkXRefSym(node, ind, oind):
                spec_add(ref_syms, ind, oind)
    return ref_syms
# Get all connected components that share the same symmetry, with more than one member
# (for rotational just to take the ones with the "largest" # of members)
def getRefConnComps(pot_ref_syms, node):
    """Cluster symmetric cubes into attachment-connected components.

    Grows each component by repeatedly absorbing symmetric cubes attached to
    any current member. Returns a list of [component members, their first
    reflection partners] for components with more than one member.
    """
    groups = []
    added = set()
    for i in pot_ref_syms:
        if i in added:
            continue
        group = [i]
        # Running set of cubes attached to any member of the component.
        pc_atts = set(n[0] for n in node['pc_atts'][i])
        added.add(i)
        again = True
        while(again):
            again = False
            for j in pot_ref_syms:
                if j not in added:
                    if j in pc_atts:
                        group.append(j)
                        added.add(j)
                        pc_atts = pc_atts.union(set(n[0] for n in node['pc_atts'][j]))
                        again=True
        if len(group) > 1:
            groups.append([
                group,
                [pot_ref_syms[g][0] for g in group]
            ])
    return groups
def allConnected(a, b, node):
    """True when, for every pairwise-zipped entry of a and b, the cube index
    in a is attached to the corresponding cube index in b."""
    prev_atts = node['pc_atts']
    for (_, ind), (_, oind) in zip(a, b):
        if not any(neighbor == oind for neighbor, _ in prev_atts[ind]):
            return False
    return True
# Make sure for each attachment to a cube 'outside' of the group, that the attachment is Valid
def checkValidRefAtts(ref_ccs, node):
    """Filter reflection components: keep a component only if every member's
    attachment to a cube outside the component stays valid when reflected
    across the bbox-centered X plane."""
    valid_groups = []
    center = smp_pt(cubeToTensor(node['cubes'][0]), torch.tensor([.5, .5, .5]))
    rdir = node['cubes'][0]['xdir']
    for group in ref_ccs:
        isValid = True
        mems = set(group[0])
        for i in group[0]:
            for n, at1 in node['pc_atts'][i]:
                # Attachments inside the component reflect together.
                if n in mems:
                    continue
                opt = smp_pt(
                    cubeToTensor(node['cubes'][i]), torch.tensor(at1)
                )
                rpt = reflect_point(opt, center, rdir)
                if isValidAtt(node, opt, n) and not isValidAtt(node, rpt, n):
                    isValid = False
        if isValid:
            valid_groups.append(group)
    return valid_groups
# For any group of valid ref syms, turn it into a sub-program
# This involves:
# - re-ordering cubes/children
# - finding new OBB
# - refinding all attachments
# Adding this information to node['syms']
def createRefSubPrograms(valid_ref_syms, node):
    """Fold each validated reflection component into a child sub-program.

    Mutates node in place: the component's head cube is replaced by a new
    sub-program (with its own OBB and recomputed attachments), the mirrored
    members are removed, node['syms'] records a 'ref_X' per sub-program, and
    the node's own attachments are recomputed for the surviving cubes.
    """
    node.pop('pc_atts')
    node.pop('attachments')
    new_progs = []
    to_remove = []
    to_head = []
    count = 0
    for group in valid_ref_syms:
        new_prog = {
            'name': f'{node["name"]}_ssp_{count}',
            'children': [],
            'children_names': [],
            'cubes': [],
        }
        count += 1
        # The first member becomes the slot the sub-program occupies.
        to_head.append(group[0][0])
        for i in group[0]:
            to_remove.append(i)
            new_prog['children'].append(node['children'][i])
            new_prog['children_names'].append(node['children_names'][i])
            new_prog['cubes'].append(node['cubes'][i])
        # Give the sub-program its own bounding box at index 0 and compute
        # its internal attachments from scratch.
        bbox = jp.getOBB(new_prog['cubes'])
        new_prog['children'] = [{}] + new_prog['children']
        new_prog['children_names'] = ["bbox"] + new_prog['children_names']
        new_prog['cubes'] = [bbox] + new_prog['cubes']
        ind_to_pc, scene_geom = inter.samplePC(new_prog['cubes'], split_bbox=True)
        inters = inter.findInters(ind_to_pc, scene_geom)
        new_prog['attachments'] = inter.calcAttachments(inters, scene_geom, ind_to_pc)
        # Mirrored partners disappear from the parent entirely.
        for o in group[1]:
            to_remove.append(o)
        new_progs.append(new_prog)
    for ind, new_prog in zip(to_head, new_progs):
        node['children'][ind] = new_prog
        node['cubes'][ind] = new_prog['cubes'][0]
        node['children_names'][ind] = new_prog['name']
    # Map old cube indices to their compacted positions (heads survive).
    cube_map = {}
    count = 0
    for i in range(len(node['cubes'])):
        if i not in (set(to_remove) - set(to_head)):
            cube_map[i] = count
            count += 1
    node['syms'] = [[cube_map[th], 'ref_X'] for th in to_head]
    to_remove.sort(reverse=True)
    to_head = set(to_head)
    for tr in to_remove:
        if tr in to_head:
            continue
        for key in ['children', 'cubes', 'children_names']:
            node[key].pop(tr)
    to_remove = set(to_remove) - to_head
    # Recompute the parent's attachments over the surviving cubes.
    ind_to_pc, scene_geom = inter.samplePC(node['cubes'], split_bbox=True)
    inters = inter.findInters(ind_to_pc, scene_geom)
    node['attachments'] = inter.calcAttachments(inters, scene_geom, ind_to_pc)
def addSymSubPrograms(node):
    """Entry point for symmetry sub-program extraction; currently only the
    reflectional (X-axis) pass is implemented."""
    addRefSymSubPrograms(node)
def addRefSymSubPrograms(node):
    """Recursively extract X-reflection sub-programs from a hierarchy.

    Pipeline per node (each stage bails out early when it yields nothing):
    detect reflective pairs -> cluster into attachment-connected components
    -> validate external attachments -> rewrite the node. Mutates node
    (sets node['pc_atts'] as a side effect even when bailing out).
    """
    for child in node['children']:
        if len(child) > 0:
            addRefSymSubPrograms(child)
    node['pc_atts'] = getPerCubeAtts(node)
    pot_ref_syms = getRefSyms(node)
    if len(pot_ref_syms) == 0:
        return
    ref_ccs = getRefConnComps(pot_ref_syms, node)
    if len(ref_ccs) == 0:
        return
    valid_ref_syms = checkValidRefAtts(ref_ccs, node)
    if len(valid_ref_syms) == 0:
        return
    createRefSubPrograms(valid_ref_syms, node)
| 22,111 | 26.571072 | 153 | py |
ShapeMOD | ShapeMOD-main/SA_lang/parse_files/json_parse.py | import torch
import sys
import ast
import numpy as np
import random
import os
import pickle
import old_intersect as inter
import random
from copy import deepcopy
import networkx as nx
import symmetry as sym
# Pipeline feature flags (read by the parsing passes; e.g. DO_NORM_AA_CUBES
# gates the axis-snapping in jsonToProps).
VERBOSE = False
DO_SHORTEN = True
DO_SIMP_SYMMETRIES = True
DO_SQUEEZE = True
DO_VALID_CHECK = True
DO_NORM_AA_CUBES = True
DO_SEM_FLATTEN = True
DO_SEM_REHIER = True
# Geometric tolerances.
SQUARE_THRESH = 0.1   # relative extent difference for a "square" face (isSquare)
SD_THRESH = 0.01      # minimum cuboid extent; thinner parts are dropped (jsonToProps)
AA_THRESH = 0.995 # cosine threshold for treating a direction as axis-aligned
# Per-category part-label sets. Their use is not visible in this chunk —
# presumably they drive the collapse/flatten/re-hierarchy passes gated by the
# DO_SEM_* flags above; confirm against the callers.
SCOL_MAP = {
    'chair': set(['caster', 'mechanical_control']),
    'table': set(['caster', 'cabinet_door', 'drawer', 'keyboard_tray']),
    'storage': set(['drawer', 'cabinet_door', 'mirror', 'caster'])
}
SFLAT_MAP = {
    'chair': set(['chair_back', 'chair_arm', 'chair_base', 'chair_seat', 'footrest', 'chair_head']),
    'table': set(['tabletop', 'table_base']),
    'storage': set(['cabinet_frame', 'cabinet_base'])
}
SRH_MAP = {
    'storage':('cabinet_frame', set(['countertop', 'shelf', 'drawer', 'cabinet_door', 'mirror']))
}
def isSquare(a, b, c):
    """True when extents a and b match to within SQUARE_THRESH, relative to
    the largest of the three extents."""
    scale = max(a.item(), b.item(), c.item())
    return ((a - b).abs() / scale) < SQUARE_THRESH
def isAxisAligned(props, bbox, thresh=AA_THRESH):
    """True when all three direction vectors of props align with the bbox's
    axes (cosine >= thresh).

    Side effect: normalizes props' direction tensors in place (all three are
    normalized before any check, matching the original statement order).
    """
    for key in ('xdir', 'ydir', 'zdir'):
        props[key] /= props[key].norm()
    for key in ('xdir', 'ydir', 'zdir'):
        if props[key].dot(bbox[key]) < thresh:
            return False
    return True
def shouldChange(props, bbox):
    """Decide whether a cuboid's frame should be snapped to the bbox frame:
    already axis-aligned, square on all faces, or square on one face whose
    axis is individually axis-aligned."""
    if isAxisAligned(props, bbox):
        return True
    squareness = {
        'x': isSquare(props['zd'], props['yd'], props['xd']),
        'y': isSquare(props['zd'], props['xd'], props['yd']),
        'z': isSquare(props['xd'], props['yd'], props['zd']),
    }
    if squareness['x'] and squareness['y'] and squareness['z']:
        return True
    for axis in ('x', 'y', 'z'):
        if squareness[axis]:
            return isSpecAxisAligned(props[axis + 'dir'], bbox[axis + 'dir'])
    return False
def isSpecAxisAligned(cdir, axis):
    """True when direction cdir aligns with `axis` (cosine >= AA_THRESH).
    Side effect: normalizes cdir in place."""
    cdir /= cdir.norm()
    return bool(cdir.dot(axis) >= AA_THRESH)
def getDataPath(category):
    """Return the on-disk hierarchy directory for a shape category.

    'storage' is stored under the longer name 'storagefurniture'.
    """
    dir_name = 'storagefurniture' if category == 'storage' else category
    return f"/home/{os.getenv('USER')}/data/{dir_name}_hier/"
def getSemOrder(category):
    """Load the canonical part-semantics ordering for a category.

    Returns a dict mapping part name -> order rank (as a string); 'bbox' and
    'other' get fixed sentinel ranks.

    Raises:
        ValueError: for an unrecognized category. (Previously `assert False`,
        which is stripped under `python -O` and would then fall through to a
        NameError on sem_order_path.)
    """
    if category == "chair":
        sem_order_path = "stats/part_semantics/PGP-Chair.txt"
    elif category == "storage":
        sem_order_path = "stats/part_semantics/PGP-Storage.txt"
    elif category == "table":
        sem_order_path = "stats/part_semantics/PGP-Table.txt"
    else:
        raise ValueError(f'Invalid Category {category}')
    sem_order = {"bbox": "-1", "other": "100"}
    with open(sem_order_path) as f:
        for line in f:
            # Each line: "<rank> <path/with/name>"; key on the leaf name.
            sem_order[line.split()[1].split('/')[-1]] = line.split()[0]
    return sem_order
def cubeOrder(cubes, names, sem_order):
    """Return a stable ordering (list of indices) for cubes.

    Sort key, in priority order: semantic rank, coarse (half-step rounded)
    center position sum, coarse x/y/z, exact normalized x/y/z, then the
    original index as the final tie-break.
    """
    d = []
    # Normalize cube centers into [0, 1] using the global min/max coordinate.
    min_c = np.array([1e8,1e8,1e8])
    max_c = np.array([-1e8,-1e8,-1e8])
    for rw in cubes:
        min_c = np.min((min_c, rw['center'].numpy()), axis = 0)
        max_c = np.max((max_c, rw['center'].numpy()), axis = 0)
    mac = np.max(max_c)
    mic = np.min(min_c)
    for c_ind, (rw, name) in enumerate(zip(cubes, names)):
        sc = (rw['center'].numpy() - mic) / (mac - mic)
        # Quantize to half steps so near-equal positions compare equal and
        # the finer keys break the tie deterministically.
        x_r = round(sc[0]*2)/2
        y_r = round(sc[1]*2)/2
        z_r = round(sc[2]*2)/2
        d.append((
            int(sem_order[name]),
            x_r + y_r + z_r,
            x_r,
            y_r,
            z_r,
            sc[0],
            sc[1],
            sc[2],
            c_ind
        ))
    d.sort()
    return [c_ind for _,_,_,_,_,_,_,_,c_ind in d]
# Canonical axis-aligned frame that nearly axis-aligned cuboids are snapped
# to (see shouldChange / jsonToProps).
Sbbox = {
    'xdir': torch.tensor([1.0, 0.0, 0.0]),
    'ydir': torch.tensor([0.0, 1.0, 0.0]),
    'zdir': torch.tensor([0.0, 0.0, 1.0]),
}
def vector_cos(norm1, norm2):
    """Cosine of the angle between two vectors; returns 0. when either
    vector has zero length."""
    v1 = np.asarray(norm1)
    v2 = np.asarray(norm2)
    magnitude = np.linalg.norm(v1) * np.linalg.norm(v2)
    if magnitude == 0.:
        return 0.
    return np.dot(v1, v2) / float(magnitude)
def orientProps(center, xd, yd, zd, xdir, ydir, zdir):
    """Relabel an oriented cuboid's signed axes as canonical right/up/forward.

    Greedily picks, among the six signed axis directions, the best match to
    world +X as the new x-axis, then (from the remaining four) the best
    match to +Y, and finally the best match to +Z, returning the cube's
    properties in that canonical frame.
    """
    rt = np.asarray([1., 0., 0.])
    up = np.asarray([0., 1., 0.])
    fwd = np.asarray([0., 0., 1.])
    # Candidate (direction, dimension, slot) triples; slot i and (i+3) % 6
    # are negations of each other, i.e. the same physical axis.
    l = [
        (xdir, xd, 0),
        (ydir, yd, 1),
        (zdir, zd, 2),
        (-1 * xdir, xd, 3),
        (-1 * ydir, yd, 4),
        (-1 * zdir, zd, 5)
    ]
    rtdir, rtd, rind = sorted(
        deepcopy(l), key=lambda x: vector_cos(rt, x[0]))[-1]
    # Remove the chosen direction AND its negation; pop the larger index
    # first so the smaller index stays valid.
    if rind >= 3:
        l.pop(rind)
        l.pop((rind+3) % 6)
    else:
        l.pop((rind+3) % 6)
        l.pop(rind)
    # Re-number the four survivors so that slot i and (i+2) % 4 are again
    # a negation pair after the two removals above.
    for i in range(0, 4):
        p_ind = l[i][2]
        if p_ind > max(rind, (rind+3) % 6):
            l[i] = (l[i][0], l[i][1], l[i][2] - 2)
        elif p_ind > min(rind, (rind+3) % 6):
            l[i] = (l[i][0], l[i][1], l[i][2] - 1)
    updir, upd, upind = sorted(
        deepcopy(l), key=lambda x: vector_cos(up, x[0]))[-1]
    if upind >= 2:
        l.pop(upind)
        l.pop((upind+2) % 4)
    else:
        l.pop((upind+2) % 4)
        l.pop(upind)
    # NOTE(review): this rebinds `fwd` from the +Z reference vector to the
    # chosen dimension; the sort key still sees the old value because all
    # keys are evaluated before the tuple assignment completes.
    fwdir, fwd, _ = sorted(l, key=lambda x: vector_cos(fwd, x[0]))[-1]
    return {
        'center': torch.tensor(center).float(),
        'xd': torch.tensor(rtd).float(),
        'yd': torch.tensor(upd).float(),
        'zd': torch.tensor(fwd).float(),
        'xdir': torch.tensor(rtdir).float(),
        'ydir': torch.tensor(updir).float(),
        'zdir': torch.tensor(fwdir).float()
    }
def jsonToProps(json):
    """Convert a raw 12-number box [cx,cy,cz, xd,yd,zd, xdir(3), ydir(3)]
    into an oriented-cuboid property dict, or None for degenerate boxes."""
    vals = np.array(json)
    center = np.array(vals[:3])
    xd, yd, zd = vals[3], vals[4], vals[5]
    xdir = vals[6:9]
    xdir /= np.linalg.norm(xdir)
    ydir = vals[9:]
    ydir /= np.linalg.norm(ydir)
    # The third axis is implied by the other two.
    zdir = np.cross(xdir, ydir)
    zdir /= np.linalg.norm(zdir)
    # Reject boxes that are too thin along any dimension.
    if min(xd, yd, zd) < SD_THRESH:
        return None
    props = orientProps(center, xd, yd, zd, xdir, ydir, zdir)
    if DO_NORM_AA_CUBES and shouldChange(props, Sbbox):
        # Snap nearly-axis-aligned cubes exactly onto the world axes.
        props['xdir'] = Sbbox['xdir'].clone().detach()
        props['ydir'] = Sbbox['ydir'].clone().detach()
        props['zdir'] = Sbbox['zdir'].clone().detach()
    return props
def addAttachments(node, sem_order):
    """Reorder `node`'s cube slots canonically, then record part-to-part
    attachment points, recursing into every non-empty child."""
    order = cubeOrder(node['cubes'], node['children_names'], sem_order)
    node['cubes'] = [node['cubes'][i] for i in order]
    node['children'] = [node['children'][i] for i in order]
    node['children_names'] = [node['children_names'][i] for i in order]
    ind_to_pc, scene_geom = inter.samplePC(node['cubes'], split_bbox=True)
    inters = inter.findInters(ind_to_pc, scene_geom)
    node['attachments'] = inter.calcAttachments(inters, scene_geom, ind_to_pc)
    for child in node['children']:
        if len(child) > 0:
            addAttachments(child, sem_order)
# From raw json, get graph structure and all leaf cuboids
def getShapeHier(ind, category):
    """Parse shape `ind` of `category` from its raw JSON file into a nested
    hierarchy of {children, children_names, cubes, name} nodes.

    Chains of single-child nodes are collapsed, and sub-trees whose label
    appears in SCOL_MAP[category] are dropped (kept only as leaf cubes).
    """
    data_path = getDataPath(category)
    with open(data_path + ind + ".json") as f:
        # The file holds one literal expression per line; keep the last.
        json = None
        for line in f:
            json = ast.literal_eval(line)
    hier = {}
    queue = [(json, hier)]
    while len(queue) > 0:
        json, node = queue.pop(0)
        if "children" not in json:
            continue
        name = json["label"]
        # Don't add sub-programs when we collapse
        collapse_names = SCOL_MAP[category]
        if name in collapse_names:
            continue
        # Collapse single-child chains down to the first branching point.
        # FIX: the parenthesis used to sit around the `and` expression
        # (`while (A and len) == 1:`), which was only accidentally
        # equivalent to the intended condition written here.
        while "children" in json and len(json["children"]) == 1:
            json = json["children"][0]
        if "children" not in json:
            continue
        node.update({
            "children": [],
            "children_names": [],
            "cubes": [],
            "name": name
        })
        for c in json["children"]:
            cprops = jsonToProps(c["box"])
            if cprops is not None:
                new_c = {}
                queue.append((c, new_c))
                node["children"].append(new_c)
                node["cubes"].append(cprops)
                node["children_names"].append(c["label"])
    return hier
def getOBB(parts):
    """Fit an oriented bounding box around the corner samples of `parts`."""
    corners = inter.sampleCorners(parts)
    return inter.points_obb(corners.cpu(), 1)
def cleanHier(hier):
    """Recursively empty out any node whose children list is empty.

    Such nodes become `{}`, which the rest of the pipeline treats as the
    leaf marker (`len(node) == 0`).
    """
    for child in hier['children']:
        if len(child) > 0:
            cleanHier(child)
    if not hier['children']:
        hier.clear()
def trimHier(hier):
    """Splice out internal nodes that wrap exactly one sub-node.

    When child i has a single grandchild, that grandchild (plus its cube
    and name) replaces the child in slot i; recursion then continues from
    whatever now occupies the slot.
    """
    for i, child in enumerate(hier['children']):
        if len(child) == 0:
            continue
        if len(child['children']) == 1:
            hier['cubes'][i] = child['cubes'][0]
            hier['children_names'][i] = child['children_names'][0]
            hier['children'][i] = child['children'][0]
        if len(hier['children'][i]) > 0:
            trimHier(hier['children'][i])
def fillHier(hier):
    """Bottom-up pass: replace each internal child's cube slot with that
    child's bounding box, then compute, store, and return (a copy of)
    this node's own OBB."""
    for i, child in enumerate(hier['children']):
        if len(child) > 0:
            hier['cubes'][i] = fillHier(child)
    hier['bbox'] = getOBB(hier['cubes'])
    return deepcopy(hier['bbox'])
# centers and orients root bounding box
# propogates transformation to all cuboids
# also instanties bounding box into the cube + children spots
def normalizeHier(hier):
    """Re-derive the root bbox axis-aligned from corner samples, recenter
    every cube around it, and splice each node's bbox in as cube slot 0."""
    hier.pop('bbox')
    samps = inter.sampleCorners(hier['cubes']).cpu()
    hi = samps.max(dim=0).values
    lo = samps.min(dim=0).values
    dims = hi - lo
    rbbox = {
        'xd': dims[0],
        'yd': dims[1],
        'zd': dims[2],
        'center': (hi + lo) / 2,
        # The normalized root box is world-axis aligned by construction.
        'xdir': torch.tensor([1., 0., 0.]),
        'ydir': torch.tensor([0., 1., 0.]),
        'zdir': torch.tensor([0., 0., 1.]),
    }
    hier['bbox'] = rbbox
    offset = rbbox['center']
    queue = [hier]
    while queue:
        node = queue.pop(0)
        bbox = node.pop('bbox')
        # Slot 0 of every node holds that node's own bounding box.
        node['children'] = [{}] + node['children']
        node['children_names'] = ["bbox"] + node['children_names']
        node['cubes'] = [bbox] + node['cubes']
        for cube in node['cubes']:
            cube['center'] = cube['center'] - offset
        for child in node['children']:
            if len(child) > 0:
                queue.append(child)
def markLeafCubes(hier):
    """BFS the hierarchy tagging each node with 'leaf_inds': per cube slot,
    the global leaf index if that slot is a leaf, else -1.

    Returns the flat list of leaf cubes in discovery order.
    """
    parts = []
    queue = [hier]
    while len(queue) > 0:
        node = queue.pop(0)
        node['leaf_inds'] = []
        assert len(node['cubes']) == len(node['children'])
        for cube, child in zip(node['cubes'], node['children']):
            if len(child) > 0:
                # Internal slot: descend later, no leaf index here.
                queue.append(child)
                node['leaf_inds'].append(-1)
            else:
                node['leaf_inds'].append(len(parts))
                parts.append(cube)
    return parts
def replace_parts(hier, parts, key):
    """Write the leaf cubes in `parts` back into the hierarchy through the
    per-node index lists stored under `key`.

    A None entry in `parts` marks a dropped leaf: its slot is removed from
    'children', 'cubes' and 'children_names'. The `key` entry is deleted
    from every node afterwards.
    """
    queue = [hier]
    while len(queue) > 0:
        node = queue.pop(0)
        dead_slots = []
        for slot, leaf_ind in enumerate(node[key]):
            if leaf_ind == -1:
                continue
            replacement = parts[leaf_ind]
            if replacement is None:
                dead_slots.append(slot)
            else:
                node['cubes'][slot] = replacement
        # Remove the highest indices first so earlier slots stay valid.
        for slot in sorted(dead_slots, reverse=True):
            node['children'].pop(slot)
            node['cubes'].pop(slot)
            node['children_names'].pop(slot)
        for child in node['children']:
            if child is not None and len(child) > 0:
                queue.append(child)
        node.pop(key)
# Takes in a hierarchy of just leaf cuboids,
# finds new parameters for leaf cuboids so that
# part-to-part connections are as valid as possible
def shortenLeaves(hier):
    """Shrink leaf cuboids (smallest volume first) to reduce overlap, then
    drop hidden or degenerately-thin leaves and write survivors back."""
    if VERBOSE:
        print("Doing Shortening")
    parts = markLeafCubes(hier)
    bad_inds = inter.findHiddenCubes(parts)
    ind_to_pc, scene_geom = inter.samplePC(parts)
    inters = inter.findInters(ind_to_pc, scene_geom)
    # Process cubes in increasing volume order so small parts adapt first.
    dim_parts = [
        (p['xd'] * p['yd'] * p['zd'], i) for i,p in enumerate(parts)
    ]
    dim_parts.sort()
    for _, ind in dim_parts:
        if ind in bad_inds:
            continue
        if VERBOSE:
            print(f"Shortening Leaf ind: {ind}")
        sres = inter.shorten_cube(inters, parts, ind, scene_geom)
        if sres is not None:
            # Cube changed: refresh its samples, then recompute just the
            # affected intersection pairs ("i_j" strings from shorten_cube).
            t_ind_to_pc, t_scene_geom = inter.samplePC([parts[ind]])
            ind_to_pc[ind] = t_ind_to_pc[0]
            scene_geom[ind] = t_scene_geom[0]
            sres = [(int(s.split('_')[0]), int(s.split('_')[1])) for s in sres]
            new_inters = inter.findInters(ind_to_pc, scene_geom, sres)
            inters.update(new_inters)
        # Shrinking may make a cube degenerately thin; drop it if so.
        if parts[ind]['xd'] < SD_THRESH or \
            parts[ind]['yd'] < SD_THRESH or \
            parts[ind]['zd'] < SD_THRESH:
            bad_inds.append(ind)
    for bi in bad_inds:
        parts[bi] = None
    replace_parts(hier, parts, 'leaf_inds')
def make_conn_graph(num_nodes, attachments):
    """Undirected graph over cube indices with one edge per attachment."""
    edge_list = [(a, b) for (a, b, _, _) in attachments]
    G = nx.Graph()
    G.add_nodes_from(list(range(num_nodes)))
    G.add_edges_from(edge_list)
    return G
def assertConnected(num_nodes, attachments):
    """Raise AssertionError unless the attachment graph is fully connected."""
    graph = make_conn_graph(num_nodes, attachments)
    n_comp = nx.number_connected_components(graph)
    assert n_comp == 1, 'disconnected graph'
def checkConnected(node):
    """Recursively assert every sub-program's attachment graph is connected."""
    assertConnected(len(node['cubes']), node['attachments'])
    for child in filter(len, node['children']):
        checkConnected(child)
def memoize(f):
    """Disk-memoize f(ind, category) as pickles under parse_cache/.

    NOTE(review): the cache directory must already exist (os.listdir
    raises otherwise), and this decorator is shadowed by the 3-argument
    `memoize` defined later in this file — confirm which one is intended.
    """
    def helper(ind, category):
        cdir = "parse_cache"
        cached_res = os.listdir(cdir)
        if ind in cached_res:
            return pickle.load(open(cdir+"/"+ind, "rb"))
        else:
            hier = f(ind, category)
            pickle.dump(hier, open(cdir+"/"+ind, "wb"))
            return hier
    return helper
def checkCubeNum(node):
    """Assert no sub-program in the hierarchy has more than 11 cubes."""
    num_cubes = len(node['cubes'])
    assert num_cubes <= 11, f"Saw program with {num_cubes} cubes"
    for child in node['children']:
        if len(child) > 0:
            checkCubeNum(child)
def flattenNode(node):
    """Recursively pull every descendant leaf of `node` up to be a direct
    child, discarding intermediate sub-programs."""
    for child in node['children']:
        if len(child) > 0:
            flattenNode(child)
    # After recursion each non-empty child is itself flat, so its slots
    # can simply be spliced in here.
    flat_cubes, flat_children, flat_names = [], [], []
    for cube, child, name in zip(
        node['cubes'], node['children'], node['children_names']
    ):
        if len(child) == 0:
            flat_cubes.append(cube)
            flat_children.append(child)
            flat_names.append(name)
        else:
            flat_cubes.extend(child['cubes'])
            flat_children.extend(child['children'])
            flat_names.extend(child['children_names'])
    node['cubes'] = flat_cubes
    node['children'] = flat_children
    node['children_names'] = flat_names
def semFlattenHier(hier, category):
    """Flatten every sub-tree whose semantic name is in SFLAT_MAP[category].

    A flattened node absorbs all its descendant leaves; its own subtree is
    therefore not traversed further.
    """
    flat_names = SFLAT_MAP[category]
    queue = [hier]
    while len(queue) > 0:
        node = queue.pop(0)
        if node['name'] in flat_names:
            flattenNode(node)
        else:
            queue.extend(c for c in node['children'] if len(c) > 0)
def semReHier(hier, category):
    """Move top-level children named in SRH_MAP[category] underneath the
    designated target child; if only the target then remains, promote its
    contents up to the root."""
    if category not in SRH_MAP:
        return
    target_name, move_names = SRH_MAP[category]
    if target_name not in hier['children_names']:
        return
    # Collect movable slots in descending index order so the pops below
    # never invalidate a pending index.
    move_inds = [
        i for i, name in enumerate(hier['children_names']) if name in move_names
    ]
    move_inds.sort(reverse=True)
    ti = hier['children_names'].index(target_name)
    target = hier['children'][ti]
    for i in move_inds:
        for key in ['children_names', 'cubes', 'children']:
            target[key].append(hier[key][i])
    for i in move_inds:
        for key in ['children_names', 'cubes', 'children']:
            hier[key].pop(i)
    if len(hier['children']) == 1:
        only_child = hier['children'][0]
        hier['children_names'] = only_child['children_names']
        hier['cubes'] = only_child['cubes']
        hier['children'] = only_child['children']
# Canonical center point of each cuboid face in the cube's normalized
# [0, 1]^3 UV coordinates (consumed by isFaceAtt below).
right = torch.tensor([1.0,0.5,0.5])
left = torch.tensor([0.0,0.5,0.5])
top = torch.tensor([0.5,1.0,0.5])
bot = torch.tensor([0.5,0.0,0.5])
front = torch.tensor([0.5,0.5,1.0])
back = torch.tensor([0.5,0.5,0.0])
def isFaceAtt(face, oface):
    """Classify `face` (an attachment point in [0,1]^3) as one of the six
    canonical cuboid faces, provided `oface` sits on the opposing face of
    the other cube. Returns the face name or None.

    Disabled entirely (returns None) unless DO_SQUEEZE is set.
    """
    if not DO_SQUEEZE:
        return None
    face = torch.tensor(face)
    # (canonical face center, axis checked on the other cube,
    #  opposing coordinate value, face name)
    cases = [
        (right, 0, 0.0, 'right'),
        (left, 0, 1.0, 'left'),
        (top, 1, 0.0, 'top'),
        (bot, 1, 1.0, 'bot'),
        (front, 2, 0.0, 'front'),
        (back, 2, 1.0, 'back'),
    ]
    for center, axis, opp, name in cases:
        if (face - center).norm() < .1 and abs(oface[axis] - opp) < .1:
            return name
    return None
def preProc(node):
    """Final per-node cleanup before serialization: record axis-alignment
    flags, convert cube tensors to plain lists, and annotate attachments
    with bbox-relative indices and face labels."""
    root = node['cubes'][0]
    node['aligned'] = [isAxisAligned(cube, root) for cube in node['cubes']]
    node['cubes'] = [{f: cube[f].tolist() for f in cube} for cube in node['cubes']]
    new_attachments = []
    for i0, i1, a0, a1 in node['attachments']:
        if i0 == 0:
            # Index 0 is the bounding box: re-encode it as -2 (lower half)
            # or -1 (upper half) based on the attachment's y coordinate.
            i0 = -2 if a0[1] < .5 else -1
            # FLIP BBOX TOP AND BOT FOR CONSISTENCY
            a0[1] = 1 - a0[1]
        f0 = isFaceAtt(a0, a1)
        f1 = isFaceAtt(a1, a0)
        assert i1 != 0, 'uh oh'
        new_attachments.append((i0, i1, a0, a1, f0, f1))
    node['attachments'] = new_attachments
def parseJsonToHier(ind, category, get_gt=False):
    """Run the full parsing pipeline for shape `ind` of `category`.

    Stages: semantic flattening / re-hierarchization (flag-gated), leaf
    shortening, cleanup + trim, bbox filling, normalization, attachment
    computation, then — unless get_gt — validity checks and symmetry
    post-processing. Returns the finished hierarchy dict.
    """
    sem_order = getSemOrder(category)
    hier = getShapeHier(ind, category)
    assert len(hier) > 0, 'saw empty hier'
    if DO_SEM_FLATTEN:
        semFlattenHier(hier, category)
    if DO_SEM_REHIER:
        semReHier(hier, category)
    if DO_SHORTEN:
        shortenLeaves(hier)
    cleanHier(hier)
    trimHier(hier)
    fillHier(hier)
    normalizeHier(hier)
    addAttachments(hier, sem_order)
    if get_gt:
        # Ground-truth mode: stop before symmetry simplification.
        return hier
    if DO_VALID_CHECK:
        checkConnected(hier)
    if DO_SIMP_SYMMETRIES:
        sym.addSymSubPrograms(hier)
        sym.addSimpSymmetries(hier)
    if DO_VALID_CHECK:
        checkCubeNum(hier)
    return hier
def memoize(f):
    """Disk-memoize f(ind, category, part) under parse_part_cache/.

    Shadows the earlier 2-argument `memoize` above. Failures are caught
    broadly and cached as None, so a transient error is never retried —
    presumably intentional (skip known-bad shapes); verify if retries are
    wanted. The cache directory must already exist.
    """
    def helper(ind, category, part):
        cdir = "parse_part_cache"
        cached_res = os.listdir(cdir)
        if f"{ind}_{part}" in cached_res:
            return pickle.load(open(f"{cdir}/{ind}_{part}", "rb"))
        else:
            try:
                hier = f(ind, category, part)
            except Exception as e:
                print(f"Failed {ind} with {e}")
                hier = None
            pickle.dump(hier, open(f"{cdir}/{ind}_{part}", "wb"))
            return hier
    return helper
def genAllData(outdir, ind, category):
    """Parse shape `ind` of `category` and dump every sub-program node to
    its own pickle file at {outdir}/{ind}_{part}.

    FIX: part names are now recorded in `seen`, so the '+'-suffix
    de-duplication actually fires; previously `seen` stayed empty forever
    and duplicate part names silently overwrote each other's output files.
    Output files are also closed deterministically now.
    """
    hier = parseJsonToHier(ind, category)
    q = [hier]
    seen = set()
    while len(q) > 0:
        node = q.pop(0)
        for i, c in enumerate(node['children']):
            # Slot 0 is the bbox and always keeps its name.
            while node['children_names'][i] in seen and i > 0:
                node['children_names'][i] += '+'
            if i > 0:
                seen.add(node['children_names'][i])
            if len(c) > 0:
                c['name'] = node['children_names'][i]
                q.append(c)
        node.pop('children')
        preProc(node)
        part = node['name']
        with open(f"{outdir}/{ind}_{part}", "wb") as fh:
            pickle.dump(node, fh)
def loadAllData(in_dir, max_files=int(1e8)):
    """Load up to `max_files` pickled nodes from `in_dir` (sorted by
    filename), skipping entries that were cached as None (failed parses).

    Returns (filenames, nodes) as parallel lists.

    FIX: files are now opened with a context manager; the previous
    `pickle.load(open(...))` leaked one file handle per entry.
    """
    ninds = []
    nodes = []
    for fname in sorted(os.listdir(in_dir))[:max_files]:
        with open(f"{in_dir}/{fname}", "rb") as fh:
            node = pickle.load(fh)
        if node is not None:
            ninds.append(fname)
            nodes.append(node)
    return ninds, nodes
if __name__ == '__main__':
    # CLI: <category> <output dir> <path to raw shape data>
    category = sys.argv[1]
    outdir = sys.argv[2]
    PATH_TO_SA_DATA = sys.argv[3]
    inds = [i.split('.')[0] for i in os.listdir(PATH_TO_SA_DATA)]
    # FIX: os.makedirs avoids spawning a shell — the old
    # os.system(f'mkdir {outdir}') was injection-prone for paths with
    # spaces/metacharacters and failed silently on reruns.
    os.makedirs(outdir, exist_ok=True)
    from tqdm import tqdm
    for ind in tqdm(inds):
        try:
            genAllData(outdir, str(ind), category)
        except Exception as e:
            # Best-effort batch run: report the failure and keep going.
            print(e)
| 20,758 | 25.95974 | 100 | py |
ShapeMOD | ShapeMOD-main/SA_lang/tasks/infer_recon_metrics.py | from ShapeAssembly import hier_execute
import sa_utils as utils
import torch
import os
import sys
import math
import faiss
import numpy as np
from valid import check_stability, check_rooted
device = torch.device("cuda")
class SimpChamferLoss(torch.nn.Module):
    """GPU (faiss) chamfer-distance and F-score metrics between point clouds."""
    def __init__(self, device):
        super(SimpChamferLoss, self).__init__()
        self.dimension = 3  # point clouds are 3D
        self.gpu_id = torch.cuda.current_device()
        self.res = faiss.StandardGpuResources()
    def build_nn_index(self, database):
        """
        :param database: numpy array of Nx3
        :return: Faiss index, on GPU
        """
        index_cpu = faiss.IndexFlatL2(self.dimension)
        index = faiss.index_cpu_to_gpu(self.res, self.gpu_id, index_cpu)
        index.add(database)
        return index
    def search_nn(self, index, query, k):
        """Euclidean distances to the k nearest neighbors of each query point.

        FIX: `k` was previously ignored (hard-coded to 1); existing callers
        all pass k=1, so behavior is unchanged for them.
        """
        D, I = index.search(query, k)
        return np.sqrt(D)
    def getAvgDist(self, index, query):
        """90th-percentile nearest-(other)-neighbor distance of `query` —
        used as the data-driven 'def' F-score threshold."""
        D, I = index.search(query, 2)
        return math.sqrt(np.percentile(D[:, 1], 90))
    def calc_metrics(self, predict_pc, gt_pc, threshes):
        """
        :param predict_pc: Bx3xM tensor on GPU (only batch 0 is scored)
        :param gt_pc: Bx3xN tensor on GPU
        :param threshes: F-score thresholds; the string 'def' selects the
            data-driven threshold from the GT cloud
        :return: [chamfer distance] + one F-score per threshold
        """
        predict_pc_np = np.ascontiguousarray(
            torch.transpose(predict_pc.data.clone(), 1, 2).cpu().numpy()
        )  # BxMx3
        gt_pc_np = np.ascontiguousarray(
            torch.transpose(gt_pc.data.clone(), 1, 2).cpu().numpy()
        )  # BxNx3
        index_predict = self.build_nn_index(predict_pc_np[0])
        index_gt = self.build_nn_index(gt_pc_np[0])
        fwd_dist = self.search_nn(index_gt, predict_pc_np[0], 1)
        bwd_dist = self.search_nn(index_predict, gt_pc_np[0], 1)
        # Symmetric chamfer distance: mean NN distance in both directions.
        cd = (fwd_dist.mean() / 2) + (bwd_dist.mean() / 2)
        ones = np.ones(fwd_dist.shape)
        fscores = []
        for thresh in threshes:
            if thresh == 'def':
                thresh = self.getAvgDist(index_gt, gt_pc_np[0])
            precision = (100 / ones.shape[0]) * np.sum(ones[fwd_dist <= thresh])
            recall = (100 / ones.shape[0]) * np.sum(ones[bwd_dist <= thresh])
            fs = (2 * precision * recall) / (precision + recall + 1e-8)
            fscores.append(fs)
        return [cd] + fscores
class SimpCPUChamferLoss(torch.nn.Module):
    """CPU (faiss) chamfer-distance and F-score metrics between point clouds.

    Same computation as SimpChamferLoss but with plain CPU faiss indices,
    so it works without a GPU.
    """
    def __init__(self):
        super(SimpCPUChamferLoss, self).__init__()
        self.dimension = 3  # point clouds are 3D
    def build_nn_index(self, database):
        """
        :param database: numpy array of Nx3
        :return: Faiss index, in CPU
        """
        index = faiss.IndexFlatL2(self.dimension)
        index.add(database)
        return index
    def search_nn(self, index, query, k):
        """Euclidean distances to the k nearest neighbors of each query point.

        FIX: `k` was previously ignored (hard-coded to 1); existing callers
        all pass k=1, so behavior is unchanged for them.
        """
        D, I = index.search(query, k)
        return np.sqrt(D)
    def getAvgDist(self, index, query):
        """90th-percentile nearest-(other)-neighbor distance of `query` —
        used as the data-driven 'def' F-score threshold."""
        D, I = index.search(query, 2)
        return math.sqrt(np.percentile(D[:, 1], 90))
    def calc_metrics(self, predict_pc, gt_pc, threshes):
        """
        :param predict_pc: Bx3xM tensor (only batch 0 is scored)
        :param gt_pc: Bx3xN tensor
        :param threshes: F-score thresholds; the string 'def' selects the
            data-driven threshold from the GT cloud
        :return: [chamfer distance] + one F-score per threshold
        """
        predict_pc_np = np.ascontiguousarray(
            torch.transpose(predict_pc.data.clone(), 1, 2).cpu().numpy()
        )  # BxMx3
        gt_pc_np = np.ascontiguousarray(
            torch.transpose(gt_pc.data.clone(), 1, 2).cpu().numpy()
        )  # BxNx3
        index_predict = self.build_nn_index(predict_pc_np[0])
        index_gt = self.build_nn_index(gt_pc_np[0])
        fwd_dist = self.search_nn(index_gt, predict_pc_np[0], 1)
        bwd_dist = self.search_nn(index_predict, gt_pc_np[0], 1)
        # Symmetric chamfer distance: mean NN distance in both directions.
        cd = (fwd_dist.mean() / 2) + (bwd_dist.mean() / 2)
        ones = np.ones(fwd_dist.shape)
        fscores = []
        for thresh in threshes:
            if thresh == 'def':
                thresh = self.getAvgDist(index_gt, gt_pc_np[0])
            precision = (100 / ones.shape[0]) * np.sum(ones[fwd_dist <= thresh])
            recall = (100 / ones.shape[0]) * np.sum(ones[bwd_dist <= thresh])
            fs = (2 * precision * recall) / (precision + recall + 1e-8)
            fscores.append(fs)
        return [cd] + fscores
# Module-level CPU chamfer metric shared by getSampMetrics below.
chamfer = SimpCPUChamferLoss()
def getSampMetrics(verts, faces, t_samps):
    """Sample the predicted mesh surface (matching the target sample count)
    and score it against `t_samps`.

    Returns a dict with the chamfer distance and F-scores at thresholds
    .05 / .03 / .01 plus the data-driven default.
    """
    n_samps = t_samps.shape[0]
    pred_samps = utils.sample_surface(faces, verts.unsqueeze(0), n_samps, False)[0]
    pred_samps = torch.clamp(pred_samps, -1, 1)
    cd, fs05, fs03, fs01, fsdef = chamfer.calc_metrics(
        pred_samps.T.unsqueeze(0).float(),
        t_samps.T.unsqueeze(0).float(),
        [.05, .03, .01, 'def']
    )
    return {
        'cd': cd,
        'fscore-05': fs05,
        'fscore-03': fs03,
        'fscore-01': fs01,
        'fscore-def': fsdef,
    }
def getShapeIoU(cubes, gt_cubes):
    """Voxel IoU (in percent) between predicted and ground-truth cube sets.

    NOTE(review): `shape_voxelize` is not defined or imported in this
    module's visible header — confirm where it comes from before calling
    (this function also appears unused by recon_metrics below).
    """
    pvoxels = shape_voxelize(cubes)
    tvoxels = shape_voxelize(gt_cubes)
    iou = 100 * (
        (pvoxels & tvoxels).sum().item()
        / (pvoxels | tvoxels).sum().item()
    )
    return iou
def recon_metrics(
    recon_sets, outpath, exp_name, name, epoch, VERBOSE, num_gen
):
    """Score reconstructed programs against ground truth and dump up to
    `num_gen` obj/program pairs for inspection.

    :param recon_sets: iterable of (pred prog, gt prog, shape index, gt samples)
    :param outpath/exp_name/name/epoch: output path components for dumps
    :param VERBOSE: print per-shape failures
    :param num_gen: max number of obj/program pairs to write out
    :return: (dict of mean metrics, count of failed executions)

    NOTE(review): the 'iou' bucket is never appended to, so its mean is
    always reported as 0 — confirm whether getShapeIoU was meant to be
    called here.
    """
    misses = 0.
    results = {
        'iou': [],
        'cd': [],
        'fscore-def': [],
        'fscore-01': [],
        'fscore-03': [],
        'fscore-05': [],
        'rooted': [],
        'stable': []
    }
    count = 0
    for prog, gt_prog, prog_ind, gt_pts in recon_sets:
        gt_verts, gt_faces, gt_cubes = hier_execute(gt_prog, return_all = True)
        try:
            verts, faces, cubes = hier_execute(prog, return_all = True)
            assert not torch.isnan(verts).any(), 'saw nan vert'
            # Physical-validity checks are best-effort; a failure here does
            # not count the shape as a miss.
            try:
                if check_rooted(verts, faces):
                    results['rooted'].append(1.)
                else:
                    results['rooted'].append(0.)
                if check_stability(verts, faces):
                    results['stable'].append(1.)
                else:
                    results['stable'].append(0.)
            except Exception as e:
                print(f"Failed rooted/stable with {e} ???")
        except Exception as e:
            # Execution failure: record the miss and skip all metrics.
            misses += 1.
            if VERBOSE:
                print(f"failed recon metrics for {prog_ind} with {e}")
            continue
        gt_objs = os.listdir(f"{outpath}/{exp_name}/objs/gt/")
        try:
            sm = getSampMetrics(verts, faces, gt_pts)
            for k, v in sm.items():
                if v is not None:
                    results[k].append(v)
        except Exception as e:
            if VERBOSE:
                print(f"failed Samp Metrics for {prog_ind} with {e}")
        # Only dump the first num_gen shapes to disk.
        if count >= num_gen:
            continue
        if f"{prog_ind}.obj" not in gt_objs:
            utils.writeObj(gt_verts, gt_faces, f"{outpath}/{exp_name}/objs/gt/{prog_ind}.obj")
            if 'dsl_prog' in gt_prog:
                utils.writeHierProg(gt_prog, 'dsl_prog', f"{outpath}/{exp_name}/programs/gt/{prog_ind}.txt")
            else:
                utils.sawriteHierProg(gt_prog, f"{outpath}/{exp_name}/programs/gt/{prog_ind}.txt")
        try:
            utils.writeObj(
                verts, faces, f"{outpath}/{exp_name}/objs/{name}/{epoch}_{prog_ind}.obj"
            )
            if 'dsl_prog' in prog:
                utils.writeHierProg(
                    prog, 'dsl_prog', f"{outpath}/{exp_name}/programs/{name}/{epoch}_{prog_ind}.txt"
                )
            else:
                utils.sawriteHierProg(
                    prog, f"{outpath}/{exp_name}/programs/{name}/{epoch}_{prog_ind}.txt"
                )
            count += 1
        except Exception as e:
            if VERBOSE:
                print(f"Failed writing prog/obj for {prog_ind} with {e}")
    # Reduce each bucket to its mean; empty buckets report 0.
    for key in results:
        if len(results[key]) > 0:
            res = torch.tensor(results[key]).mean().item()
        else:
            res = 0.
        results[key] = res
    return results, misses
| 8,389 | 30.660377 | 108 | py |
ShapeMOD | ShapeMOD-main/SA_lang/tasks/losses.py | import torch
import torch.nn as nn
import faiss
import numpy as np
import math
import generate as gen
import execute as ex
from copy import deepcopy
import json_parse as jp
def robust_norm(var, dim=2):
    """L2 norm of `var` along `dim`, with a small epsilon inside the sqrt
    for numerical (gradient) stability at zero."""
    squared_sum = (var ** 2).sum(dim=dim)
    return (squared_sum + 1e-8).sqrt()
class FScore():
    """F-score between two point clouds via CPU faiss nearest neighbors,
    thresholded at the GT cloud's 90th-percentile NN spacing."""
    def __init__(self):
        self.dimension = 3
        self.k = 1
        self.forward_loss = torch.FloatTensor([0])
        self.backward_loss = torch.FloatTensor([0])
    def build_nn_index(self, database):
        """
        :param database: numpy array of Nx3
        :return: Faiss index, in CPU
        """
        index_cpu = faiss.IndexFlatL2(self.dimension)
        index = index_cpu
        index.add(database)
        return index
    def search_nn(self, index, query, k):
        """
        :param index: Faiss index
        :param query: numpy array of Nx3
        :return: D: Variable of Nxk, type FloatTensor, in GPU
                 I: Variable of Nxk, type LongTensor, in GPU
        """
        D, I = index.search(query, k)
        D_var = torch.from_numpy(np.ascontiguousarray(D))
        I_var = torch.from_numpy(np.ascontiguousarray(I).astype(np.int64))
        return D_var, I_var
    def getAvgDist(self, index, query):
        # 90th-percentile distance from each point to its nearest *other*
        # point (column 1; column 0 is the point itself at distance 0).
        D, I = index.search(query, 2)
        m_d = math.sqrt(np.percentile(D[:,1],90))
        return m_d
    def getOpMatch(self, points):
        # 1% of the largest bounding extent.
        # NOTE(review): appears unused within this module.
        return (points.max(axis = 0) - points.min(axis = 0)).max() / 100
    def score(self, predict_pc_6, gt_pc_6, use_normals=True):
        """
        :param predict_pc: Bx3xM Variable in GPU
        :param gt_pc: Bx3xN Variable in GPU
        :return:
        """
        # Only the xyz channels enter the score; the normal channels are
        # gathered below but never used in the final computation.
        predict_pc = predict_pc_6[:, :3, :].cpu()
        gt_pc = gt_pc_6[:, :3, :].cpu()
        #
        predict_pcn = predict_pc_6[:, 3:, :].cpu()
        gt_pcn = gt_pc_6[:, 3:, :].cpu()
        predict_pc_size = predict_pc.size()
        gt_pc_size = gt_pc.size()
        predict_pc_np = np.ascontiguousarray(
            torch.transpose(predict_pc.data.clone(), 1, 2).cpu().numpy()
        )  # BxMx3
        gt_pc_np = np.ascontiguousarray(
            torch.transpose(gt_pc.data.clone(), 1, 2).cpu().numpy()
        )  # BxNx3
        # selected_gt: Bxkx3xM
        selected_gt_by_predict = torch.FloatTensor(
            predict_pc_size[0], self.k, predict_pc_size[1], predict_pc_size[2]
        )
        # selected_predict: Bxkx3xN
        selected_predict_by_gt = torch.FloatTensor(
            gt_pc_size[0], self.k, gt_pc_size[1], gt_pc_size[2]
        )
        if use_normals:
            # normals
            selected_gt_by_predictn = torch.FloatTensor(
                predict_pc_size[0], self.k, predict_pc_size[1], predict_pc_size[2]
            )
            selected_predict_by_gtn = torch.FloatTensor(
                gt_pc_size[0], self.k, gt_pc_size[1], gt_pc_size[2]
            )
        # process each batch independently.
        for i in range(predict_pc_np.shape[0]):
            index_predict = self.build_nn_index(predict_pc_np[i])
            index_gt = self.build_nn_index(gt_pc_np[i])
            # database is gt_pc, predict_pc -> gt_pc -----------------------------------------------------------
            _, I_var = self.search_nn(index_gt, predict_pc_np[i], self.k)
            # process nearest k neighbors
            for k in range(self.k):
                selected_gt_by_predict[i, k, ...] = gt_pc[i].index_select(
                    1, I_var[:, k]
                )
                if use_normals:
                    selected_gt_by_predictn[i, k, ...] = gt_pcn[i].index_select(
                        1, I_var[:, k]
                    )
            # database is predict_pc, gt_pc -> predict_pc -------------------------------------------------------
            _, I_var = self.search_nn(index_predict, gt_pc_np[i], self.k)
            # process nearest k neighbors
            for k in range(self.k):
                selected_predict_by_gt[i, k, ...] = predict_pc[i].index_select(
                    1, I_var[:, k]
                )
                if use_normals:
                    selected_predict_by_gtn[i, k, ...] = predict_pcn[i].index_select(
                        1, I_var[:, k]
                    )
            # NOTE(review): index_gt is rebuilt here (duplicate of above),
            # and `dist` is always computed from gt_pc_np[0] regardless of
            # the batch index — only the last loop iteration's value is
            # used below. Confirm this is intended for B > 1.
            index_gt = self.build_nn_index(gt_pc_np[i])
            dist = self.getAvgDist(index_gt, gt_pc_np[0])
        # compute loss ===================================================
        # selected_gt(Bxkx3xM) vs predict_pc(Bx3xM)
        # NOTE(review): these two assignments are dead — both names are
        # immediately overwritten by the robust_norm versions below.
        r_to_gt = predict_pc.unsqueeze(1).expand_as(selected_gt_by_predict) - selected_gt_by_predict
        gt_to_r = selected_predict_by_gt - gt_pc.unsqueeze(1).expand_as(selected_predict_by_gt)
        r_to_gt = robust_norm(
            selected_gt_by_predict
            - predict_pc.unsqueeze(1).expand_as(selected_gt_by_predict),
            dim=2
        ).cpu().detach().numpy()
        gt_to_r = robust_norm(
            selected_predict_by_gt
            - gt_pc.unsqueeze(1).expand_as(selected_predict_by_gt),
            dim=2
        ).cpu().detach().numpy()
        r_to_gt = r_to_gt.flatten()
        gt_to_r = gt_to_r.flatten()
        ones = np.ones(r_to_gt.shape)
        dt = dist
        # Precision: % of predicted points near GT; recall: % of GT points
        # near the prediction; combined into the standard F1.
        precision = (100 / ones.shape[0]) * np.sum(ones[r_to_gt < dt])
        recall = (100 / ones.shape[0]) * np.sum(ones[gt_to_r < dt])
        return (2*precision*recall) / (precision + recall + 1e-8)
class ChamferLoss(nn.Module):
    """Hinged symmetric chamfer loss between point clouds, using faiss
    (GPU when available, CPU otherwise) for nearest-neighbor search.

    Distances below `thresh` contribute nothing (relu hinge), so only
    points farther than the threshold are penalized.
    """
    def __init__(self, device):
        super(ChamferLoss, self).__init__()
        self.dimension = 3
        self.k = 1
        #
        self.device = device
        self.gpu_id = torch.cuda.current_device()
        # Use GPU faiss only if this faiss build provides it.
        self.faiss_gpu = hasattr(faiss, 'StandardGpuResources')
        if self.faiss_gpu:
            self.res = faiss.StandardGpuResources()
            self.flat_config = faiss.GpuIndexFlatConfig()
            self.flat_config.device = self.gpu_id
    def build_nn_index(self, database):
        """
        :param database: numpy array of Nx3
        :return: Faiss index, in CPU
        """
        # index = faiss.GpuIndexFlatL2(self.res, self.dimension, self.flat_config) # dimension is 3
        index_cpu = faiss.IndexFlatL2(self.dimension)
        if self.faiss_gpu:
            index = faiss.index_cpu_to_gpu(self.res, self.gpu_id, index_cpu)
        else:
            index = index_cpu
        index.add(database)
        return index
    def search_nn(self, index, query, k):
        """
        :param index: Faiss index
        :param query: numpy array of Nx3
        :return: D: Variable of Nxk, type FloatTensor, in GPU
                 I: Variable of Nxk, type LongTensor, in GPU
        """
        D, I = index.search(query, k)
        D_var = torch.from_numpy(np.ascontiguousarray(D))
        I_var = torch.from_numpy(np.ascontiguousarray(I).astype(np.int64))
        if self.gpu_id >= 0:
            D_var = D_var.to(self.device)
            I_var = I_var.to(self.device)
        return D_var, I_var
    def forward(self, predict_pc_6, gt_pc_6, thresh, keep_dim=False, use_normals=True):
        """
        :param predict_pc: Bx3xM Variable in GPU
        :param gt_pc: Bx3xN Variable in GPU
        :return:
        """
        # First 3 channels are xyz; remaining channels (if use_normals)
        # are normals, gathered below but not part of the returned loss.
        predict_pc = predict_pc_6[:, :3, :]
        gt_pc = gt_pc_6[:, :3, :]
        #
        if use_normals:
            predict_pcn = predict_pc_6[:, 3:, :]
            gt_pcn = gt_pc_6[:, 3:, :]
        predict_pc_size = predict_pc.size()
        gt_pc_size = gt_pc.size()
        predict_pc_np = np.ascontiguousarray(
            torch.transpose(predict_pc.data.clone(), 1, 2).cpu().numpy()
        )  # BxMx3
        gt_pc_np = np.ascontiguousarray(
            torch.transpose(gt_pc.data.clone(), 1, 2).cpu().numpy()
        )  # BxNx3
        # selected_gt: Bxkx3xM
        selected_gt_by_predict = torch.FloatTensor(
            predict_pc_size[0], self.k, predict_pc_size[1], predict_pc_size[2]
        )
        # selected_predict: Bxkx3xN
        selected_predict_by_gt = torch.FloatTensor(
            gt_pc_size[0], self.k, gt_pc_size[1], gt_pc_size[2]
        )
        if use_normals:
            # normals
            selected_gt_by_predictn = torch.FloatTensor(
                predict_pc_size[0], self.k, predict_pc_size[1], predict_pc_size[2]
            )
            selected_predict_by_gtn = torch.FloatTensor(
                gt_pc_size[0], self.k, gt_pc_size[1], gt_pc_size[2]
            )
        if self.gpu_id >= 0:
            selected_gt_by_predict = selected_gt_by_predict.to(self.device)
            selected_predict_by_gt = selected_predict_by_gt.to(self.device)
            if use_normals:
                selected_gt_by_predictn = selected_gt_by_predictn.to(self.device)
                selected_predict_by_gtn = selected_predict_by_gtn.to(self.device)
        # process each batch independently.
        for i in range(predict_pc_np.shape[0]):
            index_predict = self.build_nn_index(predict_pc_np[i])
            index_gt = self.build_nn_index(gt_pc_np[i])
            # database is gt_pc, predict_pc -> gt_pc -----------------------------------------------------------
            _, I_var = self.search_nn(index_gt, predict_pc_np[i], self.k)
            # process nearest k neighbors
            for k in range(self.k):
                selected_gt_by_predict[i, k, ...] = gt_pc[i].index_select(
                    1, I_var[:, k]
                )
                if use_normals:
                    selected_gt_by_predictn[i, k, ...] = gt_pcn[i].index_select(
                        1, I_var[:, k]
                    )
            # database is predict_pc, gt_pc -> predict_pc -------------------------------------------------------
            _, I_var = self.search_nn(index_predict, gt_pc_np[i], self.k)
            # process nearest k neighbors
            for k in range(self.k):
                selected_predict_by_gt[i, k, ...] = predict_pc[i].index_select(
                    1, I_var[:, k]
                )
                if use_normals:
                    selected_predict_by_gtn[i, k, ...] = predict_pcn[i].index_select(
                        1, I_var[:, k]
                    )
        # compute loss ===================================================
        # selected_gt(Bxkx3xM) vs predict_pc(Bx3xM)
        forward_loss_element = robust_norm(
            selected_gt_by_predict
            - predict_pc.unsqueeze(1).expand_as(selected_gt_by_predict)
        )
        self.forward_loss = forward_loss_element.mean()
        self.forward_loss_array = forward_loss_element.mean(dim=1).mean(dim=1)
        # selected_predict(Bxkx3xN) vs gt_pc(Bx3xN)
        backward_loss_element = robust_norm(
            selected_predict_by_gt
            - gt_pc.unsqueeze(1).expand_as(selected_predict_by_gt)
        )  # BxkxN
        self.backward_loss = backward_loss_element.mean()
        self.backward_loss_array = backward_loss_element.mean(dim=1).mean(dim=1)
        #
        # Hinge at `thresh`: only distances above it are penalized. With
        # keep_dim, losses stay per-batch; otherwise they reduce to scalars.
        if keep_dim:
            self.forward_loss = torch.relu(forward_loss_element - thresh).squeeze(1).mean(dim=1)
            self.backward_loss = torch.relu(backward_loss_element - thresh).squeeze(1).mean(dim=1)
            return self.forward_loss + self.backward_loss
        else:
            self.forward_loss = torch.relu(forward_loss_element.flatten() - thresh).mean()
            self.backward_loss = torch.relu(backward_loss_element.flatten() - thresh).mean()
            return self.forward_loss + self.backward_loss
    def __call__(self, predict_pc, gt_pc, ang_wt, keep_dim=False):
        # NOTE(review): `ang_wt` is forwarded as forward()'s `thresh`
        # parameter — the naming mismatch suggests a leftover from an
        # angle-weighted variant; confirm the intended semantics.
        # start_time = time.time()
        loss = self.forward(predict_pc, gt_pc, ang_wt, keep_dim)
        # print(time.time()w-start_time)
        return loss
def weighted_mae_loss(input, target, weight):
    """Weighted sum of absolute errors between `input` and `target`."""
    abs_err = (input - target).abs()
    return (weight * abs_err).sum()
# Binary criterion for the cube 'aligned' bit (feature 62); pos_weight < 1
# down-weights the positive class.
closs = torch.nn.BCEWithLogitsLoss(reduction='sum', pos_weight = torch.tensor(0.2))
# Shared cross-entropy for all categorical heads (commands, cube ids, axes, faces).
celoss = nn.CrossEntropyLoss(reduction='sum')
# 0 -> start
# 1 -> Cuboid
# 2 -> attach
# 3 -> reflect
# 4 -> translate
# 5 -> squeeze
# 6 -> end
# 0 - 7 -> commandIndex
# 7 -> 18 -> cuboid1
# 18 -> 29 -> cuboid2
# 29 -> 40 -> cuboid3
# 40 -> 43 -> l,h,w
# 43 -> 46 -> x1,y1,z1
# 46 -> 49 -> x2,y2,z2
# 49 - 52 -> symaxis
# 52 -> num
# 53 -> scale
# 54 - 60 face
# 60 - 62 - UV
# 62 -> aligned
# BBOX DIMS 40 42
class ProgLoss(nn.Module):
    """Per-token loss and accuracy counters for predicted ShapeAssembly
    programs. See the feature-layout comment above this class for what each
    slice of the last dimension encodes.

    forward() returns a dict mixing loss tensors ('cmd', 'cub_prm', ...)
    with float accuracy counts ('cmdc', 'cubc', ...) and denominators
    ('na', 'nc', 'ns', 'nsq', ...).
    """
    def forward(self, pred, target, weight):
        # pred/target/weight: B x T x 63 token sequences; truncate the
        # target to the predicted length if it is longer.
        if target.shape[1] > pred.shape[1]:
            target = target[:,:pred.shape[1],:]
            weight = weight[:,:pred.shape[1],:]
        # Command head (first 7 logits): cross-entropy + exact-match count.
        commands = torch.argmax(target[:,:,:7], dim = 2).flatten()
        pcommands = torch.argmax(pred[:,:,:7], dim = 2).flatten()
        cmdc = (commands == pcommands).sum().item() * 1.0
        cmd_loss = celoss(
            pred[:,:,:7].view(-1,7),
            commands
        )
        # Flattened token positions of each command type
        # (1=Cuboid, 2=attach, 3=reflect, 4=translate, 5=squeeze).
        cub_inds = (commands==1).nonzero().flatten()
        ap_inds = (commands==2).nonzero().flatten()
        ref_inds = (commands==3).nonzero().flatten()
        trans_inds = (commands==4).nonzero().flatten()
        sq_inds = (commands==5).nonzero().flatten()
        cub_prm_loss = 0.
        align_loss = 0.
        align_num_pos = 0.
        align_num_neg = 0.
        align_posc = 0.
        align_negc = 0.
        # NOTE(review): `.sum() > 0` on an index tensor sums the *indices*,
        # so a single match at flattened position 0 is treated as "no
        # matches". The same pattern guards every branch below — confirm
        # whether `.numel() > 0` was intended.
        if cub_inds.sum() > 0:
            cub_prm_loss = weighted_mae_loss(
                pred[:,cub_inds,:], target[:,cub_inds,:], weight[:,cub_inds,:]
            )
        if cub_inds.sum() > 1:
            # Alignment bit (feature 62) on every cuboid except the first
            # (the bbox); scored as a weighted binary classification.
            align_pos_inds = cub_inds[1:][(target[:, cub_inds[1:], 62].flatten() == 1.).nonzero().flatten()]
            align_neg_inds = cub_inds[1:][(target[:, cub_inds[1:], 62].flatten() == 0.).nonzero().flatten()]
            align_num_pos += align_pos_inds.shape[0]
            align_num_neg += align_neg_inds.shape[0]
            align_loss = closs(
                pred[:, cub_inds[1:], 62].flatten(),
                target[:, cub_inds[1:], 62].flatten()
            )
            align_posc = (pred[:, align_pos_inds, 62] > 0.).sum().float().item()
            align_negc = (pred[:, align_neg_inds, 62] <= 0.).sum().float().item()
        xyz_prm_loss = 0
        cubc = 0
        ap_cub_loss = 0
        # attach tokens: continuous xyz params + two cube-id heads.
        if ap_inds.sum() > 0:
            xyz_prm_loss = weighted_mae_loss(
                pred[:,ap_inds,:], target[:,ap_inds,:], weight[:,ap_inds,:]
            )
            ap_cube_1s = torch.argmax(target[:,ap_inds,7:18], dim=2).flatten()
            ap_cube_2s = torch.argmax(target[:,ap_inds,18:29], dim=2).flatten()
            ap_pcube_1s = torch.argmax(pred[:,ap_inds,7:18], dim=2).flatten()
            ap_pcube_2s = torch.argmax(pred[:,ap_inds,18:29], dim=2).flatten()
            # Counted correct only when BOTH cube ids match.
            ap_cubc = ((ap_pcube_1s == ap_cube_1s) * (ap_pcube_2s == ap_cube_2s)).sum().item() * 1.0
            cubc = ap_cubc
            ap_cub_loss = celoss(
                pred[:,ap_inds,7:18].view(-1, 11),
                ap_cube_1s
            )
            ap_cub_loss += celoss(
                pred[:,ap_inds,18:29].view(-1, 11),
                ap_cube_2s
            )
        cub_loss = ap_cub_loss
        axisc = 0.
        axis_loss = 0.
        sym_cubc = 0.
        sym_cub_loss = 0.
        sym_prm_loss = 0.
        # reflect tokens: one cube id + one symmetry axis.
        if ref_inds.sum() > 0:
            ref_cube_1s = torch.argmax(target[:,ref_inds,7:18], dim=2).flatten()
            ref_pcube_1s = torch.argmax(pred[:,ref_inds,7:18], dim=2).flatten()
            ref_cubc = ((ref_pcube_1s == ref_cube_1s)).sum().item() * 1.0
            ref_axis = torch.argmax(target[:, ref_inds, 49:52], dim=2).flatten()
            ref_paxis = torch.argmax(pred[:, ref_inds, 49:52], dim=2).flatten()
            ref_axisc = ((ref_paxis == ref_axis)).sum().item() * 1.0
            ref_axis_loss = celoss(
                pred[:,ref_inds,49:52].view(-1, 3),
                ref_axis
            )
            ref_cub_loss = celoss(
                pred[:,ref_inds,7:18].view(-1, 11),
                ref_cube_1s
            )
            axisc += ref_axisc
            axis_loss += ref_axis_loss
            sym_cubc += ref_cubc
            sym_cub_loss += ref_cub_loss
        # translate tokens: cube id + axis + continuous num/scale params.
        if trans_inds.sum() > 0:
            trans_cube_1s = torch.argmax(target[:,trans_inds,7:18], dim=2).flatten()
            trans_pcube_1s = torch.argmax(pred[:,trans_inds,7:18], dim=2).flatten()
            trans_cubc = ((trans_pcube_1s == trans_cube_1s)).sum().item() * 1.0
            trans_axis = torch.argmax(target[:, trans_inds, 49:52], dim=2).flatten()
            trans_paxis = torch.argmax(pred[:, trans_inds, 49:52], dim=2).flatten()
            trans_axisc = ((trans_paxis == trans_axis)).sum().item() * 1.0
            trans_axis_loss = celoss(
                pred[:,trans_inds,49:52].view(-1, 3),
                trans_axis
            )
            trans_cub_loss = celoss(
                pred[:,trans_inds,7:18].view(-1, 11),
                trans_cube_1s
            )
            trans_prm_loss = weighted_mae_loss(
                pred[:,trans_inds,:], target[:,trans_inds,:], weight[:,trans_inds,:]
            )
            axisc += trans_axisc
            axis_loss += trans_axis_loss
            sym_cubc += trans_cubc
            sym_cub_loss += trans_cub_loss
            sym_prm_loss += trans_prm_loss
        uv_prm_loss = 0
        sq_cubc = 0
        sq_cub_loss = 0
        face_loss = 0
        facec = 0
        # squeeze tokens: three cube ids + face head + continuous UV params.
        if sq_inds.sum() > 0:
            uv_prm_loss = weighted_mae_loss(
                pred[:,sq_inds,:],
                target[:,sq_inds,:],
                weight[:,sq_inds,:]
            )
            sq_cube_1s = torch.argmax(target[:,sq_inds,7:18], dim=2).flatten()
            sq_cube_2s = torch.argmax(target[:,sq_inds,18:29], dim=2).flatten()
            sq_cube_3s = torch.argmax(target[:,sq_inds,29:40], dim=2).flatten()
            faces = torch.argmax(target[:, sq_inds, 54:60], dim=2).flatten()
            sq_pcube_1s = torch.argmax(pred[:,sq_inds,7:18], dim=2).flatten()
            sq_pcube_2s = torch.argmax(pred[:,sq_inds,18:29], dim=2).flatten()
            sq_pcube_3s = torch.argmax(pred[:,sq_inds,29:40], dim=2).flatten()
            pfaces = torch.argmax(pred[:,sq_inds,54:60], dim=2).flatten()
            # Counted correct only when all three cube ids match.
            sq_cubc = (
                (sq_pcube_1s == sq_cube_1s) * \
                (sq_pcube_2s == sq_cube_2s) * \
                (sq_pcube_3s == sq_cube_3s)
            ).sum().item() * 1.0
            facec = (faces == pfaces).sum().item() * 1.0
            sq_cub_loss = celoss(
                pred[:,sq_inds,7:18].view(-1, 11),
                sq_cube_1s
            )
            sq_cub_loss += celoss(
                pred[:,sq_inds,18:29].view(-1, 11),
                sq_cube_2s
            )
            sq_cub_loss += celoss(
                pred[:,sq_inds,29:40].view(-1, 11),
                sq_cube_3s
            )
            face_loss = celoss(
                pred[:,sq_inds,54:60].view(-1, 6),
                faces
            )
        losses = {
            'cmd': cmd_loss,
            'cub_prm': cub_prm_loss,
            'xyz_prm': xyz_prm_loss,
            'uv_prm': uv_prm_loss,
            'sym_prm': sym_prm_loss,#
            'cub': cub_loss,
            'sq_cub': sq_cub_loss,
            'sym_cub': sym_cub_loss,#
            'axis': axis_loss,#
            'face': face_loss,
            'cmdc': cmdc,
            'cubc': cubc,
            'sq_cubc': sq_cubc,
            'sym_cubc': sym_cubc,#
            'axisc': axisc,#
            'facec': facec,
            'align': align_loss,
            'palignc': align_posc,
            'nalignc': align_negc,
            'nan': align_num_neg,
            'nap': align_num_pos,
            'na': ap_inds.shape[0] * 1.0,
            'nc': cub_inds.shape[0] * 1.0,
            'ns': (ref_inds.shape[0] * 1.0) + (trans_inds.shape[0] * 1.0),
            'nsq': sq_inds.shape[0] * 1.0
        }
        return losses
| 20,155 | 33.811744 | 113 | py |
ShapeMOD | ShapeMOD-main/SA_lang/tasks/make_abs_data.py | import sys
sys.path.append("./dsls")
sys.path.append("../")
sys.path.append("../../")
from tqdm import tqdm
import torch
from ShapeMOD import DSL, Function, ProgNode, OrderedProg
import pickle
import re
import sa_utils as utils
import importlib
def clamp(v, a, b):
    """Constrain v to the closed interval [a, b]."""
    if v < a:
        return a
    if v > b:
        return b
    return v
def make_function(name, args):
    """Render a call-expression string, e.g. make_function('f', [1, 'x']) -> 'f(1, x)'."""
    joined = ", ".join(str(arg) for arg in args)
    return f'{name}({joined})'
def getCuboidDims(F, f_params, j_param_map, bbox_dims):
    """Evaluate the (x, y, z) dimensions of every Cuboid produced by macro F.

    Builds the parameter vector [1.0, bbox dims..., line params...] (discrete
    slots become 'dummy' placeholders, which the affine logic never indexes),
    then evaluates the first three 'f' entries of each Cuboid sub-command as a
    weighted sum over that vector. Returns a list of [x, y, z] lists, one per
    Cuboid in F's structure.
    """
    params = [1.0] + bbox_dims.tolist()
    f_params = f_params.tolist()
    # Interleave continuous params with 'dummy' fillers so indices in the
    # function logic line up with the joint-parameter layout.
    for var in j_param_map:
        if var[0] == 'd':
            params.append('dummy')
        else:
            params.append(f_params.pop(0))
    cube_dims = []
    for fn, logic in zip(F.structure, F.getLogic()):
        if fn != "Cuboid":
            continue
        cube_line = []
        # First three logic entries of a Cuboid are its float dimensions.
        for var in logic[:3]:
            assert var[0] == 'f'
            sum = 0.
            for ind, scale in var[1]:
                sum += float(scale) * params[ind]
            cube_line.append(sum)
        cube_dims.append(cube_line)
    return cube_dims
def tensorize(prog, func_map, max_cparams, max_dparams):
    """Convert one abstracted DSL program into fixed-width training tensors.

    Returns (function-id tensor [T], continuous params [T, max_cparams],
    discrete params [T, max_dparams], cuboid-number -> line-index map,
    per-line list of child line indices), where the sequence is bracketed by
    START/STOP rows. Returns None when prog is None.
    """
    if prog is None:
        return None
    # Vocabulary for discrete tokens: cuboid refs, symmetry axes, squeeze faces.
    nmap = {
        'bbox': 0,
        'X': 0,
        'Y': 1,
        'Z': 2,
        'left': 0,
        'right': 1,
        'bot': 2,
        'top': 3,
        'back': 4,
        'front': 5
    }
    cn_map = {0:0}
    t_lines = [('START',)]
    count = 1
    child_map = [[]]
    for ln, (ret, line) in enumerate(prog):
        child_inds = []
        for n in ret:
            if 'cube' in n:
                # Alias each returned cuboid as temp<k> so line params that
                # reference it map onto a line-local index.
                nmap[f'temp{int(n[4:])}'] = count
                cn_map[int(n[4:])+1] = count
                child_inds.append(count)
                count += 1
            elif 'bbox' in n:
                child_inds.append(0)
        t_lines.append([l.replace('cube','temp') if isinstance(l, str) else l for l in line])
        child_map.append(child_inds)
    t_lines.append(('STOP',))
    child_map.append([])
    prog_funcs = []
    prog_cvars = []
    prog_dvars = []
    for t_line in t_lines:
        prog_funcs.append(func_map[t_line[0]])
        line_cvars = torch.zeros(max_cparams).float()
        line_dvars = torch.zeros(max_dparams).long()
        dv_count = 0
        cv_count = 0
        # Tokens found in nmap are discrete; everything else is a float param.
        for par in t_line[1:]:
            if par in nmap:
                line_dvars[dv_count] = nmap[par]
                dv_count += 1
            else:
                line_cvars[cv_count] = float(par)
                cv_count += 1
        prog_cvars.append(line_cvars)
        prog_dvars.append(line_dvars)
    prog_funcs = torch.tensor(prog_funcs)
    prog_cvars = torch.stack(prog_cvars)
    prog_dvars = torch.stack(prog_dvars)
    return (prog_funcs, prog_cvars, prog_dvars, cn_map, child_map)
def getCatTypeMap(dsl):
    """For each library function, map its categorical variables (c_var_<i>) to
    a semantic type: 'sq' when the variable appears on a squeeze sub-command,
    'sym' when it appears on a translate/reflect sub-command."""
    cat_type_map = {}
    for n, f in dsl.library.items():
        _cat_type_map = {}
        c_count = 0
        seen_inds = set()
        for st, log in zip(f.getStructure(), f.getLogic()):
            for l in log:
                # A not-yet-seen categorical variable slot on this sub-command.
                if l[0] == 'c' and l[1] == 'var' and l[2] not in seen_inds:
                    seen_inds.add(l[2])
                    name = f'c_var_{c_count}'
                    c_count += 1
                    if st == 'squeeze':
                        _cat_type_map[name] = 'sq'
                    elif st == 'translate' or st == 'reflect':
                        _cat_type_map[name] = 'sym'
        cat_type_map[n] = _cat_type_map
    return cat_type_map
def getBestProgram(dsl, node, order_thresh = 1):
    """Pick the lowest-cost abstracted program for `node`.

    Considers up to dsl.abs_order_num canonical orderings; among orderings
    whose cost falls within `order_thresh` of the best, prefers the one with
    the lowest canonical-order score. Returns None if the node has no orders.
    """
    def scoreOrder(ord):
        # Lexicographic-style score: earlier entries dominate via decaying
        # place value (1, .1, .01, ...).
        sum = 0.
        place = 1.
        for o in ord:
            sum += o[1] * place
            place *= .1
        return sum
    best_score = 1e8
    best_program = None
    res = []
    if len(node.orders) == 0:
        return None
    for o in node.orders[:dsl.abs_order_num]:
        canon_sig, line_attrs, ret_attrs = o.canon_info
        score, program, _ = dsl.getApproxBestOps(line_attrs, ret_attrs)
        res.append((score, canon_sig, program))
    res.sort()
    best_score = res[0][0]
    # Tie-break near-optimal programs by their ordering score.
    order_res = [
        (scoreOrder(r[1]), r[2]) for r in res
        if r[0] < (best_score + order_thresh)
    ]
    order_res.sort()
    best_program = order_res[0][1]
    return best_program
def getBestPrograms(dsl, nodes):
    """Map getBestProgram over nodes (with a progress bar), preserving order."""
    return [getBestProgram(dsl, node) for node in tqdm(nodes)]
def form_training_data(dsl, nodes):
    """Build per-function parameter layouts from the DSL library, then
    tensorize the best abstracted program of every node.

    Returns (tensorized programs, func_map name->id, cparam/dparam/jparam
    layout maps keyed by function id, max continuous/discrete param counts,
    and the number of Cuboids each function emits).
    """
    max_cparams = 0 # start with attach max
    max_dparams = 0 # start with squeeze max
    # Ids 0/1 are reserved for the START/STOP sentinel rows.
    func_map = {'START': 0, 'STOP': 1}
    cparam_map = {0: [], 1: []}
    dparam_map = {0: [], 1: []}
    jparam_map = {0: [], 1: []}
    num_cube_map = {0: 0, 1: 0}
    cat_type_map = getCatTypeMap(dsl)
    for n, f in dsl.library.items():
        func_map[n] = len(func_map)
        interf = f.getInterface()
        structure = f.getStructure()
        cparam_map[func_map[n]] = []
        dparam_map[func_map[n]] = []
        jparam_map[func_map[n]] = []
        # Partition the interface into discrete ('i' indices, 'c' categoricals)
        # and continuous slots; jparam_map records the joint ordering.
        for i in interf:
            _i = i.split('_')[0]
            if _i == 'i':
                jparam_map[func_map[n]].append(f'd_{len(dparam_map[func_map[n]])}')
                dparam_map[func_map[n]].append(_i)
            elif _i == 'c':
                jparam_map[func_map[n]].append(f'd_{len(dparam_map[func_map[n]])}')
                dparam_map[func_map[n]].append(cat_type_map[n][i])
            else:
                jparam_map[func_map[n]].append(f'c_{len(cparam_map[func_map[n]])}')
                cparam_map[func_map[n]].append(_i)
        num_cube_map[func_map[n]] = structure.count("Cuboid")
        max_cparams = max(len(cparam_map[func_map[n]]), max_cparams)
        max_dparams = max(len(dparam_map[func_map[n]]), max_dparams)
    best_progs = getBestPrograms(dsl, nodes)
    best_progs = [tensorize(bp, func_map, max_cparams, max_dparams) for bp in best_progs]
    return best_progs, func_map, cparam_map, dparam_map, jparam_map, max_cparams, max_dparams, num_cube_map
def writeData(dsl, out_name, input_data, category):
    """Tensorize all nodes in `input_data` and write the training set.

    Produces '<out_name>_train.data' (list of (shape index, hierarchy dict))
    and '<out_name>_train.meta' (layout maps plus the DSL itself). Hierarchies
    are rebuilt breadth-first by looking up '<ind>_<child name>' nodes; any
    shape whose root or descendant failed tensorization is skipped.
    """
    full_nodes = pickle.load(open(input_data, 'rb'))
    simp_inds = list(set([n.ind.split('_')[0] for n in full_nodes]))
    inds = []
    nodes = []
    for node in full_nodes:
        ind = node.ind
        if ind.split('_')[0] in simp_inds:
            inds.append(ind)
            nodes.append(node)
    node_tensors, func_map, cparam_map, dparam_map, jparam_map, \
    max_cparams, max_dparams, num_cube_map = form_training_data(dsl, nodes)
    training_data = []
    metadata = {
        'func_map': func_map,
        'cparam_map': cparam_map,
        'dparam_map': dparam_map,
        'jparam_map': jparam_map,
        'max_cparams': max_cparams,
        'max_dparams': max_dparams,
        'num_cube_map': num_cube_map,
        'dsl': dsl,
    }
    metadata['rev_func_map'] = {v:k for k, v in metadata['func_map'].items()}
    # Per discrete type, the widest slot count any single function needs.
    for key in ('i', 'sq', 'sym'):
        metadata[f'max_d_{key}_params'] = max([
            len([ __l for __l in _l if __l == key])
            for _i, _l in metadata['dparam_map'].items()
        ])
    for ind in simp_inds:
        good_ind = True
        if f'{ind}_{category}' not in inds:
            print("Failed a lookup on root")
            continue
        root = {'lookup': f'{ind}_{category}'}
        q = [root]
        # BFS over the hierarchy, attaching ground-truth tensors per node.
        while(len(q) > 0):
            node = q.pop(0)
            index = inds.index(node['lookup'])
            node['children_names'] = nodes[index].children_names
            if node_tensors[index] is None:
                good_ind = False
                q = []
                break
            node['func_gt'] = node_tensors[index][0].numpy()
            node['cparam_gt'] = node_tensors[index][1].numpy()
            node['dparam_gt'] = node_tensors[index][2].numpy()
            node['child_gt'] = node_tensors[index][4]
            node['children'] = []
            # Children are re-ordered to match the canonical program order.
            child_order = {v:k for k,v in node_tensors[index][3].items()}
            children_names = node.pop('children_names')
            for ci in range(len(children_names)):
                cn = children_names[child_order[ci]]
                c_lookup = f'{ind}_{cn}'
                if c_lookup in inds:
                    c_node = {'lookup': c_lookup}
                    q.append(c_node)
                    node['children'].append(c_node)
                else:
                    node['children'].append({})
        if good_ind:
            training_data.append((ind, root))
        else:
            print("Failed")
    pickle.dump(training_data, open(f'{out_name}_train.data', 'wb'))
    pickle.dump(metadata, open(f'{out_name}_train.meta', 'wb'))
def indToFace(i):
    """Map a face index 0-5 to its ShapeAssembly face name."""
    return ('left', 'right', 'bot', 'top', 'back', 'front')[i]
def indToAxis(i):
    """Map an axis index 0-2 to its ShapeAssembly axis name."""
    return ('X', 'Y', 'Z')[i]
def indToCube(i):
    """Map a cuboid index to its name: 0 is the bounding box, k > 0 is cube{k-1}."""
    return 'bbox' if i == 0 else f'cube{i-1}'
def getSALine(logic, d_line, d_ret):
    """Evaluate one sub-command's parameter logic into concrete SA arguments.

    'f' entries are affine combinations over d_line params; 'i'/'b'/'c'
    entries are either constants, return-slot references ('ret'), or direct
    d_line lookups.
    """
    sa_line = []
    for var in logic:
        if var[0] == 'f':
            # Float argument: weighted sum of (param index, scale) pairs.
            sum = 0.
            for ind, scale in var[1]:
                sum += float(scale) * d_line[ind]
            sa_line.append(sum)
        elif var[0] == 'i':
            if var[1] == 'const':
                sa_line.append(var[2])
            elif var[1] == 'ret':
                sa_line.append(d_ret[var[2]])
            else:
                sa_line.append(d_line[var[2]])
        elif var[0] == 'b':
            if var[1] == 'const':
                sa_line.append(var[2])
            else:
                sa_line.append(d_line[var[2]])
        elif var[0] == 'c':
            if var[1] == 'const':
                sa_line.append(var[2])
            else:
                sa_line.append(d_line[var[2]])
        else:
            assert False, 'bad sa line arg'
    return sa_line
def makeSALines(F, d_line, d_ret, last_cube):
    """Expand macro F into concrete ShapeAssembly text lines.

    Applies per-command clamping for validity (cuboid dims within the bbox,
    attach/squeeze uvs within [0, 1], positive translate counts) and tracks
    the most recently created cuboid. Returns (lines, new last_cube).
    """
    ret_num = 0
    sa_lines = []
    for fn, logic in zip(F.structure, F.getLogic()):
        params = getSALine(logic, d_line, d_ret)
        if fn == 'Cuboid':
            # Cuboid dims clamped to (0.01, corresponding bbox dim].
            params[0] = clamp(params[0], 0.01, d_line[1])
            params[1] = clamp(params[1], 0.01, d_line[2])
            params[2] = clamp(params[2], 0.01, d_line[3])
        else:
            # Non-Cuboid commands operate on the previously created cuboid.
            params.insert(0, last_cube)
        if fn == 'attach':
            for i in range(2, 8):
                params[i] = clamp(params[i], 0.0, 1.0)
            # FLIP BBOX ATTACHES
            if params[1] == 'bbox':
                params[6] = 1 - params[6]
                # Sem Validity that attaches to BBox need to be top or bot
                if params[6] <= .5 and params[6] > .1:
                    params[6] = 0.1
                elif params[6] >= .5 and params[6] <.9:
                    params[6] = 0.9
        elif fn == 'squeeze':
            for i in range(4, 6):
                params[i] = clamp(params[i], 0.0, 1.0)
        elif fn == 'translate':
            # Denormalize the repetition count; at least one copy.
            params[2] = max(round(params[2] * utils.TRANS_NORM), 1)
            params[3] = clamp(params[3], 0.0, 1.0)
        mf = make_function(fn, params)
        ret = ""
        if fn == "Cuboid":
            ret = f"{d_ret[ret_num]} = "
            last_cube = d_ret[ret_num]
            ret_num += 1
        sa_lines.append(ret + mf)
    return sa_lines, last_cube
def makeSAProg(dsl, dsl_prog):
    """Expand an abstracted DSL program into flat ShapeAssembly text lines.

    The first program entry declares the bounding box; each following entry is
    (return names, [function name, params...]) and is expanded via its macro.
    """
    bbox_dims = dsl_prog[0][1][1:]
    sa_prog = [f"{dsl_prog[0][0][0]} = {utils.make_function(dsl_prog[0][1][0], bbox_dims)}"]
    last_cube = 'bbox'
    for d_ret, d_line in dsl_prog[1:]:
        new_lines, last_cube = makeSALines(
            dsl.library[d_line[0]],
            # Param vector layout: constant 1.0, bbox dims, line's own params.
            [1.0] + list(bbox_dims[:3]) + list(d_line[1:]),
            d_ret,
            last_cube
        )
        sa_prog += new_lines
    return sa_prog
def fillProgram(dsl, hier, meta, func_field, cparam_field, dparam_field):
    """Recursively decode a hierarchy's tensors back into programs.

    Reads the per-line function ids and continuous/discrete params from the
    given fields, reassembles each (return tuple, line tuple) pair, and stores
    them in hier['dsl_prog'] plus the expanded SA text in hier['prog'].
    """
    prog = []
    cube_num = -1
    f_gt = hier[func_field].tolist()
    cp_gt = hier[cparam_field].tolist()
    dp_gt = hier[dparam_field].tolist()
    inv_func_map = {v:k for k,v in meta['func_map'].items()}
    for f_num, cl_prm, dl_prm in zip(f_gt, cp_gt, dp_gt):
        # Skip the START/STOP sentinel rows (ids 0 and 1).
        if f_num <= 1:
            continue
        fn = inv_func_map[f_num]
        line = [fn]
        j_prm = meta['jparam_map'][f_num]
        # Decode each joint parameter from its continuous or discrete slot.
        for j in j_prm:
            info = j.split('_')
            index = int(info[1])
            if info[0] == 'c':
                lookup = cl_prm
                ptype = meta['cparam_map'][f_num][index]
            else:
                lookup = dl_prm
                ptype = meta['dparam_map'][f_num][index]
            if ptype == 'f':
                line.append(round(float(lookup[index]), 2))
            elif ptype == 'i':
                if lookup[index] == 0:
                    line.append('bbox')
                else:
                    line.append(f'cube{int(lookup[index])-1}')
            elif ptype == 'sq':
                line.append(indToFace(lookup[index]))
            elif ptype == 'sym':
                line.append(indToAxis(lookup[index]))
            elif ptype == 'b':
                line.append(True if lookup[index] > 0 else False)
            else:
                assert False, f'bad input {j_prm}'
        ret = []
        struct = dsl.library[fn].getStructure()
        # Name the cuboids this line produces: first is the bbox, then cube0...
        for s in struct:
            if s == "Cuboid":
                if cube_num < 0:
                    ret.append(f'bbox')
                else:
                    ret.append(f'cube{cube_num}')
                cube_num += 1
        prog.append((tuple(ret), tuple(line)))
    hier['dsl_prog'] = prog
    hier['prog'] = makeSAProg(dsl, prog)
    for c in hier['children']:
        if len(c) > 0:
            fillProgram(dsl, c, meta, func_field, cparam_field, dparam_field)
def testGetProgram(dsl, out_name):
    """Sanity check: reload the written training data, decode each hierarchy
    back into a program, print it, and write the executed mesh as an .obj."""
    from ShapeAssembly import hier_execute
    data = pickle.load(open(f'{out_name}_train.data', "rb"))
    metadata = pickle.load(open(f'{out_name}_train.meta', "rb"))
    for d in data:
        print(d[0])
        fillProgram(dsl, d[1], metadata, 'func_gt', 'cparam_gt', 'dparam_gt')
        prog_lines = utils.getHierProgLines(d[1], 'dsl_prog')
        for l in prog_lines:
            print(l)
        verts, faces = hier_execute(d[1])
        utils.writeObj(verts, faces, f'{d[0]}_dsl_gt.obj')
def main(disc, out_name, in_dir, cat, config_path):
    """Build the abstraction training set.

    Loads the config, instantiates the DSL, patches its library with the
    discovered macros (disc.ADD_FUNCS added, disc.RM_FUNCS removed), then
    writes the tensorized data under grad-free execution.
    """
    config_mod = importlib.import_module(config_path)
    config = config_mod.loadConfig()
    dsl = DSL(config)
    for name, func in disc.ADD_FUNCS:
        dsl.library[name] = Function(func, dsl)
        dsl.full_library[name] = Function(func, dsl)
    for name in disc.RM_FUNCS:
        dsl.library.pop(name)
    with torch.no_grad():
        writeData(dsl, out_name, in_dir, cat)
if __name__ == "__main__":
    # CLI: <out_name> <input_data.pkl> <category> <dsl_module_in_dsls/> <config_module_path>
    out_name = sys.argv[1]
    input_data = sys.argv[2]
    cat = sys.argv[3]
    disc = importlib.import_module(f'dsls.{sys.argv[4]}')
    config_path = sys.argv[5]
    main(disc, out_name, input_data, cat, config_path)
| 15,809 | 29.057034 | 107 | py |
ShapeMOD | ShapeMOD-main/SA_lang/tasks/sem_valid.py | import torch, os, sys, random
import sa_utils as utils
import pickle
import model_prog as mp
from copy import deepcopy
from torch.distributions import Categorical
from make_abs_data import fillProgram, makeSALines, getCuboidDims
from ShapeAssembly import hier_execute, Program
import numpy as np
# Max resampling attempts per generated line before rejecting the program.
MAX_TRIES = 10
# 'HARD': any invalid sampled line marks the whole program as rejected.
REJECT_MODE = 'HARD'
# Multiplicative slack applied to the bounding box in insideBBox.
BE = 1.1
# Maximum number of lines generated per (sub-)program.
MAX_SEQ = 30
# Index -> squeeze-face name (order matches the one-hot encoding).
sq_map = {
    0:'left',
    1:'right',
    2:'bot',
    3:'top',
    4:'back',
    5:'front'
}
# Index -> symmetry-axis name.
sym_map = {
    0: 'X',
    1: 'Y',
    2: 'Z'
}
def sem_eval_forward(net, inp_seq, code, code_start, bb_dims, hier_ind, P, sample):
    """One decoding step with semantic masking over the function head.

    Runs the GRU over inp_seq conditioned on bounding-box dims and hierarchy
    depth, then decodes each sub-network head; the 'func' head is masked by
    P's grammar state (P.cmd_to_func_masks) so only commands legal after
    P.last_command can be chosen. When `sample` is True, discrete heads are
    sampled; otherwise they are argmaxed.
    Returns (decoded line, per-child codes, child logits, new hidden state).
    """
    bb_dims = bb_dims.unsqueeze(0).unsqueeze(0).repeat(1,inp_seq.shape[1],1)
    hier_oh = torch.zeros(1, inp_seq.shape[1], mp.MAX_DEPTH).to(mp.device)
    # One-hot hierarchy depth, capped at level 2.
    hier_oh[0, :, min(hier_ind, 2)] = 1.0
    inp = net.inp_net(
        torch.cat(
            (inp_seq, bb_dims, hier_oh), dim=2)
    )
    gru_out, h = net.gru(inp, code.view(1,1,-1))
    out = torch.zeros(inp_seq.shape, device=mp.device).float()
    commands = None
    for _net in net.net_list:
        # Heads tied to a specific function only run once that function
        # has been chosen this step.
        if _net.func is not None:
            assert commands is not None
            if _net.func != commands:
                continue
        if _net.line_cond is not None:
            line_cond = out[:,:,_net.line_cond[0]:_net.line_cond[1]]
        else:
            line_cond = torch.zeros(inp_seq.shape[0], inp_seq.shape[1], 0, device=mp.device)
        if _net.bb_cond is True:
            bb_cond = bb_dims
        else:
            bb_cond = torch.zeros(inp_seq.shape[0], inp_seq.shape[1], 0, device=mp.device)
        raw_out = _net(torch.cat((
            gru_out, line_cond, bb_cond
        ), dim=2))
        if _net._type == 'func':
            dist = torch.softmax(raw_out.squeeze(), dim=0)
            mask = torch.zeros(dist.shape).float()
            if len(P.cuboids) == 0:
                # Force the initial Cuboid command.
                mask[2] = 1.0
            else:
                if len(P.cuboids) >= 2:
                    lcmd = P.last_command
                else:
                    lcmd = 'start'
                mask[torch.tensor(P.cmd_to_func_masks[lcmd])] = 1.0
            dist = dist * mask.to(mp.device)
            # Never emit the padding/START token.
            dist[0] = 0.
            if sample:
                cmd = Categorical(
                    dist
                ).sample()
            else:
                cmd = dist.argmax().item()
            out[0,0, _net.start+cmd] = 1.0
            assert commands == None
            commands = cmd
        elif _net._type == 'disc':
            dist = torch.softmax(raw_out.squeeze(), dim=0)
            if sample:
                m = Categorical(
                    dist
                ).sample()
            else:
                m = dist.argmax().item()
            out[0,0, _net.start+m] = 1.0
        elif _net._type == 'b':
            if sample:
                r = torch.distributions.Bernoulli(
                    torch.sigmoid(raw_out.squeeze())
                ).sample().float()
            else:
                r = (raw_out.squeeze() >= 0.).float()
            out[0,0,_net.start:_net.end] = r
        elif _net._type == 'f':
            r = raw_out.squeeze()
            out[0,0,_net.start:_net.end] = r
    # Child predictions condition on both the step encoding and the root code.
    double_enc = torch.cat((
        gru_out, code_start.repeat(1, gru_out.shape[1], 1)
    ), dim = 2)
    child_pred = net.child_net(
        double_enc
    )
    next_codes = net.next_code_net(
        double_enc
    ).view(inp_seq.shape[0], inp_seq.shape[1], net.max_children, -1)
    return out, next_codes, child_pred, h
def decode_sa_line(net, P, out, bbox_dims, meta):
    """Decode one network output line into ShapeAssembly text lines.

    Splits the output into (function id, continuous params, discrete params),
    reassembles the DSL line per the function's joint-parameter layout, and
    expands it through makeSALines. A STOP id (1) yields ['<END>'].
    """
    f_num, cl_prm, dl_prm = net.decode_line(out)
    assert f_num != 0, 'this should never happen'
    if f_num == 1:
        return ['<END>']
    inv_func_map = {v:k for k,v in meta['func_map'].items()}
    fn = inv_func_map[f_num]
    line = [fn]
    j_prm = meta['jparam_map'][f_num]
    for j in j_prm:
        info = j.split('_')
        index = int(info[1])
        if info[0] == 'c':
            lookup = cl_prm
            ptype = meta['cparam_map'][f_num][index]
        else:
            lookup = dl_prm
            ptype = meta['dparam_map'][f_num][index]
        if ptype == 'f':
            line.append(round(float(lookup[index]), 2))
        elif ptype == 'i':
            if lookup[index] == 0:
                line.append('bbox')
            else:
                line.append(f'cube{int(lookup[index])-1}')
        elif ptype == 'sq':
            line.append(sq_map[lookup[index].item()])
        elif ptype == 'sym':
            line.append(sym_map[lookup[index].item()])
        elif ptype == 'b':
            line.append(bool(lookup[index]))
        else:
            assert False, f'bad input {j_prm}'
    ret = []
    struct = meta['dsl'].library[fn].getStructure()
    # Next cuboid index = count of top-level cuboids created so far.
    cube_num = len([c for c in P.cuboids.values() if c.parent is None])
    for s in struct:
        if s == "Cuboid":
            if cube_num == 0:
                ret.append(f'bbox')
            else:
                ret.append(f'cube{cube_num-1}')
            cube_num += 1
    d_ret = tuple(ret)
    d_line = tuple(line)
    sa_lines, _ = makeSALines(
        meta['dsl'].library[d_line[0]],
        [1.0] + bbox_dims + list(d_line[1:])
        , d_ret,
        P.last_cuboid
    )
    return sa_lines
def insideBBox(P):
    """Return True iff every non-bbox cuboid fits inside the program's
    bounding box, after padding it by 0.1 and scaling by the BE slack factor."""
    corners = P.cuboids["bbox"].getCorners()
    hi = corners.max(dim=0).values
    lo = corners.min(dim=0).values
    assert (hi > 0).all()
    assert (lo < 0).all()
    # Expand the box slightly so near-boundary cuboids are not rejected.
    hi = (hi + 0.1) * BE
    lo = (lo - 0.1) * BE
    for name, cub in P.cuboids.items():
        if name == 'bbox':
            continue
        cc = cub.getCorners()
        out_high = (cc.max(dim=0).values > hi).any()
        out_low = (cc.min(dim=0).values < lo).any()
        if out_high or out_low:
            return False
    return True
def checkValidLine(net, P, out, bbox_dims, meta):
    """Execute a decoded line on program P and check semantic validity.

    Returns (updated P, True) when the line executes and all semantic rules
    hold, else (None, False). Rules enforced after execution: every cuboid is
    attached at least once by program end, no duplicate non-bbox attachment,
    at most two attachments per cuboid, and at most one symmetry op per cuboid.
    NOTE: callers pass a deepcopy of P, since P is mutated here.
    """
    sa_lines = decode_sa_line(net, P, out, bbox_dims, meta)
    attaches_to_add = []
    cube_syms = []
    for line in sa_lines:
        if '<END>' in line:
            # Make sure nothing is left unmoved
            for v in P.cube_attaches.values():
                if len(v) == 0:
                    return None, False
        if 'Cuboid(' in line:
            parse = P.parseCuboid(line)
            P.last_cuboid = parse[0]
            if 'bbox' not in line:
                P.last_command = 'Cuboid'
                P.cube_attaches[parse[0]] = []
        if 'attach' in line:
            parse = P.parseAttach(line)
            # Commands must operate on the most recently created cuboid.
            if parse[0] != P.last_cuboid:
                assert False, 'how did this fail'
                return None, False
            P.last_command = 'attach'
            attaches_to_add.append((parse[0], parse[1]))
        if 'squeeze' in line:
            parse = P.parseSqueeze(line)
            if parse[0] != P.last_cuboid:
                assert False, 'how did this fail'
                return None, False
            P.last_command = 'squeeze'
            # A squeeze counts as two attachments.
            attaches_to_add.append((parse[0], parse[1]))
            attaches_to_add.append((parse[0], parse[2]))
        if 'translate' in line:
            parse = P.parseTranslate(line)
            if parse[0] != P.last_cuboid:
                assert False, 'how did this fail'
                return None, False
            P.last_command = 'translate'
            cube_syms.append(parse[0])
        if 'reflect' in line:
            parse = P.parseReflect(line)
            if parse[0] != P.last_cuboid:
                assert False, 'how did this fail'
                return None, False
            P.last_command = 'reflect'
            cube_syms.append(parse[0])
        try:
            P.execute(line)
            P.last_command_cuboid = 'Cuboid(' in line
        except Exception as e:
            if mp.VERBOSE:
                print(f"failed line {line} with {e}")
            # Return none
            return None, False
    for a, o in attaches_to_add:
        past_attaches = P.cube_attaches[a]
        # Attached to non bounding box more than once
        if o != 'bbox' and o in past_attaches:
            return None, False
        # Already has max attachments
        if len(past_attaches) == 2:
            return None, False
        P.cube_attaches[a].append(o)
    for c in cube_syms:
        # At most one symmetry operation per cuboid.
        if c in P.cube_syms:
            return None, False
        P.cube_syms.add(c)
    return P, insideBBox(P)
def getSpecFuncs(meta, types):
    """Collect function ids whose first structural op is in `types`.

    'end' in `types` additionally yields the STOP id (1). Note `types` may be
    a plain string, in which case membership is a substring test.
    """
    out = [1] if 'end' in types else []
    dsl = meta['dsl']
    for name, fid in meta['func_map'].items():
        if name in dsl.library and dsl.library[name].structure[0] in types:
            out.append(fid)
    return out
def sem_eval_prog(net, code, rejection_sample = True):
    """Decode a program from `code`; returns {} when rejection sampling aborts.

    _sem_eval_prog signals a rejected sample via AssertionError('rejection
    sample'); any other assertion is a real failure and is propagated.
    """
    try:
        return _sem_eval_prog(net, code, rejection_sample, None)
    except AssertionError as err:
        if err.args[0] != 'rejection sample':
            raise
        return {}
def _sem_eval_prog(net, code, rejection_sample, node):
    """Recursively decode a program hierarchy with semantic validity checks.

    Each line is sampled up to MAX_TRIES times (first try is greedy); an
    invalid line either triggers a resample or, under HARD rejection, marks
    the program rejected. Children are recursed into via predicted codes and
    cuboid dims. Raises AssertionError('rejection sample') if the final
    program was rejected while rejection_sample is on.
    """
    is_root = False
    if node is None:
        is_root = True
        bb_dims = net.bb_net(code)
        node = {
            'depth': 0,
            'bb_dims': bb_dims
        }
    # Depth cutoff: abandon this subtree entirely.
    if node['depth'] > mp.MAX_DEPTH:
        node.pop('depth')
        node.pop('bb_dims')
        return
    h = code.view(1,1, -1)
    h_start = h.clone()
    inp = net.getStartLine()
    out_lines = []
    children = []
    P = Program()
    P.reject = False
    P.cube_attaches = {}
    P.cube_syms = set()
    meta = net.metadata
    P.last_cuboid = None
    P.last_command = 'start'
    # Grammar: which function ids are legal after each command type.
    P.cmd_to_func_masks = {
        'start': getSpecFuncs(meta, ('Cuboid')),
        'Cuboid': getSpecFuncs(meta, ('attach', 'squeeze')),
        'attach': getSpecFuncs(meta, ('attach', 'reflect', 'translate', 'Cuboid', 'end')),
        'squeeze': getSpecFuncs(meta, ('reflect', 'translate', 'Cuboid', 'end')),
        'reflect': getSpecFuncs(meta, ('Cuboid', 'end')),
        'translate': getSpecFuncs(meta, ('Cuboid', 'end'))
    }
    P.cuboids.pop('bbox')
    for i in range(MAX_SEQ):
        should_add = True
        for j in range(MAX_TRIES):
            # Failed to make valid prog
            if j == (MAX_TRIES-1) or (P.reject and rejection_sample):
                P.reject = True
                should_add = False
                break
            # First attempt (j == 0) decodes greedily; retries sample.
            new_inp, pnext, pchild, new_h = sem_eval_forward(
                net, inp, h, h_start, node['bb_dims'], node['depth'], P, j > 0
            )
            clean_out = new_inp.squeeze()
            # Validate on a copy so a bad line leaves P untouched.
            new_P, valid = checkValidLine(
                net,
                deepcopy(P),
                clean_out,
                node['bb_dims'].tolist(),
                net.metadata
            )
            if not valid:
                if REJECT_MODE == 'HARD':
                    P.reject = True
                continue
            inp = new_inp
            h = new_h
            P = new_P
            break
        if not should_add:
            break
        fstart, fend = net.metadata['tl_map']['func']
        func_ind = torch.argmax(clean_out[fstart:fend]).item()
        # STOP token ends the sequence.
        if func_ind == 1:
            break
        out_lines.append(clean_out)
        child_pred = pchild[0][0]
        next_codes = pnext[0][0]
        _, _c, _ = net.decode_line(clean_out)
        # Dims of the cuboids this line creates, used as child bounding boxes.
        cube_dims = getCuboidDims(
            net.metadata['dsl'].library[
                net.metadata['rev_func_map'][func_ind]
            ],
            _c,
            net.metadata['jparam_map'][func_ind],
            node['bb_dims']
        )
        cube_dims = torch.tensor(cube_dims,device=child_pred.device)
        for i in range(net.metadata['num_cube_map'][func_ind]):
            # Positive child logit -> recurse into a sub-program.
            if child_pred[i].item() >= 0.0:
                child = {
                    'depth': node['depth']+1,
                    'bb_dims': cube_dims[i]
                }
                children.append(child)
                _sem_eval_prog(net, next_codes[i], rejection_sample, child)
            else:
                children.append({})
    node['children'] = children
    out_funcs, out_cprms, out_dprms = net.split_lines(out_lines)
    node[mp.FUNC_PRED_FIELD] = torch.tensor(out_funcs)
    node[mp.CPARAM_PRED_FIELD] = torch.stack(out_cprms) if len(out_cprms) > 0 else torch.tensor([])
    node[mp.DPARAM_PRED_FIELD] = torch.stack(out_dprms) if len(out_dprms) > 0 else torch.tensor([])
    if P.reject and rejection_sample:
        assert False, 'rejection sample'
    return node
| 13,496 | 27.656051 | 99 | py |
ShapeMOD | ShapeMOD-main/SA_lang/tasks/pointnet_fd.py | from __future__ import print_function
import argparse
import os
import random
import torch
import torch.nn as nn
import torch.optim as optim
import torch.utils.data as data
from torch.autograd import Variable
import json
import torch.nn.functional as F
from tqdm import tqdm
import numpy as np
import pickle
from sa_utils import sample_surface
from scipy import linalg
def calculate_frechet_distance(mu1, sigma1, mu2, sigma2, eps=1e-6):
    """Numpy implementation of the Frechet Distance.

    The Frechet distance between two multivariate Gaussians X_1 ~ N(mu_1, C_1)
    and X_2 ~ N(mu_2, C_2) is
        d^2 = ||mu_1 - mu_2||^2 + Tr(C_1 + C_2 - 2*sqrt(C_1*C_2)).
    Stable version by Dougal J. Sutherland.

    mu1/mu2 are activation means, sigma1/sigma2 the corresponding covariance
    matrices; eps is the diagonal jitter used when the product is singular.
    """
    mu1, mu2 = np.atleast_1d(mu1), np.atleast_1d(mu2)
    sigma1, sigma2 = np.atleast_2d(sigma1), np.atleast_2d(sigma2)

    assert mu1.shape == mu2.shape, \
        'Training and test mean vectors have different lengths'
    assert sigma1.shape == sigma2.shape, \
        'Training and test covariances have different dimensions'

    diff = mu1 - mu2

    # The matrix square root may fail on a near-singular product; retry with
    # a small diagonal offset.
    covmean, _ = linalg.sqrtm(sigma1.dot(sigma2), disp=False)
    if not np.isfinite(covmean).all():
        print(('fid calculation produces singular product; '
               'adding %s to diagonal of cov estimates') % eps)
        offset = np.eye(sigma1.shape[0]) * eps
        covmean = linalg.sqrtm((sigma1 + offset).dot(sigma2 + offset))

    # Numerical error can introduce a tiny imaginary component; discard it.
    if np.iscomplexobj(covmean):
        if not np.allclose(np.diagonal(covmean).imag, 0, atol=1e-3):
            raise ValueError('Imaginary component {}'.format(np.max(np.abs(covmean.imag))))
        covmean = covmean.real

    return diff.dot(diff) + np.trace(sigma1) + np.trace(sigma2) - 2 * np.trace(covmean)
class STN3d(nn.Module):
    """PointNet input spatial transformer: predicts a 3x3 alignment matrix.

    The network output is added to the identity so the transform starts near
    identity. Input is (B, 3, N) points; output is (B, 3, 3).
    """
    def __init__(self):
        super(STN3d, self).__init__()
        self.conv1 = torch.nn.Conv1d(3, 64, 1)
        self.conv2 = torch.nn.Conv1d(64, 128, 1)
        self.conv3 = torch.nn.Conv1d(128, 1024, 1)
        self.fc1 = nn.Linear(1024, 512)
        self.fc2 = nn.Linear(512, 256)
        self.fc3 = nn.Linear(256, 9)
        self.relu = nn.ReLU()
        self.bn1 = nn.BatchNorm1d(64)
        self.bn2 = nn.BatchNorm1d(128)
        self.bn3 = nn.BatchNorm1d(1024)
        self.bn4 = nn.BatchNorm1d(512)
        self.bn5 = nn.BatchNorm1d(256)
    def forward(self, x):
        batchsize = x.size()[0]
        x = F.relu(self.bn1(self.conv1(x)))
        x = F.relu(self.bn2(self.conv2(x)))
        x = F.relu(self.bn3(self.conv3(x)))
        # Global max-pool over points.
        x = torch.max(x, 2, keepdim=True)[0]
        x = x.view(-1, 1024)
        x = F.relu(self.bn4(self.fc1(x)))
        x = F.relu(self.bn5(self.fc2(x)))
        x = self.fc3(x)
        # Bias the prediction towards the identity matrix.
        iden = Variable(torch.from_numpy(np.array([1,0,0,0,1,0,0,0,1]).astype(np.float32))).view(1,9).repeat(batchsize,1).to(x.device)
        x = x + iden
        x = x.view(-1, 3, 3)
        return x
class STNkd(nn.Module):
    """Feature-space spatial transformer: predicts a k x k alignment matrix.

    Mirrors STN3d for an arbitrary feature dimension k. The network output is
    added to the identity so the transform starts near identity. Input is
    (B, k, N); output is (B, k, k).
    """
    def __init__(self, k=64):
        super(STNkd, self).__init__()
        self.conv1 = torch.nn.Conv1d(k, 64, 1)
        self.conv2 = torch.nn.Conv1d(64, 128, 1)
        self.conv3 = torch.nn.Conv1d(128, 1024, 1)
        self.fc1 = nn.Linear(1024, 512)
        self.fc2 = nn.Linear(512, 256)
        self.fc3 = nn.Linear(256, k*k)
        self.relu = nn.ReLU()
        self.bn1 = nn.BatchNorm1d(64)
        self.bn2 = nn.BatchNorm1d(128)
        self.bn3 = nn.BatchNorm1d(1024)
        self.bn4 = nn.BatchNorm1d(512)
        self.bn5 = nn.BatchNorm1d(256)
        self.k = k
    def forward(self, x):
        batchsize = x.size()[0]
        x = F.relu(self.bn1(self.conv1(x)))
        x = F.relu(self.bn2(self.conv2(x)))
        x = F.relu(self.bn3(self.conv3(x)))
        # Global max-pool over points.
        x = torch.max(x, 2, keepdim=True)[0]
        x = x.view(-1, 1024)
        x = F.relu(self.bn4(self.fc1(x)))
        x = F.relu(self.bn5(self.fc2(x)))
        x = self.fc3(x)
        # Bias towards identity. BUGFIX: move `iden` to the input's device,
        # matching STN3d — the previous CPU-only tensor broke CUDA inputs.
        iden = Variable(torch.from_numpy(np.eye(self.k).flatten().astype(np.float32))).view(1,self.k*self.k).repeat(batchsize,1).to(x.device)
        x = x + iden
        x = x.view(-1, self.k, self.k)
        return x
class PointNetfeat(nn.Module):
    """PointNet feature extractor.

    With global_feat=True returns a (B, 1024) global descriptor; otherwise a
    (B, 1088, N) per-point feature (global feature tiled and concatenated with
    local features). Also returns the input transform and, when
    feature_transform=True, the 64-d feature transform (else None).
    """
    def __init__(self, global_feat = True, feature_transform = False):
        super(PointNetfeat, self).__init__()
        self.stn = STN3d()
        self.conv1 = torch.nn.Conv1d(3, 64, 1)
        self.conv2 = torch.nn.Conv1d(64, 128, 1)
        self.conv3 = torch.nn.Conv1d(128, 1024, 1)
        self.bn1 = nn.BatchNorm1d(64)
        self.bn2 = nn.BatchNorm1d(128)
        self.bn3 = nn.BatchNorm1d(1024)
        self.global_feat = global_feat
        self.feature_transform = feature_transform
        if self.feature_transform:
            self.fstn = STNkd(k=64)
    def forward(self, x):
        n_pts = x.size()[2]
        # Align the input points with the predicted 3x3 transform.
        trans = self.stn(x)
        x = x.transpose(2, 1)
        x = torch.bmm(x, trans)
        x = x.transpose(2, 1)
        x = F.relu(self.bn1(self.conv1(x)))
        if self.feature_transform:
            # Optional second alignment in 64-d feature space.
            trans_feat = self.fstn(x)
            x = x.transpose(2,1)
            x = torch.bmm(x, trans_feat)
            x = x.transpose(2,1)
        else:
            trans_feat = None
        pointfeat = x
        x = F.relu(self.bn2(self.conv2(x)))
        x = self.bn3(self.conv3(x))
        # Global max-pool over points.
        x = torch.max(x, 2, keepdim=True)[0]
        x = x.view(-1, 1024)
        if self.global_feat:
            return x, trans, trans_feat
        else:
            x = x.view(-1, 1024, 1).repeat(1, 1, n_pts)
            return torch.cat([x, pointfeat], 1), trans, trans_feat
class PointNetCls(nn.Module):
    """PointNet classifier head over the global feature.

    forward returns (log-probabilities over k classes, input transform,
    feature transform, the 1024-d global feature); get_emb returns the 256-d
    penultimate embedding without the final ReLU.
    """
    def __init__(self, k=2, feature_transform=False):
        super(PointNetCls, self).__init__()
        self.feature_transform = feature_transform
        self.feat = PointNetfeat(global_feat=True, feature_transform=feature_transform)
        self.fc1 = nn.Linear(1024, 512)
        self.fc2 = nn.Linear(512, 256)
        self.fc3 = nn.Linear(256, k)
        self.dropout = nn.Dropout(p=0.3)
        self.bn1 = nn.BatchNorm1d(512)
        self.bn2 = nn.BatchNorm1d(256)
        self.relu = nn.ReLU()
    def get_emb(self, x):
        x1, trans, trans_feat = self.feat(x)
        x2 = F.relu(self.bn1(self.fc1(x1)))
        x3 = self.fc2(x2)
        return x3
    def forward(self, x):
        x1, trans, trans_feat = self.feat(x)
        x2 = F.relu(self.bn1(self.fc1(x1)))
        x3 = F.relu(self.bn2(self.fc2(x2)))
        xy = self.fc3(x3)
        return F.log_softmax(xy, dim=1), trans, trans_feat, x1
class PointNetDenseCls(nn.Module):
    """PointNet per-point (segmentation) classifier.

    forward returns (per-point log-probabilities shaped (B, N, k), the input
    transform, and the feature transform or None).
    """
    def __init__(self, k = 2, feature_transform=False):
        super(PointNetDenseCls, self).__init__()
        self.k = k
        self.feature_transform=feature_transform
        self.feat = PointNetfeat(global_feat=False, feature_transform=feature_transform)
        self.conv1 = torch.nn.Conv1d(1088, 512, 1)
        self.conv2 = torch.nn.Conv1d(512, 256, 1)
        self.conv3 = torch.nn.Conv1d(256, 128, 1)
        self.conv4 = torch.nn.Conv1d(128, self.k, 1)
        self.bn1 = nn.BatchNorm1d(512)
        self.bn2 = nn.BatchNorm1d(256)
        self.bn3 = nn.BatchNorm1d(128)
    def forward(self, x):
        batchsize = x.size()[0]
        n_pts = x.size()[2]
        x, trans, trans_feat = self.feat(x)
        x = F.relu(self.bn1(self.conv1(x)))
        x = F.relu(self.bn2(self.conv2(x)))
        x = F.relu(self.bn3(self.conv3(x)))
        x = self.conv4(x)
        # (B, k, N) -> (B*N, k) for the softmax, then back to (B, N, k).
        x = x.transpose(2,1).contiguous()
        x = F.log_softmax(x.view(-1,self.k), dim=-1)
        x = x.view(batchsize, n_pts, self.k)
        return x, trans, trans_feat
def feature_transform_regularizer(trans):
    """Penalize deviation of predicted transforms from orthogonality.

    trans: (B, d, d) batch of transforms. Returns the mean Frobenius norm of
    T @ T^t - I over the batch.
    """
    d = trans.size()[1]
    # BUGFIX: build the identity on the input's device (the previous
    # CPU-only torch.eye failed for CUDA inputs).
    I = torch.eye(d, device=trans.device)[None, :, :]
    loss = torch.mean(torch.norm(torch.bmm(trans, trans.transpose(2, 1)) - I, dim=(1, 2)))
    return loss
def get_fd(s1, s2, device=None):
    """Frechet distance between two sample sets under a pretrained PointNet.

    Wraps each sample set in a clamped dataset, embeds batches with the
    16-class classifier checkpoint at 'data/pgp_pointnet_cls_model_249.pth',
    fits a Gaussian to the 1024-d embeddings of each set, and returns their
    Frechet distance. NOTE(review): the checkpoint path and num_classes are
    hard-coded; samples are assumed to be tensors PointNet accepts
    (point clouds, transposed to (B, 3, N)) — confirm against callers.
    """
    class ClassificationDataset():
        def __init__(self, samples):
            super(ClassificationDataset, self).__init__()
            self.datas = samples
            self.N = len(self.datas)
        def __len__(self):
            return self.N
        def __getitem__(self,index):
            # Clamp coordinates into the unit cube the classifier was trained on.
            return torch.clamp(self.datas[index], -1.0, 1.0)
    num_classes = 16
    test_dataset1 = ClassificationDataset(s1)
    test_dataset2 = ClassificationDataset(s2)
    test_loader1 = torch.utils.data.DataLoader(
        test_dataset1,
        batch_size = 16,
        shuffle = True
    )
    test_loader2 = torch.utils.data.DataLoader(
        test_dataset2,
        batch_size = 16,
        shuffle = True
    )
    classifier = PointNetCls(k=num_classes, feature_transform=False)
    classifier.load_state_dict(torch.load("data/pgp_pointnet_cls_model_249.pth"))
    classifier = classifier.eval()
    if device is not None:
        classifier = classifier.to(device)
    def calculate_activation_statistics(loader):
        # Mean and covariance of the classifier's 1024-d global features.
        embs = []
        for i, points in enumerate(loader, 0):
            points = points.transpose(2, 1)
            if device is not None:
                points = points.to(device)
            _, _, _, emb = classifier(points)
            embs.append(emb.detach().cpu())
        act = torch.cat(embs, dim=0).numpy()
        mu = np.mean(act, axis=0)
        sigma = np.cov(act, rowvar=False)
        return mu, sigma
    m1, s1 = calculate_activation_statistics(test_loader1)
    m2, s2 = calculate_activation_statistics(test_loader2)
    return calculate_frechet_distance(m1, s1, m2, s2)
if __name__ == '__main__':
    import os
    import utils
    import sys
    from ShapeAssembly import hier_execute
    # Load `per` shape programs from `fold` starting at `start`, executing
    # each into a (verts, faces) mesh.
    def load_samps(fold, start, per):
        inds = os.listdir(fold)
        s = []
        for ind in tqdm(inds[start:per+start]):
            hp = utils.loadHPFromFile(fold+ind)
            verts, faces = hier_execute(hp)
            s.append((verts, faces))
        return s
    # CLI: <fold1> <fold2> <count> <start1> <start2>; prints the Frechet distance.
    # NOTE(review): load_samps returns (verts, faces) meshes while get_fd's
    # dataset clamps tensors directly — confirm point sampling is not missing
    # here (sample_surface is imported above but unused).
    s1 = load_samps(sys.argv[1], int(sys.argv[4]), int(sys.argv[3]))
    s2 = load_samps(sys.argv[2], int(sys.argv[5]), int(sys.argv[3]))
    print(get_fd(s1, s2))
| 10,819 | 32.190184 | 134 | py |
ShapeMOD | ShapeMOD-main/SA_lang/tasks/infer_sem_valid.py | import torch, os, sys, random
import sa_utils as utils
import pickle
import model_prog as mp
from copy import deepcopy
from torch.distributions import Categorical
from make_abs_data import fillProgram, makeSALines, getCuboidDims
from ShapeAssembly import hier_execute, Program
import numpy as np
# Max resampling attempts per generated line before giving up.
MAX_TRIES = 50
# Multiplicative slack applied to the bounding box in insideBBox.
BE = 1.5
# Maximum number of lines generated per (sub-)program.
MAX_SEQ = 30
# Index -> squeeze-face name (order matches the one-hot encoding).
sq_map = {
    0:'left',
    1:'right',
    2:'bot',
    3:'top',
    4:'back',
    5:'front'
}
# Index -> symmetry-axis name.
sym_map = {
    0: 'X',
    1: 'Y',
    2: 'Z'
}
def sem_eval_forward(net, inp_seq, code, code_start, bb_dims, hier_ind, P, samp_ind):
    """One decoding step with semantic masking and temperature sampling.

    Like sem_valid.sem_eval_forward, but `samp_ind` doubles as a sampling
    temperature: 0 means greedy argmax; larger values flatten the softmax /
    Bernoulli before sampling (retry index is passed in by the caller).
    Returns (decoded line, per-child codes, child logits, new hidden state).
    """
    bb_dims = bb_dims.unsqueeze(0).unsqueeze(0).repeat(1,inp_seq.shape[1],1)
    hier_oh = torch.zeros(1, inp_seq.shape[1], mp.MAX_DEPTH).to(mp.device)
    # One-hot hierarchy depth, capped at level 2.
    hier_oh[0, :, min(hier_ind, 2)] = 1.0
    inp = net.inp_net(
        torch.cat(
            (inp_seq, bb_dims, hier_oh), dim=2)
    )
    gru_out, h = net.gru(inp, code.view(1,1,-1))
    out = torch.zeros(inp_seq.shape, device=mp.device).float()
    commands = None
    for _net in net.net_list:
        # Heads tied to a specific function only run once that function
        # has been chosen this step.
        if _net.func is not None:
            assert commands is not None
            if _net.func != commands:
                continue
        if _net.line_cond is not None:
            line_cond = out[:,:,_net.line_cond[0]:_net.line_cond[1]]
        else:
            line_cond = torch.zeros(inp_seq.shape[0], inp_seq.shape[1], 0, device=mp.device)
        if _net.bb_cond is True:
            bb_cond = bb_dims
        else:
            bb_cond = torch.zeros(inp_seq.shape[0], inp_seq.shape[1], 0, device=mp.device)
        raw_out = _net(torch.cat((
            gru_out, line_cond, bb_cond
        ), dim=2))
        if _net._type == 'func':
            if samp_ind == 0:
                dist = torch.softmax(raw_out.squeeze(), dim=0)
            else:
                # Higher retry index -> higher temperature -> flatter dist.
                dist = torch.softmax(
                    raw_out.squeeze() \
                    / (samp_ind * 1.0),
                    dim=0
                )
            mask = torch.zeros(dist.shape).float()
            if len(P.cuboids) == 0:
                # Force the initial Cuboid command.
                mask[2] = 1.0
            else:
                if len(P.cuboids) >= 2:
                    lcmd = P.last_command
                else:
                    lcmd = 'start'
                mask[torch.tensor(P.cmd_to_func_masks[lcmd])] = 1.0
            dist = dist * mask.to(mp.device)
            # Never emit the padding/START token.
            dist[0] = 0.
            if samp_ind > 0:
                cmd = Categorical(
                    dist
                ).sample()
            else:
                cmd = dist.argmax().item()
            out[0,0, _net.start+cmd] = 1.0
            assert commands == None
            commands = cmd
        elif _net._type == 'disc':
            dist = torch.softmax(raw_out.squeeze(), dim=0)
            if samp_ind > 0:
                m = Categorical(
                    dist
                ).sample()
            else:
                m = dist.argmax().item()
            out[0,0, _net.start+m] = 1.0
        elif _net._type == 'b':
            if samp_ind > 0:
                # Temperature-scaled Bernoulli for boolean slots.
                r = torch.distributions.Bernoulli(
                    torch.sigmoid(raw_out.squeeze()/(1.0 * samp_ind))
                ).sample().float()
            else:
                r = (raw_out.squeeze() >= 0.).float()
            out[0,0,_net.start:_net.end] = r
        elif _net._type == 'f':
            r = raw_out.squeeze()
            out[0,0,_net.start:_net.end] = r
    # Child predictions condition on both the step encoding and the root code.
    double_enc = torch.cat((
        gru_out, code_start.repeat(1, gru_out.shape[1], 1)
    ), dim = 2)
    child_pred = net.child_net(
        double_enc
    )
    next_codes = net.next_code_net(
        double_enc
    ).view(inp_seq.shape[0], inp_seq.shape[1], net.max_children, -1)
    return out, next_codes, child_pred, h
def decode_sa_line(net, P, out, bbox_dims, meta):
    """Decode one predicted line tensor `out` into concrete ShapeAssembly lines.

    Uses net.decode_line to split `out` into a function id plus continuous
    (cl_prm) and discrete (dl_prm) parameter blocks, rebuilds the abstract
    line from the metadata tables, then expands it via makeSALines.
    Returns ['<END>'] when the predicted function id is the end token (1).

    NOTE(review): sq_map / sym_map / makeSALines are module-level names
    defined elsewhere in this file.
    """
    f_num, cl_prm, dl_prm = net.decode_line(out)
    # Function id 0 is masked out during sampling (see dist[0] = 0 upstream).
    assert f_num != 0, 'this should never happen'
    if f_num == 1:
        return ['<END>']
    # Map the predicted function id back to its library name.
    inv_func_map = {v:k for k,v in meta['func_map'].items()}
    fn = inv_func_map[f_num]
    line = [fn]
    # jparam_map lists parameter slots as '<c|d>_<index>' strings.
    j_prm = meta['jparam_map'][f_num]
    for j in j_prm:
        info = j.split('_')
        index = int(info[1])
        if info[0] == 'c':
            lookup = cl_prm
            ptype = meta['cparam_map'][f_num][index]
        else:
            lookup = dl_prm
            ptype = meta['dparam_map'][f_num][index]
        if ptype == 'f':
            # Continuous parameter, rounded for stable textual output.
            line.append(round(float(lookup[index]), 2))
        elif ptype == 'i':
            # Cuboid reference: 0 means the bounding box, k means cube{k-1}.
            if lookup[index] == 0:
                line.append('bbox')
            else:
                line.append(f'cube{int(lookup[index])-1}')
        elif ptype == 'sq':
            line.append(sq_map[lookup[index].item()])
        elif ptype == 'sym':
            line.append(sym_map[lookup[index].item()])
        elif ptype == 'b':
            line.append(bool(lookup[index]))
        else:
            assert False, f'bad input {j_prm}'
    # Name the cuboids this macro will create, continuing P's numbering
    # (only root-level cuboids — parent is None — count toward it).
    ret = []
    struct = meta['dsl'].library[fn].getStructure()
    cube_num = len([c for c in P.cuboids.values() if c.parent is None])
    for s in struct:
        if s == "Cuboid":
            if cube_num == 0:
                ret.append(f'bbox')
            else:
                ret.append(f'cube{cube_num-1}')
            cube_num += 1
    d_ret = tuple(ret)
    d_line = tuple(line)
    sa_lines, _ = makeSALines(
        meta['dsl'].library[d_line[0]],
        [1.0] + bbox_dims + list(d_line[1:])
        , d_ret,
        P.last_cuboid
    )
    return sa_lines
def insideBBox(P):
    """Return True iff every non-bbox cuboid lies inside the (padded) bbox extents."""
    bbox_corners = P.cuboids['bbox'].getCorners()
    hi = bbox_corners.max(dim=0).values
    lo = bbox_corners.min(dim=0).values
    # The bbox is expected to straddle the origin.
    assert (hi > 0).all()
    assert (lo < 0).all()
    # Pad by 0.1 on each side, then scale by the global expansion factor BE.
    hi = (hi + 0.1) * BE
    lo = (lo - 0.1) * BE
    for name in P.cuboids:
        if name == 'bbox':
            continue
        cc = P.cuboids[name].getCorners()
        out_high = (cc.max(dim=0).values > hi).any()
        out_low = (cc.min(dim=0).values < lo).any()
        if out_high or out_low:
            return False
    return True
def checkValidLine(net, P, out, bbox_dims, meta):
    """Speculatively execute one decoded line on program P (pass a deepcopy!).

    Returns (P, ok): ok is False when the line violates any bookkeeping rule
    (duplicate attaches, >2 attachments, repeated symmetry op, execution
    error) or when the resulting geometry escapes the bbox; in failure cases
    the program slot of the pair is None.
    """
    sa_lines = decode_sa_line(net, P, out, bbox_dims, meta)
    attaches_to_add = []
    cube_syms = []
    for line in sa_lines:
        if '<END>' in line:
            # Make sure nothing is left unmoved
            for v in P.cube_attaches.values():
                if len(v) == 0:
                    return None, False
        if 'Cuboid(' in line:
            parse = P.parseCuboid(line)
            P.last_cuboid = parse[0]
            if 'bbox' not in line:
                P.last_command = 'Cuboid'
            P.cube_attaches[parse[0]] = []
        if 'attach' in line:
            parse = P.parseAttach(line)
            if parse[0] != P.last_cuboid:
                # NOTE(review): the `return` below is unreachable unless
                # asserts are stripped (python -O); confirm intent.
                assert False, 'how did this fail'
                return None, False
            P.last_command = 'attach'
            attaches_to_add.append((parse[0], parse[1]))
        if 'squeeze' in line:
            parse = P.parseSqueeze(line)
            if parse[0] != P.last_cuboid:
                assert False, 'how did this fail'
                return None, False
            P.last_command = 'squeeze'
            # A squeeze counts as two attachments (one per anchor).
            attaches_to_add.append((parse[0], parse[1]))
            attaches_to_add.append((parse[0], parse[2]))
        if 'translate' in line:
            parse = P.parseTranslate(line)
            if parse[0] != P.last_cuboid:
                assert False, 'how did this fail'
                return None, False
            P.last_command = 'translate'
            cube_syms.append(parse[0])
        if 'reflect' in line:
            parse = P.parseReflect(line)
            if parse[0] != P.last_cuboid:
                assert False, 'how did this fail'
                return None, False
            P.last_command = 'reflect'
            cube_syms.append(parse[0])
        try:
            P.execute(line)
            P.last_command_cuboid = 'Cuboid(' in line
        except Exception as e:
            if mp.VERBOSE:
                print(f"failed line {line} with {e}")
            # Return none
            return None, False
    # Apply the queued attachment bookkeeping, rejecting invalid patterns.
    for a, o in attaches_to_add:
        past_attaches = P.cube_attaches[a]
        # Attached to non bounding box more than once
        if o != 'bbox' and o in past_attaches:
            return None, False
        # Already has max attachments
        if len(past_attaches) == 2:
            return None, False
        P.cube_attaches[a].append(o)
    # Each cuboid may carry at most one symmetry operation.
    for c in cube_syms:
        if c in P.cube_syms:
            return None, False
        P.cube_syms.add(c)
    return P, insideBBox(P)
def getSpecFuncs(meta, types):
    """Collect ids of library functions whose first structure token is in `types`.

    Id 1 (the <END> token) is included whenever 'end' appears in `types`.
    Note: some callers pass a plain string instead of a tuple, in which case
    the `in` checks degrade to substring matching.
    """
    ids = [1] if 'end' in types else []
    library = meta['dsl'].library
    for fname, fid in meta['func_map'].items():
        if fname in library and library[fname].structure[0] in types:
            ids.append(fid)
    return ids
def sem_eval_prog(net, code, node=None):
    """Recursively decode a hierarchy node from latent `code`.

    At the root (node is None) bounding-box dims are predicted first. Each
    line is sampled up to MAX_TRIES times (with increasing temperature) until
    it passes checkValidLine; after MAX_TRIES failures the node dict is
    emptied, which signals failure to the caller — an empty dict is returned
    for non-root nodes and dummy_prog(net) at the root. Recursion stops past
    mp.MAX_DEPTH (bare return).
    """
    is_root = False
    if node is None:
        is_root = True
        bb_dims = net.bb_net(code)
        node = {
            'depth': 0,
            'bb_dims': bb_dims
        }
    if node['depth'] > mp.MAX_DEPTH:
        node.pop('depth')
        node.pop('bb_dims')
        return
    h = code.view(1,1, -1)
    h_start = h.clone()
    inp = net.getStartLine()
    out_lines = []
    children = []
    # Fresh scratch program used only to validate the sampled lines.
    P = Program()
    P.cube_attaches = {}
    P.cube_syms = set()
    meta = net.metadata
    P.last_cuboid = None
    P.last_command = 'start'
    # Legal next-function masks keyed by the previously executed command.
    # NOTE(review): ('Cuboid') is a plain string, not a 1-tuple — getSpecFuncs
    # then does substring matching. It happens to work; confirm intent.
    P.cmd_to_func_masks = {
        'start': getSpecFuncs(meta, ('Cuboid')),
        'Cuboid': getSpecFuncs(meta, ('attach', 'squeeze')),
        'attach': getSpecFuncs(meta, ('attach', 'reflect', 'translate', 'Cuboid', 'end')),
        'squeeze': getSpecFuncs(meta, ('reflect', 'translate', 'Cuboid', 'end')),
        'reflect': getSpecFuncs(meta, ('Cuboid', 'end')),
        'translate': getSpecFuncs(meta, ('Cuboid', 'end'))
    }
    P.cuboids.pop('bbox')
    for i in range(MAX_SEQ):
        should_break = False
        for j in range(MAX_TRIES):
            # Failed to make valid prog
            if j == (MAX_TRIES-1):
                keys = list(node.keys())
                for k in keys:
                    if k in node:
                        node.pop(k)
                should_break = True
                break
            # j doubles as the sampling temperature index (0 = argmax).
            new_inp, pnext, pchild, new_h = sem_eval_forward(
                net, inp, h, h_start, node['bb_dims'], node['depth'], P, j
            )
            clean_out = new_inp.squeeze()
            new_P, valid = checkValidLine(
                net,
                deepcopy(P),
                clean_out,
                node['bb_dims'].tolist(),
                net.metadata
            )
            if not valid:
                continue
            # Commit the accepted line: advance input, hidden state, program.
            inp = new_inp
            h = new_h
            P = new_P
            break
        if should_break:
            break
        fstart, fend = net.metadata['tl_map']['func']
        func_ind = torch.argmax(clean_out[fstart:fend]).item()
        # Function id 1 is the <END> token.
        if func_ind == 1:
            break
        out_lines.append(clean_out)
        child_pred = pchild[0][0]
        next_codes = pnext[0][0]
        _, _c, _ = net.decode_line(clean_out)
        cube_dims = getCuboidDims(
            net.metadata['dsl'].library[
                net.metadata['rev_func_map'][func_ind]
            ],
            _c,
            net.metadata['jparam_map'][func_ind],
            node['bb_dims']
        )
        cube_dims = torch.tensor(cube_dims,device=child_pred.device)
        # NOTE(review): this `i` shadows the sequence-loop `i` above.
        for i in range(net.metadata['num_cube_map'][func_ind]):
            # Positive child logit -> recurse into a sub-program for that cuboid.
            if child_pred[i].item() >= 0.0:
                child = {
                    'depth': node['depth']+1,
                    'bb_dims': cube_dims[i]
                }
                children.append(child)
                sem_eval_prog(net, next_codes[i], child)
            else:
                children.append({})
    if len(node) == 0:
        if not is_root:
            return node
        else:
            return dummy_prog(net)
    node['children'] = children
    out_funcs, out_cprms, out_dprms = net.split_lines(out_lines)
    node[mp.FUNC_PRED_FIELD] = torch.tensor(out_funcs)
    node[mp.CPARAM_PRED_FIELD] = torch.stack(out_cprms) if len(out_cprms) > 0 else torch.tensor([])
    node[mp.DPARAM_PRED_FIELD] = torch.stack(out_dprms) if len(out_dprms) > 0 else torch.tensor([])
    return node
def dummy_prog(net):
    """Minimal placeholder program (two dummy lines, two empty children)
    returned when decoding fails entirely at the root."""
    md = net.metadata
    return {
        'children': [{}, {}],
        mp.FUNC_PRED_FIELD: torch.tensor([2, 2]),
        mp.CPARAM_PRED_FIELD: torch.ones(2, md['max_cparams']).float(),
        mp.DPARAM_PRED_FIELD: torch.zeros(2, md['max_dparams']).long(),
    }
| 13,859 | 27.401639 | 99 | py |
ShapeMOD | ShapeMOD-main/SA_lang/tasks/etw_pytorch_utils.py | # From https://github.com/erikwijmans
from __future__ import (
division,
absolute_import,
with_statement,
print_function,
unicode_literals,
)
import torch.nn as nn
import os
import torch
import torch.nn as nn
from torch.autograd.function import InplaceFunction
from itertools import repeat
import numpy as np
import shutil
import tqdm
from scipy.stats import t as student_t
import statistics as stats
class SharedMLP(nn.Sequential):
    """Stack of 1x1 Conv2d units (weights shared across spatial positions).

    `args` lists channel sizes; layer i maps args[i] -> args[i+1]. When both
    `first` and `preact` are set, batchnorm and activation are skipped on
    layer 0 only (the repeated guard below).
    """
    def __init__(
        self,
        args,
        bn=False,
        activation=nn.ReLU(inplace=True),
        preact=False,
        first=False,
        name="",
    ):
        # type: (SharedMLP, List[int], bool, Any, bool, bool, AnyStr) -> None
        super(SharedMLP, self).__init__()
        for i in range(len(args) - 1):
            self.add_module(
                name + "layer{}".format(i),
                Conv2d(
                    args[i],
                    args[i + 1],
                    # Skip BN/activation on the very first pre-activation layer.
                    bn=(not first or not preact or (i != 0)) and bn,
                    activation=activation
                    if (not first or not preact or (i != 0))
                    else None,
                    preact=preact,
                ),
            )
class _BNBase(nn.Sequential):
def __init__(self, in_size, batch_norm=None, name=""):
super(_BNBase, self).__init__()
self.add_module(name + "bn", batch_norm(in_size))
nn.init.constant_(self[0].weight, 1.0)
nn.init.constant_(self[0].bias, 0)
class BatchNorm1d(_BNBase):
    """_BNBase specialization wrapping nn.BatchNorm1d."""
    def __init__(self, in_size, name=""):
        # type: (BatchNorm1d, int, AnyStr) -> None
        super(BatchNorm1d, self).__init__(in_size, batch_norm=nn.BatchNorm1d, name=name)
class BatchNorm2d(_BNBase):
    """_BNBase specialization wrapping nn.BatchNorm2d."""
    def __init__(self, in_size, name=""):
        # type: (BatchNorm2d, int, AnyStr) -> None
        super(BatchNorm2d, self).__init__(in_size, batch_norm=nn.BatchNorm2d, name=name)
class BatchNorm3d(_BNBase):
    """_BNBase specialization wrapping nn.BatchNorm3d."""
    def __init__(self, in_size, name=""):
        # type: (BatchNorm3d, int, AnyStr) -> None
        super(BatchNorm3d, self).__init__(in_size, batch_norm=nn.BatchNorm3d, name=name)
class _ConvBase(nn.Sequential):
    """Conv + optional batchnorm + optional activation.

    preact=True order:  [bn(in_size)] -> [activation] -> conv
    preact=False order: conv -> [bn(out_size)] -> [activation]
    The conv bias is disabled whenever bn is on.
    """
    def __init__(
        self,
        in_size,
        out_size,
        kernel_size,
        stride,
        padding,
        dilation,
        activation,
        bn,
        init,
        conv=None,
        norm_layer=None,
        bias=True,
        preact=False,
        name="",
    ):
        super(_ConvBase, self).__init__()
        # Bias is redundant when a norm layer immediately follows the conv.
        bias = bias and (not bn)
        conv_unit = conv(
            in_size,
            out_size,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            dilation=dilation,
            bias=bias,
        )
        init(conv_unit.weight)
        if bias:
            nn.init.constant_(conv_unit.bias, 0)
        if bn:
            # Pre-activation normalizes the conv input, post its output.
            if not preact:
                bn_unit = norm_layer(out_size)
            else:
                bn_unit = norm_layer(in_size)
        if preact:
            if bn:
                self.add_module(name + "normlayer", bn_unit)
            if activation is not None:
                self.add_module(name + "activation", activation)
        self.add_module(name + "conv", conv_unit)
        if not preact:
            if bn:
                self.add_module(name + "normlayer", bn_unit)
            if activation is not None:
                self.add_module(name + "activation", activation)
class Conv1d(_ConvBase):
    """1D convolution unit (conv + optional BN/activation); see _ConvBase."""
    def __init__(
        self,
        in_size,
        out_size,
        kernel_size=1,
        stride=1,
        padding=0,
        dilation=1,
        activation=nn.ReLU(inplace=True),
        bn=False,
        init=nn.init.kaiming_normal_,
        bias=True,
        preact=False,
        name="",
        norm_layer=BatchNorm1d,
    ):
        # type: (Conv1d, int, int, int, int, int, int, Any, bool, Any, bool, bool, AnyStr, _BNBase) -> None
        super(Conv1d, self).__init__(
            in_size,
            out_size,
            kernel_size,
            stride,
            padding,
            dilation,
            activation,
            bn,
            init,
            conv=nn.Conv1d,
            norm_layer=norm_layer,
            bias=bias,
            preact=preact,
            name=name,
        )
class Conv2d(_ConvBase):
    """2D convolution unit (conv + optional BN/activation); see _ConvBase."""
    def __init__(
        self,
        in_size,
        out_size,
        kernel_size=(1, 1),
        stride=(1, 1),
        padding=(0, 0),
        dilation=(1, 1),
        activation=nn.ReLU(inplace=True),
        bn=False,
        init=nn.init.kaiming_normal_,
        bias=True,
        preact=False,
        name="",
        norm_layer=BatchNorm2d,
    ):
        # type: (Conv2d, int, int, Tuple[int, int], Tuple[int, int], Tuple[int, int], Tuple[int, int], Any, bool, Any, bool, bool, AnyStr, _BNBase) -> None
        super(Conv2d, self).__init__(
            in_size,
            out_size,
            kernel_size,
            stride,
            padding,
            dilation,
            activation,
            bn,
            init,
            conv=nn.Conv2d,
            norm_layer=norm_layer,
            bias=bias,
            preact=preact,
            name=name,
        )
class Conv3d(_ConvBase):
    """3D convolution unit (conv + optional BN/activation); see _ConvBase."""
    def __init__(
        self,
        in_size,
        out_size,
        kernel_size=(1, 1, 1),
        stride=(1, 1, 1),
        padding=(0, 0, 0),
        dilation=(1, 1, 1),
        activation=nn.ReLU(inplace=True),
        bn=False,
        init=nn.init.kaiming_normal_,
        bias=True,
        preact=False,
        name="",
        norm_layer=BatchNorm3d,
    ):
        # type: (Conv3d, int, int, Tuple[int, int, int], Tuple[int, int, int], Tuple[int, int, int], Tuple[int, int, int], Any, bool, Any, bool, bool, AnyStr, _BNBase) -> None
        super(Conv3d, self).__init__(
            in_size,
            out_size,
            kernel_size,
            stride,
            padding,
            dilation,
            activation,
            bn,
            init,
            conv=nn.Conv3d,
            norm_layer=norm_layer,
            bias=bias,
            preact=preact,
            name=name,
        )
class FC(nn.Sequential):
    """Linear layer with optional batchnorm and activation.

    preact=True applies [bn(in_size)] -> [activation] before the linear
    layer; otherwise [bn(out_size)] -> [activation] follow it. The linear
    bias is dropped when batchnorm is used, zero-initialized otherwise.
    """
    def __init__(
        self,
        in_size,
        out_size,
        activation=nn.ReLU(inplace=True),
        bn=False,
        init=None,
        preact=False,
        name="",
    ):
        # type: (FC, int, int, Any, bool, Any, bool, AnyStr) -> None
        super(FC, self).__init__()
        linear = nn.Linear(in_size, out_size, bias=not bn)
        if init is not None:
            init(linear.weight)
        if not bn:
            nn.init.constant_(linear.bias, 0)
        if preact and bn:
            self.add_module(name + "bn", BatchNorm1d(in_size))
        if preact and activation is not None:
            self.add_module(name + "activation", activation)
        self.add_module(name + "fc", linear)
        if (not preact) and bn:
            self.add_module(name + "bn", BatchNorm1d(out_size))
        if (not preact) and activation is not None:
            self.add_module(name + "activation", activation)
class _DropoutNoScaling(InplaceFunction):
    """Dropout that does NOT rescale surviving activations by 1/(1-p).

    Legacy-style autograd Function (ctx API); `symbolic` targets the old
    ONNX export interface.
    """
    @staticmethod
    def _make_noise(input):
        # Uninitialized tensor of input's shape; filled by bernoulli_ in forward.
        return input.new().resize_as_(input)
    @staticmethod
    def symbolic(g, input, p=0.5, train=False, inplace=False):
        if inplace:
            return None
        n = g.appendNode(
            g.create("Dropout", [input]).f_("ratio", p).i_("is_test", not train)
        )
        real = g.appendNode(g.createSelect(n, 0))
        g.appendNode(g.createSelect(n, 1))
        return real
    @classmethod
    def forward(cls, ctx, input, p=0.5, train=False, inplace=False):
        if p < 0 or p > 1:
            raise ValueError(
                "dropout probability has to be between 0 and 1, " "but got {}".format(p)
            )
        ctx.p = p
        ctx.train = train
        ctx.inplace = inplace
        if ctx.inplace:
            ctx.mark_dirty(input)
            output = input
        else:
            output = input.clone()
        # Identity when p == 0 or in eval mode.
        if ctx.p > 0 and ctx.train:
            ctx.noise = cls._make_noise(input)
            if ctx.p == 1:
                ctx.noise.fill_(0)
            else:
                ctx.noise.bernoulli_(1 - ctx.p)
            ctx.noise = ctx.noise.expand_as(input)
            # Zero dropped entries; kept entries are NOT scaled by 1/(1-p).
            output.mul_(ctx.noise)
        return output
    @staticmethod
    def backward(ctx, grad_output):
        # Gradient flows only through the kept entries.
        if ctx.p > 0 and ctx.train:
            return grad_output.mul(ctx.noise), None, None, None
        else:
            return grad_output, None, None, None
# Functional alias: dropout_no_scaling(input, p=0.5, train=False, inplace=False)
dropout_no_scaling = _DropoutNoScaling.apply
class _FeatureDropoutNoScaling(_DropoutNoScaling):
    """Channel-wise variant: one Bernoulli draw per (batch, feature), broadcast
    over remaining dims via expand_as in the parent forward."""
    @staticmethod
    def symbolic(input, p=0.5, train=False, inplace=False):
        # No ONNX mapping for the feature-wise variant.
        return None
    @staticmethod
    def _make_noise(input):
        # Noise shaped (N, C, 1, 1, ...) so each channel is kept or dropped whole.
        return input.new().resize_(
            input.size(0), input.size(1), *repeat(1, input.dim() - 2)
        )
# Functional alias: feature_dropout_no_scaling(input, p=0.5, train=False, inplace=False)
feature_dropout_no_scaling = _FeatureDropoutNoScaling.apply
def group_model_params(model, **kwargs):
    # type: (nn.Module, ...) -> List[Dict]
    """Split parameters into a weight-decay group and a no-decay group
    (norm-layer params and biases skip decay). Extra kwargs (e.g. lr) are
    copied into both optimizer param groups."""
    decay, no_decay = [], []
    for name, param in model.named_parameters():
        bucket = no_decay if ("normlayer" in name or "bias" in name) else decay
        bucket.append(param)
    # Sanity check: every parameter landed in exactly one group.
    assert len(list(model.parameters())) == len(decay) + len(no_decay)
    return [
        dict(params=decay, **kwargs),
        dict(params=no_decay, weight_decay=0.0, **kwargs),
    ]
def checkpoint_state(model=None, optimizer=None, best_prec=None, epoch=None, it=None):
    """Bundle training state into a serializable dict (DataParallel unwrapped)."""
    if model is None:
        model_state = None
    elif isinstance(model, torch.nn.DataParallel):
        model_state = model.module.state_dict()
    else:
        model_state = model.state_dict()
    return {
        "epoch": epoch,
        "it": it,
        "best_prec": best_prec,
        "model_state": model_state,
        "optimizer_state": optimizer.state_dict() if optimizer is not None else None,
    }
def save_checkpoint(state, is_best, filename="checkpoint", bestname="model_best"):
    """Write `state` to '<filename>.pth.tar'; mirror it to '<bestname>.pth.tar' when is_best."""
    ckpt_path = f"{filename}.pth.tar"
    torch.save(state, ckpt_path)
    if is_best:
        shutil.copyfile(ckpt_path, f"{bestname}.pth.tar")
def load_checkpoint(model=None, optimizer=None, filename="checkpoint"):
    """Restore state from '<filename>.pth.tar'.

    Returns (it, epoch, best_prec) on success, or None when the file is missing.
    """
    filename = "{}.pth.tar".format(filename)
    if not os.path.isfile(filename):
        print("==> Checkpoint '{}' not found".format(filename))
        return None
    print("==> Loading from checkpoint '{}'".format(filename))
    checkpoint = torch.load(filename)
    epoch = checkpoint["epoch"]
    it = checkpoint.get("it", 0.0)
    best_prec = checkpoint["best_prec"]
    if model is not None and checkpoint["model_state"] is not None:
        model.load_state_dict(checkpoint["model_state"])
    if optimizer is not None and checkpoint["optimizer_state"] is not None:
        optimizer.load_state_dict(checkpoint["optimizer_state"])
    print("==> Done")
    return it, epoch, best_prec
def variable_size_collate(pad_val=0, use_shared_memory=True):
    """Build a DataLoader collate_fn that pads variable-length tensors.

    Tensors in a batch are padded with `pad_val` along dim 0 up to the batch
    max length; numbers, numpy arrays, dicts and sequences are handled
    recursively, mirroring torch's default_collate.
    """
    # BUG FIX: `re` was used below but never imported (NameError on
    # string-dtype arrays), and collections.Mapping/Sequence were removed
    # from the top-level `collections` module in Python 3.10.
    import collections.abc
    import re

    _numpy_type_map = {
        "float64": torch.DoubleTensor,
        "float32": torch.FloatTensor,
        "float16": torch.HalfTensor,
        "int64": torch.LongTensor,
        "int32": torch.IntTensor,
        "int16": torch.ShortTensor,
        "int8": torch.CharTensor,
        "uint8": torch.ByteTensor,
    }

    def wrapped(batch):
        "Puts each data field into a tensor with outer dimension batch size"
        error_msg = "batch must contain tensors, numbers, dicts or lists; found {}"
        elem_type = type(batch[0])
        if torch.is_tensor(batch[0]):
            max_len = 0
            for b in batch:
                max_len = max(max_len, b.size(0))
            numel = sum([int(b.numel() / b.size(0) * max_len) for b in batch])
            if use_shared_memory:
                # If we're in a background process, concatenate directly into a
                # shared memory tensor to avoid an extra copy
                storage = batch[0].storage()._new_shared(numel)
                out = batch[0].new(storage)
            else:
                out = batch[0].new(numel)
            out = out.view(
                len(batch),
                max_len,
                *[batch[0].size(i) for i in range(1, batch[0].dim())]
            )
            out.fill_(pad_val)
            for i in range(len(batch)):
                out[i, 0 : batch[i].size(0)] = batch[i]
            return out
        elif (
            elem_type.__module__ == "numpy"
            and elem_type.__name__ != "str_"
            and elem_type.__name__ != "string_"
        ):
            elem = batch[0]
            if elem_type.__name__ == "ndarray":
                # Reject arrays of string classes and objects.
                if re.search("[SaUO]", elem.dtype.str) is not None:
                    raise TypeError(error_msg.format(elem.dtype))
                return wrapped([torch.from_numpy(b) for b in batch])
            if elem.shape == ():  # scalars
                py_type = float if elem.dtype.name.startswith("float") else int
                return _numpy_type_map[elem.dtype.name](list(map(py_type, batch)))
        elif isinstance(batch[0], int):
            return torch.LongTensor(batch)
        elif isinstance(batch[0], float):
            return torch.DoubleTensor(batch)
        elif isinstance(batch[0], collections.abc.Mapping):
            return {key: wrapped([d[key] for d in batch]) for key in batch[0]}
        elif isinstance(batch[0], collections.abc.Sequence):
            transposed = zip(*batch)
            return [wrapped(samples) for samples in transposed]
        raise TypeError((error_msg.format(type(batch[0]))))

    return wrapped
class TrainValSplitter:
    r"""
    Creates a training and validation split to be used as the sampler in a pytorch DataLoader
    Parameters
    ---------
    numel : int
        Number of elements in the entire training dataset
    percent_train : float
        Percentage of data in the training split
    shuffled : bool
        Whether or not shuffle which data goes to which split
    """
    def __init__(self, numel, percent_train, shuffled=False):
        # type: (TrainValSplitter, int, float, bool) -> None
        indices = np.arange(numel)
        if shuffled:
            np.random.shuffle(indices)
        split = int(percent_train * numel)
        self.train = torch.utils.data.sampler.SubsetRandomSampler(
            indices[:split]
        )
        # BUG FIX: the val slice used to end at -1, which silently dropped
        # the final sample from every validation split.
        self.val = torch.utils.data.sampler.SubsetRandomSampler(
            indices[split:]
        )
class CrossValSplitter:
    r"""
    Class that creates cross validation splits.  The train and val splits can be used in pytorch DataLoaders.  The splits can be updated
    by calling next(self) or using a loop:
        for _ in self:
            ....
    Parameters
    ---------
    numel : int
        Number of elements in the training set
    k_folds : int
        Number of folds
    shuffled : bool
        Whether or not to shuffle which data goes in which fold
    """
    def __init__(self, numel, k_folds, shuffled=False):
        # type: (CrossValSplitter, int, int, bool) -> None
        indices = np.arange(numel)
        if shuffled:
            np.random.shuffle(indices)
        self.folds = np.array(np.array_split(indices, k_folds), dtype=object)
        self.current_v_ind = -1
        # Start with fold 0 as validation, the rest as train.
        self.val = torch.utils.data.sampler.SubsetRandomSampler(self.folds[0])
        self.train = torch.utils.data.sampler.SubsetRandomSampler(
            np.concatenate(self.folds[1:], axis=0)
        )
        self.metrics = {}
    def __iter__(self):
        self.current_v_ind = -1
        return self
    def __len__(self):
        return len(self.folds)
    def __getitem__(self, idx):
        """Select fold `idx` as validation; all other folds become train."""
        assert idx >= 0 and idx < len(self)
        # BUG FIX: the attribute was misspelled 'inidicies', so the samplers
        # (which store 'indices') were never actually updated and every fold
        # silently reused the initial split.
        self.val.indices = self.folds[idx]
        self.train.indices = np.concatenate(
            self.folds[np.arange(len(self)) != idx], axis=0
        )
    def __next__(self):
        self.current_v_ind += 1
        if self.current_v_ind >= len(self):
            raise StopIteration
        self[self.current_v_ind]
    def update_metrics(self, to_post):
        # type: (CrossValSplitter, dict) -> None
        """Append each metric sample in `to_post` to its running list."""
        for k, v in to_post.items():
            self.metrics.setdefault(k, []).append(v)
    def print_metrics(self):
        """Print mean +/- 95% confidence margin for each collected metric."""
        import math
        for name, samples in self.metrics.items():
            xbar = stats.mean(samples)
            sx = stats.stdev(samples, xbar)
            tstar = student_t.ppf(1.0 - 0.025, len(samples) - 1)
            # BUG FIX: sqrt was never imported; use math.sqrt explicitly.
            margin_of_error = tstar * sx / math.sqrt(len(samples))
            print("{}: {} +/- {}".format(name, xbar, margin_of_error))
def set_bn_momentum_default(bn_momentum):
    """Return a callable for nn.Module.apply that sets `momentum` on every
    BatchNorm1d/2d/3d submodule it visits."""
    bn_types = (nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d)

    def fn(m):
        if isinstance(m, bn_types):
            m.momentum = bn_momentum

    return fn
class BNMomentumScheduler(object):
    """Epoch-indexed batchnorm-momentum scheduler.

    Each step(epoch) applies setter(bn_lambda(epoch)) across the model.
    NOTE(review): __init__ runs step(last_epoch + 1) and then resets
    self.last_epoch back to last_epoch, so the next parameterless step()
    re-applies the same epoch value — confirm this is intended.
    """
    def __init__(self, model, bn_lambda, last_epoch=-1, setter=set_bn_momentum_default):
        if not isinstance(model, nn.Module):
            raise RuntimeError(
                "Class '{}' is not a PyTorch nn Module".format(type(model).__name__)
            )
        self.model = model
        self.setter = setter
        self.lmbd = bn_lambda  # maps epoch -> momentum value
        self.step(last_epoch + 1)
        self.last_epoch = last_epoch
    def step(self, epoch=None):
        # Advance to `epoch` (default last_epoch + 1) and apply its momentum.
        if epoch is None:
            epoch = self.last_epoch + 1
        self.last_epoch = epoch
        self.model.apply(self.setter(self.lmbd(epoch)))
class Trainer(object):
    r"""
    Reasonably generic trainer for pytorch models

    Parameters
    ----------
    model : pytorch model
        Model to be trained
    model_fn : function (model, inputs, labels) -> preds, loss, accuracy
    optimizer : torch.optim
        Optimizer for model
    checkpoint_name : str
        Name of file to save checkpoints to
    best_name : str
        Name of file to save best model to
    lr_scheduler : torch.optim.lr_scheduler
        Learning rate scheduler.  .step() will be called at the start of every epoch
    bnm_scheduler : BNMomentumScheduler
        Batchnorm momentum scheduler.  .step() will be called at the start of every epoch
    eval_frequency : int
        How often to run an eval
    viz : object or None
        Optional visualizer with update(phase, it, res) and flush()
    """
    def __init__(
        self,
        model,
        model_fn,
        optimizer,
        checkpoint_name="ckpt",
        best_name="best",
        lr_scheduler=None,
        bnm_scheduler=None,
        eval_frequency=-1,
        viz=None,
    ):
        self.model, self.model_fn, self.optimizer, self.lr_scheduler, self.bnm_scheduler = (
            model,
            model_fn,
            optimizer,
            lr_scheduler,
            bnm_scheduler,
        )
        self.checkpoint_name, self.best_name = checkpoint_name, best_name
        self.eval_frequency = eval_frequency
        self.training_best, self.eval_best = {}, {}
        self.viz = viz

    @staticmethod
    def _decode_value(v):
        """Aggregate a list of per-batch metric samples into one scalar.

        Floats are averaged; (num, denom[, weights]) tuples are summed and
        divided, optionally weighted.
        """
        if isinstance(v[0], float):
            return np.mean(v)
        elif isinstance(v[0], tuple):
            if len(v[0]) == 3:
                num = [l[0] for l in v]
                denom = [l[1] for l in v]
                w = v[0][2]
            else:
                num = [l[0] for l in v]
                denom = [l[1] for l in v]
                w = None
            return np.average(
                np.sum(num, axis=0) / (np.sum(denom, axis=0) + 1e-6), weights=w
            )
        else:
            raise AssertionError("Unknown type: {}".format(type(v)))

    def _train_it(self, it, batch):
        """Run a single optimization step; returns model_fn's eval dict."""
        self.model.train()
        if self.lr_scheduler is not None:
            self.lr_scheduler.step(it)
        if self.bnm_scheduler is not None:
            self.bnm_scheduler.step(it)
        self.optimizer.zero_grad()
        _, loss, eval_res = self.model_fn(self.model, batch)
        loss.backward()
        self.optimizer.step()
        return eval_res

    def eval_epoch(self, d_loader):
        """One pass over d_loader in eval mode; returns (mean loss, metric lists).

        NOTE(review): `count` starts at 1.0, biasing the reported mean loss
        slightly low — preserved for backward compatibility.
        """
        self.model.eval()
        eval_dict = {}
        total_loss = 0.0
        count = 1.0
        for i, data in tqdm.tqdm(
            enumerate(d_loader, 0), total=len(d_loader), leave=False, desc="val"
        ):
            self.optimizer.zero_grad()
            _, loss, eval_res = self.model_fn(self.model, data, eval=True)
            total_loss += loss.item()
            count += 1
            for k, v in eval_res.items():
                if v is not None:
                    eval_dict[k] = eval_dict.get(k, []) + [v]
        return total_loss / count, eval_dict

    def train(
        self,
        start_it,
        start_epoch,
        n_epochs,
        train_loader,
        test_loader=None,
        best_loss=0.0,
    ):
        r"""
        Call to begin training the model

        Parameters
        ----------
        start_it : int
            Iteration count to resume from
        start_epoch : int
            Epoch to start at
        n_epochs : int
            Number of epochs to train for
        train_loader : torch.utils.data.DataLoader
            DataLoader of training data
        test_loader : torch.utils.data.DataLoader
            DataLoader of the test_data
        best_loss : float
            Testing loss of the best model

        Returns
        -------
        float
            Best validation loss observed
        """
        eval_frequency = (
            self.eval_frequency if self.eval_frequency > 0 else len(train_loader)
        )
        it = start_it
        with tqdm.trange(start_epoch, n_epochs + 1, desc="epochs") as tbar, tqdm.tqdm(
            total=eval_frequency, leave=False, desc="train"
        ) as pbar:
            for epoch in tbar:
                for batch in train_loader:
                    res = self._train_it(it, batch)
                    it += 1
                    pbar.update()
                    pbar.set_postfix(dict(total_it=it))
                    tbar.refresh()
                    if self.viz is not None:
                        self.viz.update("train", it, res)
                    if (it % eval_frequency) == 0:
                        pbar.close()
                        if test_loader is not None:
                            val_loss, res = self.eval_epoch(test_loader)
                            if self.viz is not None:
                                self.viz.update("val", it, res)
                            is_best = val_loss < best_loss
                            best_loss = min(best_loss, val_loss)
                            save_checkpoint(
                                checkpoint_state(
                                    self.model, self.optimizer, val_loss, epoch, it
                                ),
                                is_best,
                                filename=self.checkpoint_name,
                                bestname=self.best_name,
                            )
                        pbar = tqdm.tqdm(
                            total=eval_frequency, leave=False, desc="train"
                        )
                        pbar.set_postfix(dict(total_it=it))
                        # BUG FIX: viz was dereferenced unconditionally here,
                        # crashing with the default viz=None. Guard it like
                        # every other viz call site.
                        if self.viz is not None:
                            self.viz.flush()
        return best_loss
# Never executed at runtime: gives the names used in the `# type: (...)`
# comment annotations (List, Dict, Any, AnyStr, Tuple, ...) a nominal origin
# without importing `typing` when the module is actually run.
if False:
    # Workaround for type hints without depending on the `typing` module
    from typing import *
class Seq(nn.Sequential):
    """Fluent builder for nn.Sequential networks.

    Each method appends a layer under a numeric name and returns self, while
    tracking the running channel count so successive layers chain their
    in/out sizes automatically.
    """
    def __init__(self, input_channels):
        super(Seq, self).__init__()
        self.count = 0  # next module index / name
        self.current_channels = input_channels  # output channels of the last layer

    def conv1d(
        self,
        out_size,
        kernel_size=1,
        stride=1,
        padding=0,
        dilation=1,
        activation=nn.ReLU(inplace=True),
        bn=False,
        init=nn.init.kaiming_normal_,
        bias=True,
        preact=False,
        name="",
        norm_layer=BatchNorm1d,
    ):
        # type: (Seq, int, int, int, int, int, Any, bool, Any, bool, bool, AnyStr) -> Seq
        """Append a Conv1d unit mapping current_channels -> out_size."""
        self.add_module(
            str(self.count),
            Conv1d(
                self.current_channels,
                out_size,
                kernel_size=kernel_size,
                stride=stride,
                padding=padding,
                dilation=dilation,
                activation=activation,
                bn=bn,
                init=init,
                bias=bias,
                preact=preact,
                name=name,
                norm_layer=norm_layer,
            ),
        )
        self.count += 1
        self.current_channels = out_size
        return self

    def conv2d(
        self,
        out_size,
        kernel_size=(1, 1),
        stride=(1, 1),
        padding=(0, 0),
        dilation=(1, 1),
        activation=nn.ReLU(inplace=True),
        bn=False,
        init=nn.init.kaiming_normal_,
        bias=True,
        preact=False,
        name="",
        norm_layer=BatchNorm2d,
    ):
        # type: (Seq, int, Tuple[int, int], Tuple[int, int], Tuple[int, int], Tuple[int, int], Any, bool, Any, bool, bool, AnyStr) -> Seq
        """Append a Conv2d unit mapping current_channels -> out_size."""
        self.add_module(
            str(self.count),
            Conv2d(
                self.current_channels,
                out_size,
                kernel_size=kernel_size,
                stride=stride,
                padding=padding,
                dilation=dilation,
                activation=activation,
                bn=bn,
                init=init,
                bias=bias,
                preact=preact,
                name=name,
                norm_layer=norm_layer,
            ),
        )
        self.count += 1
        self.current_channels = out_size
        return self

    def conv3d(
        self,
        out_size,
        kernel_size=(1, 1, 1),
        stride=(1, 1, 1),
        padding=(0, 0, 0),
        dilation=(1, 1, 1),
        activation=nn.ReLU(inplace=True),
        bn=False,
        init=nn.init.kaiming_normal_,
        bias=True,
        preact=False,
        name="",
        norm_layer=BatchNorm3d,
    ):
        # type: (Seq, int, Tuple[int, int, int], Tuple[int, int, int], Tuple[int, int, int], Tuple[int, int, int], Any, bool, Any, bool, bool, AnyStr) -> Seq
        """Append a Conv3d unit mapping current_channels -> out_size."""
        self.add_module(
            str(self.count),
            Conv3d(
                self.current_channels,
                out_size,
                kernel_size=kernel_size,
                stride=stride,
                padding=padding,
                dilation=dilation,
                activation=activation,
                bn=bn,
                init=init,
                bias=bias,
                preact=preact,
                name=name,
                norm_layer=norm_layer,
            ),
        )
        self.count += 1
        self.current_channels = out_size
        return self

    def fc(
        self,
        out_size,
        activation=nn.ReLU(inplace=True),
        bn=False,
        init=None,
        preact=False,
        name="",
    ):
        # type: (Seq, int, Any, bool, Any, bool, AnyStr) -> Seq
        """Append an FC unit mapping current_channels -> out_size."""
        self.add_module(
            str(self.count),
            FC(
                self.current_channels,
                out_size,
                activation=activation,
                bn=bn,
                init=init,
                preact=preact,
                name=name,
            ),
        )
        self.count += 1
        self.current_channels = out_size
        return self

    def dropout(self, p=0.5):
        # type: (Seq, float) -> Seq
        """Append an nn.Dropout layer with drop probability p."""
        # BUG FIX: `p` was previously ignored (nn.Dropout(p=0.5) hard-coded).
        self.add_module(str(self.count), nn.Dropout(p=p))
        self.count += 1
        return self

    def maxpool2d(
        self,
        kernel_size,
        stride=None,
        padding=0,
        dilation=1,
        return_indices=False,
        ceil_mode=False,
    ):
        """Append an nn.MaxPool2d layer (channel count unchanged)."""
        self.add_module(
            str(self.count),
            nn.MaxPool2d(
                kernel_size=kernel_size,
                stride=stride,
                padding=padding,
                dilation=dilation,
                return_indices=return_indices,
                ceil_mode=ceil_mode,
            ),
        )
        self.count += 1
        return self
| 28,499 | 27.904665 | 175 | py |
ShapeMOD | ShapeMOD-main/SA_lang/tasks/ShapeAssembly.py | # Taken from https://github.com/rkjones4/ShapeAssembly
import torch
import re
import numpy as np
import math
import ast
import sys
import faiss
from copy import deepcopy
"""
This file contains all of the logic in the ShapeAssembly DSL.
You can execute a ShapeAssembly program as follows:
> from ShapeAssembly import ShapeAssembly
> sa = ShapeAssembly()
> lines = sa.load_lines({path_to_program})
> sa.run(lines, {out_file_name})
The classes in this file are:
Cuboid -> Part Proxies represented as Cuboids in space
AttPoint -> Points that live within the local coordinate frames of cuboids -> specify where cuboids should attach
Program -> Parses lines, locally executes lines by creating Cuboids, AttPoints, and changing their attributes.
ShapeAssembly -> Entrypoint to language logic
"""
# Params controlling execution behavior
EPS = .01               # loose numeric tolerance (used by code outside this chunk)
SMALL_EPS = 1e-4        # tight numeric tolerance
COS_DIST_THRESH = 0.9   # cosine-similarity threshold — presumably for direction matching; TODO confirm at use sites
# Helper function: write mesh to out file
def writeObj(verts, faces, outfile):
    """Dump a triangle mesh to a Wavefront .obj file.

    `faces` is 0-indexed; .obj indices are 1-based, hence the +1.
    """
    obj_lines = [f'v {x} {y} {z}\n' for x, y, z in verts.tolist()]
    obj_lines += [f"f {i+1} {j+1} {k+1}\n" for i, j, k in faces.tolist()]
    with open(outfile, 'w') as fh:
        fh.writelines(obj_lines)
def samplePC(cubes, xyz1, xyz2):
    """Transform unit-cube samples into each cuboid's frame and index them.

    Each cube dict supplies dims ('xd','yd','zd'), a 'center', and two frame
    directions ('xdir','ydir'); the third axis is their cross product. For
    cube i, the samples xyzs[i] (in [0,1]^3) are mapped into world space, and
    a faiss L2 index is built over the resulting points. Returns
    {i: (points, faiss_index)}.

    NOTE(review): xyzs has exactly two entries, so this assumes at most two
    cubes — confirm against callers.
    """
    cube_geom = []
    for c in cubes:
        # Pack as [xd, yd, zd, center(3), xdir(3), ydir(3)] -> 12 floats.
        cube_geom.append(torch.cat((
            c['xd'].unsqueeze(0),
            c['yd'].unsqueeze(0),
            c['zd'].unsqueeze(0),
            c['center'],
            c['xdir'],
            c['ydir']
        )))
    scene_geom = torch.stack([c for c in cube_geom])
    ind_to_pc = {}
    xyzs = [xyz1, xyz2]
    for i in range(0, scene_geom.shape[0]):
        xyz = xyzs[i]
        s_inds = (torch.ones(1,xyz.shape[1]) * i).long()
        # Rotation whose columns are the (normalized) x, y and x-cross-y axes.
        s_r = torch.cat(
            (
                (scene_geom[s_inds][:, :, 6:9] / (scene_geom[s_inds][:, :, 6:9].norm(dim=2).unsqueeze(2) + 1e-8)).unsqueeze(3),
                (scene_geom[s_inds][:, :, 9:12] / (scene_geom[s_inds][:, :, 9:12].norm(dim=2).unsqueeze(2) + 1e-8)).unsqueeze(3),
                torch.cross(
                    scene_geom[s_inds][:, :, 6:9] / (scene_geom[s_inds][:, :, 6:9].norm(dim=2).unsqueeze(2) + 1e-8),
                    scene_geom[s_inds][:, :, 9:12] / (scene_geom[s_inds][:, :, 9:12].norm(dim=2).unsqueeze(2) + 1e-8)
                ).unsqueeze(3)
            ), dim = 3)
        # Center the unit samples, scale by dims, rotate, then translate.
        s_out = ((s_r @ (((xyz - .5) * scene_geom[s_inds][:, :, :3]).unsqueeze(-1))).squeeze() + scene_geom[s_inds][:, :, 3:6]).squeeze()
        ind_to_pc[i] = s_out
    res = {}
    for key in ind_to_pc:
        # Exact L2 nearest-neighbor index over the sampled points.
        index = faiss.IndexFlatL2(3)
        index.add(
            np.ascontiguousarray(ind_to_pc[key].cpu().numpy())
        )
        res[key] = (ind_to_pc[key], index)
    return res
# Helper: rotation matrix for `angle` radians about (unit) axis `normal`
def getRotMatrix(angle, normal):
    """Rodrigues rotation matrix for a rotation of `angle` about `normal`."""
    sin_a = torch.sin(angle)
    cos_a = torch.cos(angle)
    one_minus_c = 1 - cos_a
    nx, ny, nz = normal[0], normal[1], normal[2]
    row_x = torch.stack((
        cos_a + one_minus_c * nx * nx,
        one_minus_c * nx * ny - sin_a * nz,
        one_minus_c * nx * nz + sin_a * ny,
    ))
    row_y = torch.stack((
        one_minus_c * nx * ny + sin_a * nz,
        cos_a + one_minus_c * ny * ny,
        one_minus_c * ny * nz - sin_a * nx,
    ))
    row_z = torch.stack((
        one_minus_c * nx * nz - sin_a * ny,
        one_minus_c * ny * nz + sin_a * nx,
        cos_a + one_minus_c * nz * nz,
    ))
    return torch.stack((row_x, row_y, row_z))
# Helper function: Find a minimum rotation from the current direction to the target direction
def findMinRotation(cur, target):
    """Return a 3x3 rotation matrix taking direction `cur` toward `target`.

    Rotates about cross(cur, target) by the angle between them; colinear
    inputs get a pi rotation about a fallback axis. Returns identity when
    the angle is 0 or NaN.
    """
    assert(cur.norm() != 0)
    assert(target.norm() != 0)
    ncur = cur / cur.norm()
    ntarget = target / target.norm()
    normal = torch.cross(ncur, ntarget)
    # co-linear
    if normal.norm() == 0:
        # 90-degree rotations about x, y and z used to derive a fallback axis.
        r_x = torch.tensor([[1.0, 0.0, 0.0], [0.0, 0.0, -1.0], [0, 1.0, 0.0]])
        r_y = torch.tensor([[0.0, 0, 1.0], [0.0, 1.0, 0.0], [ -1.0, 0.0, 0.0]])
        r_z = torch.tensor([[0.0, -1.0, 0.0], [1.0, 0.0, 0.0], [0.0, 0.0, 1.0]])
        # NOTE(review): picking the axis where dot(r @ ncur, ncur) != 0
        # yields an axis NOT orthogonal to ncur, so the subsequent pi
        # rotation does not generally map cur onto -cur — confirm whether
        # callers ever hit this branch with opposite directions.
        if torch.dot(r_x @ ncur, ncur) != 0:
            cnormal = r_x @ ncur
        elif torch.dot(r_y @ ncur, ncur) != 0:
            # NOTE(review): uses `cur` (unnormalized) unlike the other two
            # branches; harmless since cnormal is normalized below.
            cnormal = r_y @ cur
        elif torch.dot(r_z @ ncur, ncur) != 0:
            cnormal = r_z @ ncur
        assert(cnormal.norm() != 0)
        nnormal = cnormal / cnormal.norm()
        angle = torch.tensor(math.pi)
    else:
        nnormal = normal / normal.norm()
        angle = torch.acos(torch.dot(ncur, ntarget))
    if angle == 0 or torch.isnan(angle).any():
        return torch.eye(3)
    return getRotMatrix(angle, nnormal)
class Cuboid():
"""
Cuboids are the base (and only) objects of a ShapeAssembly program. Dims are their dimensions, pos is the center of the cuboid, rfnorm (right face), tfnorm (top face) and ffnorm (front face) specify the orientation of the cuboid. The bounding volume is just a non-visible cuboid. Cuboids marked with the aligned flag behavior differently under attachment operations.
"""
    def __init__(self, name, aligned = False, vis = True):
        """Create a unit, axis-aligned cuboid centered at the origin.

        name    -- identifier used by program parsing/execution
        aligned -- aligned cuboids behave differently under attachment ops
        vis     -- False only for the bounding volume, which is not rendered
        """
        # The default cube is unit, axis-aligned, centered at the origin
        self.dims = torch.tensor([1.0,1.0,1.0])
        self.pos = torch.tensor([0.0,0.0,0.0])
        self.rfnorm = torch.tensor([1.0,0.0,0.0])  # right-face normal
        self.tfnorm = torch.tensor([0.0,1.0,0.0])  # top-face normal
        self.ffnorm = torch.tensor([0.0,0.0,1.0])  # front-face normal
        # Keep track of all attachment obligations this cube has
        self.attachments = []
        self.move_atts = []
        # The bbox is not visible, but is still a cuboid, otherwise this should be True
        self.is_visible = vis
        self.name = name
        self.parent = None
        self.parent_axis = None
        self.aligned = aligned
# Rotate the cuboid by the rotation matrix
def rotateCuboid(self, rotation):
self.rfnorm = rotation @ self.rfnorm
self.tfnorm = rotation @ self.tfnorm
self.ffnorm = rotation @ self.ffnorm
def flipCuboid(self, a_ind):
transform = torch.ones(3)
transform[a_ind] *= -1
self.pos = transform * self.pos
self.rfnorm = -1 * (transform * self.rfnorm)
self.tfnorm = -1 * (transform * self.tfnorm)
self.ffnorm = -1 * (transform * self.ffnorm)
# Get the corners of the cuboid
def getCorners(self):
xd = self.dims[0] / 2
yd = self.dims[1] / 2
zd = self.dims[2] / 2
corners = torch.stack((
(self.rfnorm * xd) + (self.tfnorm * yd) + (self.ffnorm * zd),
(self.rfnorm * xd) + (self.tfnorm * yd) + (self.ffnorm * -1 * zd),
(self.rfnorm * xd) + (self.tfnorm * -1 * yd) + (self.ffnorm * zd),
(self.rfnorm * xd) + (self.tfnorm * -1 * yd) + (self.ffnorm * -1 * zd),
(self.rfnorm * -1 * xd) + (self.tfnorm * yd) + (self.ffnorm * zd),
(self.rfnorm * -1 * xd) + (self.tfnorm * yd) + (self.ffnorm * -1 * zd),
(self.rfnorm * -1 * xd) + (self.tfnorm * -1 * yd) + (self.ffnorm * zd),
(self.rfnorm * -1 * xd) + (self.tfnorm * -1 * yd) + (self.ffnorm * -1 * zd),
))
return corners + self.pos
# Get the global point specified by relative coordinates x,y,z
def getPos(self, x, y, z):
pt = torch.stack((x, y, z))
r = torch.stack((
self.rfnorm,
self.tfnorm,
self.ffnorm
)).T
t_dims = torch.stack((self.dims[0], self.dims[1], self.dims[2]))
return (r @ ((pt - .5) * t_dims)) + self.pos
# Get the relative position of global poiunt gpt
def getRelPos(self, gpt, normalize = False):
O = self.getPos(
torch.tensor(0.),
torch.tensor(0.),
torch.tensor(0.)
)
A = torch.stack([
self.dims[0].clone() * self.rfnorm.clone(),
self.dims[1].clone() * self.tfnorm.clone(),
self.dims[2].clone() * self.ffnorm.clone()
]).T
B = gpt - O
p = A.inverse() @ B
if normalize:
return torch.clamp(p, 0.0, 1.0)
return p
# Make the cuboid bigger by a multiplied factor of scale (either dim 3 or dim 1)
def scaleCuboid(self, scale):
self.dims *= scale
# Make the cuboid bigger by an added factor of scale to a specific dimension
def increaseDim(self, dim, inc):
dim_to_scale = {
"height": torch.tensor([0.0, 1.0, 0.0]),
"width": torch.tensor([0.0, 0.0, 1.0]),
"length": torch.tensor([1.0, 0.0, 0.0])
}
s = dim_to_scale[dim] * inc
self.dims += s
# Move the center of the cuboid by the translation vector
def translateCuboid(self, translation):
self.pos += translation
# Used to convert cuboid into triangles on its faces
def getTriFaces(self):
return [
[0, 2, 1],
[1, 2, 3],
[0, 4, 6],
[0, 6, 2],
[1, 3, 5],
[3, 7, 5],
[4, 5, 7],
[4, 7, 6],
[1, 5, 4],
[0, 1, 4],
[2, 6, 7],
[2, 7, 3]
]
# Get the triangulation of the cuboid corners, for visualization + sampling
def getTris(self):
if self.is_visible:
verts = self.getCorners()
faces = torch.tensor(self.getTriFaces(), dtype=torch.long)
return verts, faces
return None, None
# Return any attachments that are on this cuboid
def getAttachments(self):
return self.attachments
# Return the cuboid's parameterization
def getParams(self):
return torch.cat((
self.dims, self.pos, self.rfnorm, self.tfnorm, self.ffnorm
))
class AttPoint():
    """
    Attachment Points live in the local [0, 1]^3 coordinate frame of a
    cuboid. They are used to connect cuboids together.
    """

    def __init__(self, cuboid, x, y, z):
        self.cuboid = cuboid
        self.x = x
        self.y = y
        self.z = z

    # Global position is fully determined by the owning cuboid + (x, y, z).
    def getPos(self):
        return self.cuboid.getPos(self.x, self.y, self.z)

    # Rate of change of this point per unit growth of the cuboid's height.
    def getChangeVectorHeight(self):
        return (self.y - .5) * self.cuboid.tfnorm

    # Rate of change of this point per unit growth of the cuboid's length.
    def getChangeVectorLength(self):
        return (self.x - .5) * self.cuboid.rfnorm

    # Rate of change of this point per unit growth of the cuboid's width.
    def getChangeVectorWidth(self):
        return (self.z - .5) * self.cuboid.ffnorm

    # Dispatch: rate of change of this point when the named dimension grows.
    def getChangeVector(self, dim):
        handlers = {
            'height': self.getChangeVectorHeight,
            'length': self.getChangeVectorLength,
            'width': self.getChangeVectorWidth,
        }
        return handlers[dim]()

    # Which face the point moves with when height is scaled.
    def getChangeDirHeight(self):
        if self.y > .5:
            return 'top'
        if self.y < .5:
            return 'bot'
        return 'none'

    # Which face the point moves with when length is scaled.
    def getChangeDirLength(self):
        if self.x > .5:
            return 'right'
        if self.x < .5:
            return 'left'
        return 'none'

    # Which face the point moves with when width is scaled.
    def getChangeDirWidth(self):
        if self.z > .5:
            return 'front'
        if self.z < .5:
            return 'back'
        return 'none'

    # Dispatch: face name the point moves with for the named dimension.
    def getChangeDir(self, dim):
        handlers = {
            'height': self.getChangeDirHeight,
            'length': self.getChangeDirLength,
            'width': self.getChangeDirWidth,
        }
        return handlers[dim]()
class Program():
    """
    A program maintains a representation of an entire shape, including all
    member cuboids and all attachment points. `execute` is the entrypoint
    for text program lines; `runProgram` replays parsed (command, params)
    pairs, which is how differentiable fitting re-runs a program.
    """
    def __init__(self, cuboids=None):
        # Fix: the default was previously a shared mutable `{}` (classic
        # mutable-default-argument pitfall); a None sentinel avoids state
        # leaking between Program instances. Behavior is otherwise the same.
        self.cuboids = self.getBoundBox()
        if cuboids:
            self.cuboids.update(cuboids)
        self.commands = []
        self.parameters = []
        self.att_points = {}
        self.resource = None
        # CONSTANTS For PC Intersection: a DIM^3 lattice of sample points
        # over the unit cube, plus the subsets lying on the top / bottom
        # faces (y == 1 / y == 0) used when attaching against the bbox.
        DIM = 10
        a = (torch.arange(DIM).float()/(DIM-1))
        b = a.unsqueeze(0).unsqueeze(0).repeat(DIM, DIM, 1)
        c = a.unsqueeze(0).unsqueeze(2).repeat(DIM, 1, DIM)
        d = a.unsqueeze(1).unsqueeze(2).repeat(1, DIM, DIM)
        g = torch.stack((b,c,d), dim=3).view(-1, 3)
        self.s_xyz = g.unsqueeze(0)
        bb_top_inds = (g[:,1] == 1.0).nonzero().squeeze()
        bb_bot_inds = (g[:,1] == 0.0).nonzero().squeeze()
        self.bb_top_xyz = g[bb_top_inds].unsqueeze(0)
        self.bb_bot_xyz = g[bb_bot_inds].unsqueeze(0)
    # Mirror every non-bbox cuboid across the named bbox axis ('X'|'Y'|'Z').
    def flip(self, flip_axis):
        if flip_axis == 'X':
            axis = 0
        elif flip_axis == 'Y':
            axis = 1
        elif flip_axis == 'Z':
            axis = 2
        for name, c in self.cuboids.items():
            if name == 'bbox':
                continue
            c.flipCuboid(axis)
    # Each program starts off with an invisible bounding box
    def getBoundBox(self):
        bbox = Cuboid("bbox", aligned = True, vis=False)
        return {
            "bbox": bbox
        }
    # Get the triangles in the current scene -> first index is bounding box so skipped
    def getShapeGeo(self):
        if len(self.cuboids) < 2:
            return None, None
        cuboids = list(self.cuboids.values())
        verts = torch.tensor([],dtype=torch.float)
        faces = torch.tensor([],dtype=torch.long)
        for cube in cuboids[1:]:
            v, f = cube.getTris()
            if v is not None and f is not None:
                faces = torch.cat((faces, (f + verts.shape[0])))
                verts = torch.cat((verts, v))
        return verts, faces
    # Make an obj of the current scene
    def render(self, ofile = "output.obj"):
        verts, faces = self.getShapeGeo()
        writeObj(verts, faces, ofile)
    # Parses a cuboid text line into (name, dim0, dim1, dim2, aligned).
    def parseCuboid(self, line):
        s = re.split(r'[()]', line)
        name = s[0].split("=")[0].strip()
        dim0 = None
        dim1 = None
        dim2 = None
        aligned = False
        params = s[1].split(',')
        dim0 = torch.tensor(float(params[0]))
        dim1 = torch.tensor(float(params[1]))
        dim2 = torch.tensor(float(params[2]))
        if len(params) == 4:
            aligned = ast.literal_eval(params[3].strip())
            assert isinstance(aligned, bool), 'aligned not a bool'
        return (name, dim0, dim1, dim2, aligned)
    # Construct a new cuboid, add it to state (or resize an existing one).
    def executeCuboid(self, parse):
        name = parse[0]
        if name in self.cuboids:
            c = self.cuboids[name]
            c.dims = torch.stack((parse[1], parse[2], parse[3]))
        else:
            c = Cuboid(
                parse[0],
                aligned = parse[4],
            )
            c.scaleCuboid(torch.stack((parse[1], parse[2], parse[3])))
            self.cuboids.update({
                parse[0]: c
            })
    # Logic for cuboids with no previous attachment. Finds a translation to satisfy the attachment
    def first_attach(self, ap, gpos):
        cur_pos = ap.getPos()
        diff = gpos - cur_pos
        ap.cuboid.translateCuboid(diff)
        return True
    # Logic for unaligned cuboids with one previous attachment. Find a scale and rotation to satisfy the attachment
    def second_attach(self, ap, gpos, prev_att):
        """Scale one dimension (or all three) so the distance between the
        previous attachment and the new one can be met, then rotate and
        re-translate so both constraints are satisfied."""
        p_ap = prev_att[0]
        p_gpos = prev_att[1]
        a = p_gpos
        b = ap.getPos()
        c = gpos
        if (b-c).norm() < SMALL_EPS:
            return True
        # Increase dimension to fix distance
        dist = (c-a).norm()
        min_dim = 'height'
        min_sf = 1e8
        for dim in ('height', 'width', 'length'):
            nsf = ap.getChangeVector(dim)
            psf = p_ap.getChangeVector(dim)
            if nsf.abs().sum() + psf.abs().sum() < SMALL_EPS:
                continue
            cn = b - a
            dn = nsf - psf
            at = (dn**2).sum()
            bt = 2 * (cn*dn).sum()
            ct = (cn**2).sum() - (dist**2)
            # Take the positive solution of the quadratic equation
            sf = ((-1 * bt) + (bt**2 - (4*at*ct) ).sqrt()) / ((2 * at) + 1e-8)
            if abs(sf) < abs(min_sf) and (bt**2 - (4*at*ct)) > 0:
                min_sf = sf
                min_dim = dim
        if min_sf == 1e8:
            # No single dimension works: grow all three uniformly instead.
            nsf = ap.getChangeVector('height') + \
                  ap.getChangeVector('length') + \
                  ap.getChangeVector('width')
            psf = p_ap.getChangeVector('height') + \
                  p_ap.getChangeVector('length') + \
                  p_ap.getChangeVector('width')
            cn = b - a
            dn = nsf - psf
            at = (dn**2).sum()
            bt = 2 * (cn*dn).sum()
            ct = (cn**2).sum() - (dist**2)
            # Take the positive solution of the quadratic equation
            sf = ((-1 * bt) + (bt**2 - (4*at*ct) ).sqrt()) / ((2 * at) + 1e-8)
            if not torch.isnan(sf) and (bt**2 - (4*at*ct)) > 0:
                ap.cuboid.increaseDim('height', sf)
                ap.cuboid.increaseDim('length', sf)
                ap.cuboid.increaseDim('width', sf)
        else:
            ap.cuboid.increaseDim(min_dim, min_sf)
        # Reset the position of the cuboid such that the previous attachment is satisfied
        diff = p_gpos - p_ap.getPos()
        ap.cuboid.translateCuboid(diff)
        # find rotation to match points
        nb = ap.getPos() - p_gpos
        nc = c - p_gpos
        # If we are already in the correct position, don't rotate
        if nb.norm() == 0 or nc.norm() == 0 or (nb-nc).norm() < SMALL_EPS:
            return True
        rot_mat = findMinRotation(nb, nc)
        ap.cuboid.rotateCuboid(rot_mat)
        # Reset the position of the cuboid such that the attachments are satisfied
        sdiff = p_gpos - p_ap.getPos()
        ap.cuboid.translateCuboid(sdiff)
        return True
    # Returns if all of the previous attachment points on cuboid are co-linear
    def checkColinear(self, prev_atts):
        if len(prev_atts) == 2:
            return True
        else:
            o = prev_atts[0][1]
            l = prev_atts[1][1] - o
            for i in range(2, len(prev_atts)):
                n = prev_atts[i][1] - o
                if torch.cross(n, l).norm() > EPS:
                    return False
            return True
    # For cuboids with 2+ attachments, checks if changing orientation would improve the fit
    def doOrient(self, ap, gpos, prev_atts):
        """Rotate about the line through the first two previous attachment
        points so the new attachment point moves toward gpos."""
        p_ap0 = prev_atts[0][0]
        p_gpos0 = prev_atts[0][1]
        p_ap1 = prev_atts[1][0]
        p_gpos1 = prev_atts[1][1]
        a0 = p_gpos0
        a1 = p_gpos1
        b = ap.getPos()
        c = gpos
        axis = (a0 - a1) / ((a0 - a1).norm() + 1e-8)
        # project c onto the plane defined by b and the axis of rotation
        tc = (axis.dot(b) - axis.dot(c)) / (axis.dot(axis) + 1e-8)
        c_p = c + (tc * axis)
        # project a0 onto the plane defined by b and the axis of rotation, this is the origin of rotation
        ta = (axis.dot(b) - axis.dot(a0)) / (axis.dot(axis) + 1e-8)
        a_p = a0 + (ta * axis)
        # project center of the cube onto the plane defined by b and the axis of rotation
        center = ap.cuboid.pos
        to = (axis.dot(b) - axis.dot(center)) / (axis.dot(axis) + 1e-8)
        o_p = center + (to * axis)
        fn = (b - a_p)
        noc = c_p - a_p
        at = fn.dot(fn)
        bt = 2 * (b-a_p).dot(fn)
        ct = (b-a_p).dot((b-a_p)) - noc.dot(noc)
        sf = ((-1 * bt) + (bt**2 - (4*at*ct) ).sqrt()) / ((2 * at) + 1e-8)
        if torch.isnan(sf):
            return False
        b_p = b + (sf * fn)
        nob = b_p - a_p
        # If we are already in the correct position, don't rotate
        if nob.norm() == 0 or noc.norm() == 0 or (nob-noc).norm() < EPS:
            return True
        nb = nob / (nob.norm() + 1e-8)
        nc = noc / (noc.norm() + 1e-8)
        angle = torch.acos(torch.dot(nb, nc))
        rot_mat = getRotMatrix(angle, axis)
        # make sure axis is oriented correctly
        if ((rot_mat @ nb) - nc).norm() > EPS:
            rot_mat = getRotMatrix(angle, -1*axis)
        ap.cuboid.rotateCuboid(rot_mat)
        diff = a0 - p_ap0.getPos()
        ap.cuboid.translateCuboid(diff)
    # For cuboids with 2+ attachments, checks if scaling any dimension would improve the fit
    def maybeScale(self, ap, gpos, prev_atts):
        b = ap.getPos()
        c = gpos
        t_dir = c - b
        if t_dir.norm() < EPS:
            return
        # Can only scale a dimension if all of the CV for that dimension are the same
        eq_cv_dims = []
        for dim in ('height', 'width', 'length'):
            cv = prev_atts[0][0].getChangeVector(dim)
            add = True
            for p_ap in prev_atts:
                if (cv - p_ap[0].getChangeVector(dim)).norm() > EPS:
                    add = False
            if add:
                eq_cv_dims.append(dim)
        dirs_to_avoid = set()
        best_dim = None
        cos_dist = COS_DIST_THRESH
        nt_dir = t_dir / (t_dir.norm() + 1e-8)
        # See if scaling any direction would improve the fit (i.e. has a valid solution to quadratic equation)
        for dim in eq_cv_dims:
            sf = ap.getChangeVector(dim)
            dr = ap.getChangeDir(dim)
            if dr in dirs_to_avoid:
                continue
            nsf = sf / (sf.norm() + 1e-8)
            if torch.isnan(nsf).any().tolist():
                continue
            if nt_dir.dot(nsf) > cos_dist:
                best_dim = dim
                # Fix: update `cos_dist` (was a dead `cost_dist` typo), so the
                # best-aligned dimension wins rather than the last candidate
                # above the initial threshold.
                cos_dist = nt_dir.dot(nsf)
        if best_dim is None:
            return False
        axis = ap.getChangeVector(best_dim)
        paxis = prev_atts[0][0].getChangeVector(best_dim)
        d = 1.0 / ((axis-paxis).norm() + 1e-8)
        naxis = axis / (axis.norm() + 1e-8)
        t = (naxis.dot(b) - naxis.dot(c)) / (naxis.dot(naxis) + 1e-8)
        c_p = c + (t * naxis)
        offset = c_p - b
        c_g = c - offset
        sf = (c_g - b).norm()
        ap.cuboid.increaseDim(best_dim, sf*d)
        diff = prev_atts[0][1] - prev_atts[0][0].getPos()
        ap.cuboid.translateCuboid(diff)
    # Given cuboids a and b, find the closest pair of points in their local coordinate frames to one another
    def getClosestPoints(self, a, b, xyz1, xyz2, is_bbox):
        if not is_bbox:
            # If either cuboid already contains a corner of the other,
            # they interpenetrate and there is nothing to do.
            a_corners = a.getCorners()
            for i in range(a_corners.shape[0]):
                rp = b.getRelPos(a_corners[i])
                if rp.min() >= 0.0 and rp.max() <= 1.0:
                    return None, None
            b_corners = b.getCorners()
            for i in range(b_corners.shape[0]):
                rp = a.getRelPos(b_corners[i])
                if rp.min() >= 0.0 and rp.max() <= 1.0:
                    return None, None
        ind_to_pc = samplePC(
            [
                {
                    'xd': a.dims[0].detach(),
                    'yd': a.dims[1].detach(),
                    'zd': a.dims[2].detach(),
                    'center': a.pos.detach(),
                    'xdir': a.rfnorm.detach(),
                    'ydir': a.tfnorm.detach(),
                },
                {
                    'xd': b.dims[0].detach(),
                    'yd': b.dims[1].detach(),
                    'zd': b.dims[2].detach(),
                    'center': b.pos.detach(),
                    'xdir': b.rfnorm.detach(),
                    'ydir': b.tfnorm.detach()
                }
            ],
            xyz1,
            xyz2
        )
        a_pc = ind_to_pc[0][0].unsqueeze(0)
        b_pc = ind_to_pc[1][0].unsqueeze(0)
        b_index = ind_to_pc[1][1]
        a_query = np.ascontiguousarray(
            a_pc.data.clone().detach().cpu().numpy()
        )
        # Nearest-neighbor search of a's samples against b's index.
        D, I = b_index.search(a_query[0], 1)
        I_var = torch.from_numpy(np.ascontiguousarray(I).astype(np.int64)).squeeze()
        # Fix: np.float (a deprecated alias of builtin float, i.e. float64)
        # was removed in NumPy 1.24; np.float64 is the identical dtype.
        D_var = torch.from_numpy(np.ascontiguousarray(D).astype(np.float64)).squeeze()
        a_ind = D_var.argmin()
        b_ind = I_var[a_ind]
        a_pt = a_pc[0, a_ind]
        b_pt = b_pc[0, b_ind]
        return a_pt.cpu(), b_pt.cpu()
    # For aligned cuboids with a previous attachments,
    # see if increasing any dimension would cause the fit to be improved
    def aligned_attach(self, ap, oap):
        if oap.cuboid.name != 'bbox':
            xyz2 = self.s_xyz
        else:
            # Against the bbox, only sample the face being attached to.
            if oap.y > 0.5:
                xyz2 = self.bb_top_xyz
            else:
                xyz2 = self.bb_bot_xyz
        b, c = self.getClosestPoints(
            ap.cuboid,
            oap.cuboid,
            self.s_xyz,
            xyz2,
            oap.cuboid.name == 'bbox'
        )
        if b is None or c is None:
            return
        mv = max(
            ap.cuboid.dims.max().item(),
            oap.cuboid.dims.max().item()
        )
        # Already close enough relative to the cuboid sizes: nothing to do.
        if (b-c).norm() < mv * .05:
            return
        rp = ap.cuboid.getRelPos(b, True)
        n_ap = AttPoint(ap.cuboid, rp[0], rp[1], rp[2])
        # Opposite point, used to re-anchor the cuboid after growing.
        o_ap = AttPoint(ap.cuboid, 1-rp[0], 1-rp[1], 1-rp[2])
        ppos = o_ap.getPos()
        D_l = 0
        D_h = 0
        D_w = 0
        l_cv = n_ap.getChangeVector('length')[0]
        if l_cv != 0:
            D_l = (c[0] - b[0]) / (2*l_cv)
        h_cv = n_ap.getChangeVector('height')[1]
        if h_cv != 0:
            D_h = (c[1] - b[1]) / (2*h_cv)
        w_cv = n_ap.getChangeVector('width')[2]
        if w_cv != 0:
            D_w = (c[2] - b[2]) / (2*w_cv)
        if D_l > 0:
            ap.cuboid.increaseDim("length", D_l)
        if D_h > 0:
            ap.cuboid.increaseDim("height", D_h)
        if D_w > 0:
            ap.cuboid.increaseDim("width", D_w)
        diff = ppos - o_ap.getPos()
        ap.cuboid.translateCuboid(diff)
    # Attachment for a non-aligned cuboid with 2+ previous attachments. Only try to orient the cuboid if its previous attachment are
    # all colinear.
    def gen_attach(self, ap, gpos, prev_atts):
        if self.checkColinear(prev_atts):
            self.doOrient(ap, gpos, prev_atts)
        self.maybeScale(ap, gpos, prev_atts)
    # Moves the attach point to the global position
    def attach(self, ap, gpos, oci, oap=None):
        assert ap.cuboid.name != "bbox", 'tried to move the bbox'
        if ap.cuboid.aligned:
            self.aligned_cube_attach(ap, gpos, oci, oap)
        else:
            self.free_cube_attach(ap, gpos, oci)
    # Aligned attachment
    def aligned_cube_attach(self, ap, gpos, oci, oap):
        prev_atts = ap.cuboid.getAttachments()
        if len(prev_atts) == 0:
            self.first_attach(ap, gpos)
        else:
            self.aligned_attach(ap, oap)
        prev_atts.append((ap, gpos, oci))
        ap.cuboid.move_atts.append((ap, gpos, oci))
    # Non-aligned attachment: dispatch on how many constraints already exist.
    def free_cube_attach(self, ap, gpos, oci):
        prev_atts = ap.cuboid.getAttachments()
        if len(prev_atts) == 0:
            self.first_attach(ap, gpos)
        elif len(prev_atts) == 1:
            self.second_attach(ap, gpos, prev_atts[0])
        else:
            self.gen_attach(ap, gpos, prev_atts)
        prev_atts.append((ap, gpos, oci))
        ap.cuboid.move_atts.append((ap, gpos, oci))
    # Parses an attach line into (name1, name2, x1, y1, z1, x2, y2, z2).
    def parseAttach(self, line):
        s = re.split(r'[()]', line)
        args = [a.strip() for a in s[1].split(',')]
        return (
            args[0],
            args[1],
            torch.tensor(float(args[2])),
            torch.tensor(float(args[3])),
            torch.tensor(float(args[4])),
            torch.tensor(float(args[5])),
            torch.tensor(float(args[6])),
            torch.tensor(float(args[7]))
        )
    # Execute an attach line, creates two attachment points, then figures out how to best satisfy new constraint
    def executeAttach(self, parse):
        ap1 = AttPoint(
            self.cuboids[parse[0]],
            parse[2],
            parse[3],
            parse[4],
        )
        ap2 = AttPoint(
            self.cuboids[parse[1]],
            parse[5],
            parse[6],
            parse[7],
        )
        ap_pt_name = f'{parse[0]}_to_{parse[1]}'
        # Attach points should have unique names
        while ap_pt_name in self.att_points:
            ap_pt_name += '_n'
        self.att_points[ap_pt_name] = ap2
        ap2.cuboid.getAttachments().append((ap2, ap2.getPos(), ap1.cuboid.name))
        self.attach(ap1, ap2.getPos(), ap2.cuboid.name, ap2)
    # Parses a reflect command
    def parseReflect(self, line):
        s = re.split(r'[()]', line)
        args = [a.strip() for a in s[1].split(',')]
        return (
            args[0],
            args[1],
        )
    # Parses a translate command
    def parseTranslate(self, line):
        s = re.split(r'[()]', line)
        args = [a.strip() for a in s[1].split(',')]
        return (
            args[0],
            args[1],
            int(args[2]),
            float(args[3])
        )
    # Parses a squeeze command
    def parseSqueeze(self, line):
        s = re.split(r'[()]', line)
        args = [a.strip() for a in s[1].split(',')]
        return (
            args[0],
            args[1],
            args[2],
            args[3],
            float(args[4]),
            float(args[5])
        )
    # Help function for getting direction of reflect commands
    def getRefDir(self, d):
        bbox = self.cuboids['bbox']
        if d == 'X':
            return bbox.rfnorm.clone()
        elif d == 'Y':
            return bbox.tfnorm.clone()
        elif d == 'Z':
            return bbox.ffnorm.clone()
        else:
            assert False, 'bad reflect argument'
    # Help function for getting direction + scale of translate commands
    def getTransDir(self, d):
        bbox = self.cuboids['bbox']
        if d == 'X':
            return bbox.rfnorm.clone(), bbox.dims[0].clone()
        elif d == 'Y':
            return bbox.tfnorm.clone(), bbox.dims[1].clone()
        elif d == 'Z':
            return bbox.ffnorm.clone(), bbox.dims[2].clone()
        else:
            assert False, 'bad reflect argument'
    # Given an axis + a center, constructs a 3x4 homogeneous reflection matrix
    def getRefMatrixHomo(self, axis, center):
        m = center
        d = axis / axis.norm()
        refmat = torch.stack((
            torch.stack((1 - 2 * d[0] * d[0], -2 * d[0] * d[1], -2 * d[0] * d[2], 2 * d[0] * d[0] * m[0] + 2 * d[0] * d[1] * m[1] + 2 * d[0] * d[2] * m[2])),
            torch.stack((-2 * d[1] * d[0], 1 - 2 * d[1] * d[1], -2 * d[1] * d[2], 2 * d[1] * d[0] * m[0] + 2 * d[1] * d[1] * m[1] + 2 * d[1] * d[2] * m[2])),
            torch.stack((-2 * d[2] * d[0], -2 * d[2] * d[1], 1 - 2 * d[2] * d[2], 2 * d[2] * d[0] * m[0] + 2 * d[2] * d[1] * m[1] + 2 * d[2] * d[2] * m[2]))
        ))
        return refmat
    # Reflect a point p, about center and a direction ndir
    def reflect_point(self, p, center, ndir):
        pad = torch.nn.ConstantPad1d((0, 1), 1.0)
        reflection = self.getRefMatrixHomo(ndir, center)
        posHomo = pad(p)
        return reflection @ posHomo
    # Given angle, axis and center constructs a 3x4 homogeneous rotation matrix
    def getRotMatrixHomo(self, angle, axis, center):
        s = torch.sin(angle)
        c = torch.cos(angle)
        naxis = axis / axis.norm()
        nx = naxis[0]
        ny = naxis[1]
        nz = naxis[2]
        tmpmat = torch.stack((
            torch.stack((c + (1 - c) * nx * nx, (1 - c) * nx * ny - s * nz, (1 - c) * nx * nz + s * ny)),
            torch.stack(((1 - c) * nx * ny + s * nz, c + (1 - c) * ny * ny, (1 - c) * ny * nz - s * nx)),
            torch.stack(((1 - c) * nx * nz - s * ny, (1 - c) * ny * nz + s * nx, c + (1 - c) * nz * nz))
        ))
        rotmat = torch.cat((tmpmat, torch.zeros(3,1)), dim = 1)
        lc = (torch.matmul(torch.eye(3) - tmpmat, center)).unsqueeze(1)
        return torch.cat((rotmat[:,:3], lc), dim = 1)
    # Executes a reflect line by making + executing new Cuboid and attach lines
    def executeReflect(self, parse):
        c = self.cuboids[parse[0]]
        assert c.name != "bbox", 'tried to move the bbox'
        rdir = self.getRefDir(parse[1])
        name = c.name + '_ref'
        self.executeCuboid([f'{name}', c.dims[0].clone(), c.dims[1].clone(), c.dims[2].clone(), c.aligned])
        self.cuboids[f'{name}'].parent = c.name
        self.cuboids[f'{name}'].parent_axis = parse[1]
        atts = c.move_atts
        for att in atts:
            # Mirror the relative coordinate along the reflected axis.
            if parse[1] == 'X':
                x = 1 - att[0].x.clone()
            else:
                x = att[0].x.clone()
            if parse[1] == 'Y':
                y = 1 - att[0].y.clone()
            else:
                y = att[0].y.clone()
            if parse[1] == 'Z':
                z = 1 - att[0].z.clone()
            else:
                z = att[0].z.clone()
            n = att[2]
            cpt = att[0].getPos().clone()
            rpt = self.reflect_point(cpt, self.cuboids['bbox'].pos.clone(), rdir)
            rrpt = self.cuboids[n].getRelPos(rpt, True)
            self.executeAttach([f'{name}', f'{n}', x, y, z, rrpt[0], rrpt[1], rrpt[2]])
    # Executes a translate line by making + executing new Cuboid and attach lines
    def executeTranslate(self, parse):
        c = self.cuboids[parse[0]]
        assert c.name != "bbox", 'tried to move the bbox'
        tdir, td = self.getTransDir(parse[1])
        N = parse[2]
        scale = (td * parse[3]) / float(N)
        for i in range(1, N+1):
            name = c.name + f'_trans_{i}'
            self.executeCuboid([f'{name}', c.dims[0].clone(), c.dims[1].clone(), c.dims[2].clone(), c.aligned])
            self.cuboids[f'{name}'].parent = c.name
            atts = c.move_atts
            for att in atts:
                x = att[0].x
                y = att[0].y
                z = att[0].z
                n = att[2]
                cpt = att[0].getPos()
                rpt = cpt + (tdir * scale * i)
                rrpt = self.cuboids[n].getRelPos(rpt, True)
                self.executeAttach([f'{name}', f'{n}', x, y, z, rrpt[0], rrpt[1], rrpt[2]])
    # Helper function for finding opposite face
    def getOppFace(self, face):
        of = {
            'right': 'left',
            'left': 'right',
            'top': 'bot',
            'bot': 'top',
            'front': 'back',
            'back': 'front',
        }
        return of[face]
    # Local coordinate frame to center of face conversion
    def getFacePos(self, face):
        ft = {
            'right': ([1.0, 0.5, 0.5], 0, 0.),
            'left': ([0.0, 0.5, 0.5], 0, 1.),
            'top': ([.5, 1.0, 0.5], 1, 0.),
            'bot': ([.5, 0.0, 0.5], 1, 1.),
            'front': ([.5, 0.5, 1.0], 2, 0.),
            'back': ([.5, 0.5, 0.0], 2, 1.),
        }
        return ft[face]
    # Converts squeeze parameters into parameters needed for the two attachment operators.
    def getSqueezeAtt(self, face, u, v, is_bbox):
        at1, ind, val = self.getFacePos(face)
        # bbox is "flipped"
        if is_bbox:
            rval = 1-val
        else:
            rval = val
        at2 = torch.zeros(3).float()
        q = [u, v]
        for i in range(3):
            if i == ind:
                at2[i] = rval
            else:
                at2[i] = q.pop(0)
        return torch.tensor(at1).float(), at2
    # Executes a squeeze line by making + executing new Cuboid and attach lines
    def executeSqueeze(self, parse):
        face = parse[3]
        oface = self.getOppFace(face)
        atc1, ato1 = self.getSqueezeAtt(
            face, parse[4], parse[5], parse[1] == 'bbox'
        )
        atc2, ato2 = self.getSqueezeAtt(
            oface, parse[4], parse[5], parse[2] == 'bbox'
        )
        self.executeAttach([parse[0], parse[1], atc1[0], atc1[1], atc1[2], ato1[0], ato1[1], ato1[2]])
        self.executeAttach([parse[0], parse[2], atc2[0], atc2[1], atc2[2], ato2[0], ato2[1], ato2[2]])
    # Clear cuboids + attachment points, but keep the commands that made them in memory
    def resetState(self):
        self.cuboids = self.getBoundBox()
        self.att_points = {}
    # Supported commands and their execution functions
    # Commands are first parsed to get their type + parameters. Then, the line is executed by calling to the appropriate execute function
    def execute(self, line):
        res = None
        if "Cuboid(" in line:
            parse = self.parseCuboid(line)
            self.executeCuboid(parse)
        elif "attach(" in line:
            parse = self.parseAttach(line)
            self.executeAttach(parse)
        elif "reflect(" in line:
            parse = self.parseReflect(line)
            res = self.executeReflect(parse)
        elif "translate(" in line:
            parse = self.parseTranslate(line)
            res = self.executeTranslate(parse)
        elif "squeeze(" in line:
            parse = self.parseSqueeze(line)
            res = self.executeSqueeze(parse)
        # return any new lines generated by macros
        return res
    # To re-run a program given a set of commands and parameters. Often used during fitting to unstructured geometry.
    def runProgram(self, param_lines):
        self.resetState()
        command_to_func = {
            "Cuboid": self.executeCuboid,
            "attach": self.executeAttach,
            "squeeze": self.executeSqueeze,
            "translate": self.executeTranslate,
            "reflect": self.executeReflect
        }
        for command, parse in param_lines:
            func = command_to_func[command]
            func(parse)
# ** Helper Functions FOR ShapeAssembly Class **
def lineToAttrs(line):
    """Parse one program line into (command name, learnable tensor, parse list).

    The continuous parameters (at `param_inds`) are collected into a single
    torch.nn.Parameter and spliced back into the parse, so replaying the
    program is differentiable w.r.t. that tensor.

    Raises ValueError for a line that matches no known command (previously
    this fell through to an UnboundLocalError).
    """
    P = Program()
    if "Cuboid(" in line:
        func = "Cuboid"
        parse = list(P.parseCuboid(line))
        param_inds = [1, 2, 3]
    elif "attach(" in line:
        func = "attach"
        parse = list(P.parseAttach(line))
        param_inds = [2, 3, 4, 5, 6, 7]
    elif "reflect(" in line:
        func = "reflect"
        parse = list(P.parseReflect(line))
        param_inds = []
    elif "translate(" in line:
        func = "translate"
        parse = list(P.parseTranslate(line))
        param_inds = [3]
    elif "squeeze(" in line:
        func = "squeeze"
        parse = list(P.parseSqueeze(line))
        param_inds = [4, 5]
    else:
        raise ValueError(f'unrecognized program line: {line}')
    tensor = torch.nn.Parameter(
        torch.tensor([parse[i] for i in param_inds])
    )
    for i, j in enumerate(param_inds):
        parse[j] = tensor[i]
    return func, tensor, parse
def make_hier_prog(lines):
    """Parse the raw text of a hierarchical ShapeAssembly program into a tree.

    Each `Assembly <name> { ... }` section becomes a node. The returned root
    dict has keys 'name', 'prog' (the section's program lines, with the
    leading tab and trailing newline stripped) and 'children' (one entry per
    Cuboid line: {} for leaves, a sub-tree dict for sub-program instances).
    """
    all_progs = {}
    root_name = None
    cur_name = None
    cur_prog = []
    cur_children = []
    for line in lines:
        # Fix: skip whitespace-only lines too -- a bare "\n" previously
        # crashed on ls[0] below.
        if not line.strip():
            continue
        ls = line.strip().split()
        if ls[0] == 'Assembly':
            cur_name = ls[1]
            if root_name is None:
                root_name = cur_name
        elif ls[0] == '}':
            # End of the current Assembly section.
            all_progs[cur_name] = (cur_prog, cur_children)
            cur_children = []
            cur_prog = []
            cur_name = None
        else:
            if 'Cuboid' in line:
                # A cuboid named Program_* instantiates a sub-program.
                if 'Program_' in line:
                    cur_children.append(ls[0])
                else:
                    cur_children.append(None)
            # Drop the leading tab and trailing newline of the raw line.
            cur_prog.append(line[1:-1])
    # BFS: link child names to their parsed sections.
    hp = {'name': root_name}
    q = [hp]
    while len(q) > 0:
        node = q.pop(0)
        prog, children = all_progs[node['name']]
        node['prog'] = prog
        node['children'] = []
        for child in children:
            c = {}
            if child is not None:
                c = {'name': child}
                q.append(c)
            node['children'].append(c)
    return hp
# Render a textual function call, e.g. make_function('attach', [1, 2]) -> 'attach(1, 2)'.
def make_function(name, args):
    rendered = ", ".join(str(arg) for arg in args)
    return f'{name}({rendered})'
# Render a textual assignment statement, e.g. assign('c', 'Cuboid(1)') -> 'c = Cuboid(1)'.
def assign(var_name, value):
    return f'{var_name} = {value}'
# Rotate pos into abox's orientation frame, then shift by the offset
# between abox's and rbox's centers.
def get_pos_delta(abox, rbox, pos):
    frame = torch.stack((
        abox.rfnorm,
        abox.tfnorm,
        abox.ffnorm
    )).T
    offset = abox.pos - rbox.pos
    return frame @ pos + offset
# Given a cuboid cube, its local program bounding volume rbox, and the actual
# placement of that bounding volume abox, transform cube (in place) from the
# local frame into the global one.
def apply_delta(abox, rbox, cube):
    frame = torch.stack((
        abox.rfnorm,
        abox.tfnorm,
        abox.ffnorm
    )).T
    offset = abox.pos - rbox.pos
    # Scale in place so existing references to cube.dims stay valid.
    cube.dims *= abox.dims / rbox.dims
    cube.pos = (frame @ cube.pos) + offset
    cube.rfnorm = frame @ cube.rfnorm
    cube.tfnorm = frame @ cube.tfnorm
    cube.ffnorm = frame @ cube.ffnorm
# Execute a hierarchical shapeassembly program, in a differentiable fashion
def diff_hier_execute(root, param_dict, return_all = False):
    """Execute the program hierarchy `root` using the tensors in `param_dict`.

    Returns (verts, faces) of the assembled scene; also returns the list of
    leaf cuboids when `return_all` is True.
    """
    q = [(root, None, False)]
    scene = []
    while len(q) > 0:
        node, bbox, flip_axis = q.pop(0)
        param_lines = param_dict[node['name']]
        if bbox is None:
            # Root call: the bbox dims come from the first Cuboid line's params.
            bbox = Cuboid("bbox", aligned = True, vis=False)
            bbox.dims = torch.stack(param_lines[0][1][1:4])
        TP = Program()
        TP.runProgram(param_lines)
        if flip_axis:
            TP.flip(flip_axis)
        rbox = TP.cuboids.pop('bbox')
        add = []
        for i, c_key in enumerate(TP.cuboids.keys()):
            # Fix: reset flip_axis for each cuboid (was a dead `flip = False`,
            # which let a stale flip_axis leak onto later siblings). This
            # matches the per-cuboid reset in hier_execute.
            flip_axis = False
            cub = TP.cuboids[c_key]
            child = None
            if i+1 < len(node["children"]):
                child = node["children"][i+1]
            elif cub.parent is not None:
                # Macro-generated cuboid (reflect/translate): reuse the
                # parent's child sub-program.
                pi = list(TP.cuboids.keys()).index(cub.parent)
                child = deepcopy(node["children"][pi+1])
                if cub.parent_axis is not None and 'prog' in child:
                    flip_axis = cub.parent_axis
            # cub is found through local execution, this brings it into global space
            apply_delta(bbox, rbox, cub)
            # if intermediate cuboid, add back into queue
            if child is not None and len(child) > 0:
                q.append((child, cub, flip_axis))
            # if leaf cuboid, save it to the add list
            else:
                add.append(cub)
        scene += add
    verts = torch.tensor([],dtype=torch.float)
    faces = torch.tensor([],dtype=torch.long)
    for cube in scene:
        v, f = cube.getTris()
        if v is not None and f is not None:
            faces = torch.cat((faces, (f + verts.shape[0])))
            verts = torch.cat((verts, v))
    if return_all:
        return verts, faces, scene
    return verts, faces
# Execute a hierarchical shapeassembly program
def hier_execute(root, return_all=False):
    """Execute the text program hierarchy `root` and return (verts, faces);
    also returns the list of leaf cuboids when `return_all` is True."""
    # Root bbox dims come from the first Cuboid line of the root program.
    bbox = Cuboid('bbox')
    bbox.dims = torch.tensor(
        [float(a) for a in re.split(r'[()]', root['prog'][0])[1].split(',')[:3]]
    )
    q = [(root, bbox, None)]
    scene = []
    while len(q) > 0:
        node, bbox, flip_axis = q.pop(0)
        lines = node["prog"]
        TP = Program()
        for line in lines:
            TP.execute(line)
        if flip_axis:
            TP.flip(flip_axis)
        rbox = TP.cuboids.pop('bbox')
        add = []
        # Map original cuboid names to their child index; macro-generated
        # cuboids (reflect/translate) reuse their parent's child.
        i_map = {}
        i = 0
        for c_key in TP.cuboids.keys():
            cub = TP.cuboids[c_key]
            child = None
            flip_axis = False
            if '_ref' not in c_key and '_trans' not in c_key:
                child = node["children"][i+1]
                i_map[c_key] = i
                i += 1
            else:
                pi = i_map[cub.parent]
                child = deepcopy(node["children"][pi+1])
                if cub.parent_axis is not None and 'prog' in child:
                    flip_axis = cub.parent_axis
            # cub is found through local execution, this brings it into global space
            apply_delta(bbox, rbox, cub)
            # if intermediate cuboid, add back into queue
            if child is not None and len(child) > 0:
                q.append((child, cub, flip_axis))
            # if leaf cuboid, save it to the add list
            else:
                add.append(cub)
        scene += add
    verts = torch.tensor([],dtype=torch.float)
    faces = torch.tensor([],dtype=torch.long)
    for cube in scene:
        v, f = cube.getTris()
        if v is not None and f is not None:
            faces = torch.cat((faces, (f + verts.shape[0])))
            verts = torch.cat((verts, v))
    if not return_all:
        return verts, faces
    else:
        return verts, faces, scene
# Execute a hierarchical shapeassembly program
def hier_execute_to_npz(root):
    """Execute the program hierarchy and return numpy arrays describing it:
    one row per leaf cuboid (pos, dims, rfnorm, tfnorm -> 12 values) and one
    row per surviving attachment-point position (3 values)."""
    # Root bbox dims come from the first Cuboid line of the root program.
    bbox = Cuboid('bbox')
    bbox.dims = torch.tensor(
        [float(a) for a in re.split(r'[()]', root['prog'][0])[1].split(',')[:3]]
    )
    q = [(root, bbox, None)]
    scene_cubes = []
    scene_aps = []
    count = 0
    while len(q) > 0:
        node, bbox, flip_axis = q.pop(0)
        lines = node["prog"]
        TP = Program()
        for line in lines:
            TP.execute(line)
        if flip_axis:
            TP.flip(flip_axis)
        rbox = TP.cuboids.pop('bbox')
        add_cubes = []
        # Attachment points mapped into global space via the bbox placement.
        add_aps = [
            (
                ap,
                get_pos_delta(bbox, rbox, TP.att_points[ap].getPos())
            )
            for ap in TP.att_points
        ]
        # i_map: original cuboid name -> child index; macro-generated cuboids
        # (reflect/translate) reuse their parent's child sub-program.
        i_map = {}
        i = 0
        remove_inds = set()
        for c_key in TP.cuboids.keys():
            cub = TP.cuboids[c_key]
            child = None
            flip_axis = False
            if '_ref' not in c_key and '_trans' not in c_key:
                child = node["children"][i+1]
                i_map[c_key] = i
                i += 1
            else:
                pi = i_map[cub.parent]
                child = deepcopy(node["children"][pi+1])
                if cub.parent_axis is not None and 'prog' in child:
                    flip_axis = cub.parent_axis
            # cub is found through local execution, this brings it into global space
            apply_delta(bbox, rbox, cub)
            # if intermediate cuboid, add back into queue
            if child is not None and len(child) > 0:
                # NOTE(review): this marks EVERY attachment point of the node
                # for removal as soon as any cuboid has a sub-program (the
                # loop ignores `ap_name`); presumably it was meant to drop
                # only points on the expanded cuboid -- confirm upstream
                # intent before changing.
                for j, (ap_name, _) in enumerate(add_aps):
                    remove_inds.add(j)
                q.append((child, cub, flip_axis))
            # if leave cuboid, save these cuboid to the add list
            else:
                add_cubes.append(cub)
        scene_cubes += add_cubes
        scene_aps += [
            pos for i, (_, pos) in enumerate(add_aps) if i not in remove_inds
        ]
    # Flatten each cuboid into 12 values: pos, dims, rfnorm, tfnorm
    # (ffnorm is omitted; it is recoverable as rfnorm x tfnorm).
    scene_cubes = [
        torch.cat((
            c.pos, c.dims, c.rfnorm, c.tfnorm
        )).numpy() for c in scene_cubes
    ]
    scene_cubes = np.array(scene_cubes)
    scene_aps = [ap.numpy() for ap in scene_aps]
    scene_aps = np.array(scene_aps)
    return scene_cubes, scene_aps
class ShapeAssembly():
    """Contains entrypoint logic for parsing and executing entire programs."""

    # Execute a flat (non-hierarchical) program and write the mesh to out_file.
    def run_local(self, lines, out_file):
        prog = Program()
        for line in lines:
            prog.execute(line)
        prog.render(out_file)

    # Execute a program differentiably w.r.t. the parameters in param_dict.
    def diff_run(self, hier, param_dict):
        return diff_hier_execute(hier, param_dict)

    # Execute a hierarchical program and write the mesh to out_file.
    def run(self, lines, out_file):
        hier_prog = make_hier_prog(lines)
        verts, faces = hier_execute(hier_prog)
        writeObj(verts, faces, out_file)

    # Load raw program lines (newlines preserved) from a file.
    def load_lines(self, prog_file):
        with open(prog_file) as f:
            return [line for line in f]

    # Convert a hierarchy + dictionary of parameters back into full
    # ShapeAssembly program text, stored on each node under 'prog'.
    def fill_hier(self, hier, param_dict):
        pending = [hier]
        while len(pending) > 0:
            node = pending.pop(0)
            lines = []
            for func, tparams in param_dict.pop(node['name']):
                params = []
                for p in tparams:
                    # Tensors are rounded to 2 decimals for readable text.
                    params.append(
                        round(p.item(), 2) if isinstance(p, torch.Tensor) else p
                    )
                if func == "Cuboid":
                    lines.append(
                        assign(params[0], make_function(func, params[1:]))
                    )
                else:
                    lines.append(make_function(func, params))
            node['prog'] = lines
            for child in node['children']:
                if len(child) > 0:
                    pending.append(child)

    # Return a program hierarchy, a dictionary of nodes to lines (as tensors),
    # and a list of all tensors in those lines -> used during differentiable
    # execution.
    def make_hier_param_dict(self, lines):
        h = make_hier_prog(lines)
        pending = [h]
        param_list = []
        param_dict = {}
        while len(pending) > 0:
            node = pending.pop(0)
            entries = []
            for line in node.pop('prog'):
                func, tensor, parse = lineToAttrs(line)
                entries.append((func, parse))
                if tensor.shape[0] > 0:
                    param_list.append(tensor)
            param_dict[node['name']] = entries
            for child in node['children']:
                if len(child) > 0:
                    pending.append(child)
        return h, param_dict, param_list
if __name__ == '__main__':
    # Usage: python ShapeAssembly.py <mode> <program_file> <output_file>
    mode, prog_file, out_file = sys.argv[1], sys.argv[2], sys.argv[3]
    sa = ShapeAssembly()
    prog_lines = sa.load_lines(prog_file)
    if mode == 'run':
        sa.run(prog_lines, out_file)
    elif mode == 'run_local':
        sa.run_local(prog_lines, out_file)
    else:
        print(f'Mode {mode} not recognized')
| 52,690 | 30.875983 | 371 | py |
ShapeMOD | ShapeMOD-main/SA_lang/tasks/pc_encoder.py | import torch
import torch.nn as nn
from collections import namedtuple
import etw_pytorch_utils as pt_utils
from pointnet2.utils.pointnet2_modules import PointnetSAModule
class PCEncoder(nn.Module):
    r"""
    PointNet2 with single-scale grouping, used as a shape encoder.

    Parameters
    ----------
    input_channels: int = 3
        Number of input channels in the feature descriptor for each point. If the
        point cloud is Nx9, this value should be 6: 3 channels are xyz, 6 are
        feature descriptors.
    use_xyz: bool = True
        Whether or not to use the xyz position of a point as a feature.
    """

    def __init__(self, input_channels=3, use_xyz=True):
        super(PCEncoder, self).__init__()
        # Two grouping set-abstraction layers followed by a global one.
        sa_configs = [
            dict(
                npoint=512,
                radius=0.2,
                nsample=64,
                mlp=[input_channels, 64, 64, 128],
                use_xyz=use_xyz,
            ),
            dict(
                npoint=128,
                radius=0.4,
                nsample=64,
                mlp=[128, 128, 128, 256],
                use_xyz=use_xyz,
            ),
        ]
        modules = [PointnetSAModule(**cfg) for cfg in sa_configs]
        modules.append(PointnetSAModule(mlp=[256, 256, 512, 1024], use_xyz=use_xyz))
        self.SA_modules = nn.ModuleList(modules)
        # Linear projection from the 1024-d global feature down to a 256-d code.
        self.FC_layer = (
            pt_utils.Seq(1024)
            .fc(256, bn=False, activation=None)
        )

    def forward(self, pointcloud):
        # type: (Pointnet2SSG, torch.cuda.FloatTensor) -> pt_utils.Seq
        r"""
        Encode a point cloud into a latent code.

        Parameters
        ----------
        pointcloud: Variable(torch.cuda.FloatTensor)
            (B, N, 3 + input_channels) tensor; each point MUST be formatted as
            (x, y, z, features...).
        """
        xyz = pointcloud.contiguous()
        features = pointcloud.transpose(1, 2).contiguous()
        for sa_module in self.SA_modules:
            xyz, features = sa_module(xyz, features)
        return self.FC_layer(features.squeeze(-1))
if __name__ == '__main__':
    # Smoke test: encode a random batch of 10 point clouds on the GPU.
    gpu = torch.device('cuda')
    clouds = torch.randn(10, 10000, 3).to(gpu)
    enc = PCEncoder().to(gpu)
    print(enc(clouds).shape)
| 2,553 | 31.329114 | 112 | py |
ShapeMOD | ShapeMOD-main/SA_lang/tasks/infer_model_prog.py | import sys
sys.path.append("../")
sys.path.append("../../")
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
from ShapeAssembly import Program, hier_execute, make_hier_prog, ShapeAssembly
import sa_utils as utils
import infer_recon_metrics
import argparse
import ast
import random
from torch.utils.data import DataLoader
import time
import matplotlib.pyplot as plt
from argparse import Namespace
import pickle
from make_abs_data import fillProgram
from tqdm import tqdm
import infer_sem_valid as sv
import numpy
import json
from pc_encoder import PCEncoder
# Decoding / architecture constants.
MAX_LINES = 30  # hard cap on decoded program lines (see sem-valid decoding)
device = torch.device("cuda")  # NOTE(review): assumes a CUDA device is available
sa = ShapeAssembly()  # shared ShapeAssembly interpreter instance
outpath = "model_output"  # root directory for all experiment artifacts
SEQ_LEN = 0  # NOTE(review): presumably overwritten at runtime from the dataset — confirm
MAX_DEPTH = 4  # hierarchy-depth one-hot size fed to the decoder
I_LENGTH = 11 # cuboid index
SQ_LENGTH = 6 # face index
SYM_LENGTH = 3 # axis index
VERBOSE = False
# Per-element losses; callers mask and reduce manually.
closs = nn.BCEWithLogitsLoss(reduction='none')
celoss = nn.CrossEntropyLoss(reduction='none')
# Keys under which predictions are stored on decoded hierarchy nodes.
FUNC_PRED_FIELD = 'func_pred'
CPARAM_PRED_FIELD = 'cparam_pred'
DPARAM_PRED_FIELD = 'dparam_pred'
# (display name, result key, normalizer key) triples used for logging and plots.
TRAIN_LOG_INFO = [
    ('Total loss', 'loss', 'batch_count'),
    ('Func loss', 'func', 'batch_count'),
    ('Float Param loss', 'f_prm', 'batch_count'),
    ('Disc Param loss', 'd_prm', 'batch_count'),
    ('Bool Param loss', 'b_prm', 'batch_count'),
    ('BBox Param Loss', 'bbox', 'batch_count'),
    ('Child loss', 'child', 'batch_count'),
    ('KL loss', 'kl', 'batch_count'),
    ('Func Correct %', 'func_corr', 'func_total'),
    ('Disc Correct %', 'd_corr', 'd_total'),
    ('Bool Correct %', 'b_corr', 'b_total'),
    ('Child Correct %', 'c_corr', 'c_total'),
    ('Float Mean Error', 'f_prm', 'f_norm'),
]
EVAL_LOG_INFO = [
    ('CD', 'cd', 'no_norm'),
    ('Fscore-Def', 'fscore-def', 'no_norm'),
    ('Fscore-01', 'fscore-01', 'no_norm'),
    ('Fscore-03', 'fscore-03', 'no_norm'),
    ('Fscore-05', 'fscore-05', 'no_norm'),
    ('Rooted', 'rooted', 'no_norm'),
    ('Stable', 'stable', 'no_norm'),
    ('Prog Creation %', 'prog_creation_perc', 'no_norm'),
    ('Missing Line Number %', 'miss_ln', 'num_progs'),
    ('Extra Line Number %', 'extra_ln', 'num_progs'),
    ('Corr Child Number %', 'cn_corr', 'num_progs'),
    ('Func Correct %', 'func_corr', 'func_total'),
    ('Disc Correct %', 'd_corr', 'd_total'),
    ('Bool Correct %', 'b_corr', 'b_total'),
    ('Child Correct %', 'child_corr', 'child_total'),
    ('Float Mean Error', 'f_prm', 'num_progs'),
    ('BBox Mean Error', 'bbox', 'num_progs')
]
def weighted_mae_loss(input, target, weight):
    """Weighted sum of absolute errors between input and target.

    Note: despite the name this returns a weighted *sum*, not a mean;
    callers normalize by the weight mass themselves.
    """
    abs_err = (input - target).abs()
    return (weight * abs_err).sum()
def make_function(name, args):
    """Render a program function-call line, e.g. ('f', [1, 2]) -> 'f(1, 2)'."""
    rendered = ", ".join(str(a) for a in args)
    return f"{name}({rendered})"
def assign(var_name, value):
    """Render a program assignment line, e.g. ('c0', 'f(1)') -> 'c0 = f(1)'."""
    return f"{var_name} = {value}"
# Multi-layer perceptron helper function
class MLP(nn.Module):
    """Three-layer perceptron with leaky-ReLU (slope 0.2) hidden activations."""

    def __init__(self, ind, hdim1, hdim2, odim):
        super(MLP, self).__init__()
        self.l1 = nn.Linear(ind, hdim1)
        self.l2 = nn.Linear(hdim1, hdim2)
        self.l3 = nn.Linear(hdim2, odim)

    def forward(self, x):
        hidden = F.leaky_relu(self.l1(x), 0.2)
        hidden = F.leaky_relu(self.l2(hidden), 0.2)
        return self.l3(hidden)
# Unused, but can be used
class simplePCEncoder(nn.Module):
    """Lightweight PointNet-style point-cloud encoder (alternative to PCEncoder)."""

    def __init__(self, feat_len):
        super(simplePCEncoder, self).__init__()
        self.conv1 = nn.Conv1d(3, 64, 1)
        self.conv2 = nn.Conv1d(64, 128, 1)
        self.conv3 = nn.Conv1d(128, 128, 1)
        self.conv4 = nn.Conv1d(128, feat_len, 1)
        self.bn1 = nn.BatchNorm1d(64)
        self.bn2 = nn.BatchNorm1d(128)
        self.bn3 = nn.BatchNorm1d(128)
        self.bn4 = nn.BatchNorm1d(feat_len)
        self.mlp2mu = nn.Linear(feat_len, feat_len)

    def forward(self, pc):
        """Encode a (B, N, 3) point cloud into a (B, feat_len) code."""
        feat = pc.transpose(2, 1)
        stages = (
            (self.conv1, self.bn1),
            (self.conv2, self.bn2),
            (self.conv3, self.bn3),
            (self.conv4, self.bn4),
        )
        for conv, bn in stages:
            feat = torch.relu(bn(conv(feat)))
        # Global max pool over points, then a final linear projection.
        feat = feat.max(dim=2)[0]
        return self.mlp2mu(feat)
def getBatchEncData(batch):
    """Stack a list of point clouds into one (B, N, 3) tensor on the model device."""
    return torch.stack(batch, dim=0).to(device)
def get_pc_encodings(batch, encoder):
    """Encode a batch of point clouds into latent codes with the PC encoder."""
    return encoder(getBatchEncData(batch))
def getShapeEvalResult(pred_node, gt_node):
    """Recursively compare a predicted program hierarchy against ground truth.

    Returns a dict of summed counters (per-type correct/total, line-count
    mismatches, child accuracy); callers normalize via the *_total /
    num_progs keys. Predictions are read from the *_PRED_FIELD keys,
    targets from the 'e_*' keys set by addTargets.
    """
    # Corr line %, Func Corr %, Child Corr %, Bool Corr %, Disc Corr %, Float Mean Distance,
    # child num corr, line numc corr
    results = {}
    # Compare only the overlapping prefix of predicted vs GT lines; GT
    # sequences carry start/stop tokens, hence the [1:-1] trimming.
    ln = min(pred_node[FUNC_PRED_FIELD].shape[0], gt_node['e_func_gt'][1:-1].shape[0])
    if pred_node[FUNC_PRED_FIELD].shape[0] < gt_node['e_func_gt'][1:-1].shape[0]:
        results['miss_ln'] = 1.
    else:
        results['miss_ln'] = 0.
    if pred_node[FUNC_PRED_FIELD].shape[0] > gt_node['e_func_gt'][1:-1].shape[0]:
        results['extra_ln'] = 1.
    else:
        results['extra_ln'] = 0.
    results['num_progs'] = 1.
    func_pred = pred_node[FUNC_PRED_FIELD][:ln].cpu()
    cparam_pred = pred_node[CPARAM_PRED_FIELD][:ln].cpu()
    dparam_pred = pred_node[DPARAM_PRED_FIELD][:ln].cpu()
    # GT slices skip the start token.
    func_target = gt_node['e_func_gt'][1:1+ln].cpu()
    float_target = gt_node['e_float_target'][1:ln+1].cpu()
    float_mask = gt_node['e_float_mask'][1:ln+1].cpu()
    disc_target = gt_node['e_disc_target'][1:ln+1].cpu()
    disc_mask = gt_node['e_disc_mask'][1:ln+1].cpu()
    bool_target = gt_node['e_bool_target'][1:ln+1].cpu()
    bool_mask = gt_node['e_bool_mask'][1:ln+1].cpu()
    results['func_corr'] = (
        func_pred == func_target
    ).float().sum().item()
    results['func_total'] = ln
    # Continuous-parameter slots double as boolean slots; masks select which.
    results['b_corr'] = (
        (cparam_pred == bool_target).float() * bool_mask
    ).sum().item()
    results['b_total'] = (bool_mask.sum() + 1e-8).item()
    results['d_corr'] = (
        (dparam_pred == disc_target).float() * disc_mask
    ).sum().item()
    results['d_total'] = (disc_mask.sum() + 1e-8).item()
    results['f_prm'] = ((
        ((cparam_pred - float_target).abs() * float_mask).sum()
    ) / (float_mask.sum() + 1e-8)).item()
    cn = min(len(pred_node['children']), len(gt_node['children']))
    if len(pred_node['children']) == len(gt_node['children']):
        results['cn_corr'] = 1.
    else:
        results['cn_corr'] = 0.
    results['child_corr'] = 0.
    results['child_total'] = cn
    for pred_child, gt_child in zip(pred_node['children'][:cn], gt_node['children'][:cn]):
        # A child matches if both are leaves, or both expand; expanded
        # pairs are compared recursively and their counters accumulated.
        if len(pred_child) == 0 and len(gt_child) == 0:
            results['child_corr'] += 1.
        elif len(pred_child) > 0 and len(gt_child) > 0:
            results['child_corr'] += 1.
            child_results = getShapeEvalResult(pred_child, gt_child)
            for key in child_results:
                results[key] += child_results[key]
    return results
def getBatchDecData(progs):
    """Assemble teacher-forcing tensors for a batch of program nodes.

    Returns input/target sequences (shifted by one step), their masks and
    float-parameter weights, child targets/weights, plus the index lists
    (lnext_inds, cnext_inds) used to queue child programs for decoding.
    """
    full_seq = torch.stack([p['seq'] for p in progs], dim=0)
    inp_seq = full_seq[:, :-1, :]
    tar_seq = full_seq[:, 1:, :]
    seq_weight = torch.stack([p['seq_mask'] for p in progs], dim=0)[:, 1:]
    fprm_weight = torch.stack([p['fprm_weight'] for p in progs], dim=0)[:, 1:]
    children = [p['children'] for p in progs]
    child_target = torch.stack([p['child_target'] for p in progs], dim=0)[:, 1:]
    child_weight = torch.stack([p['child_weight'] for p in progs], dim=0)[:, 1:]
    # (batch, step, slot) triples where a valid line actually expands a child.
    lnext_inds = (child_target.bool() & child_weight.bool()).nonzero().tolist()
    cnext_inds = [[i, j] for i, p in enumerate(progs) for j in p["exp_inds"]]
    return inp_seq, tar_seq, seq_weight, fprm_weight, children, \
        child_target, child_weight, lnext_inds, cnext_inds
# GRU recurrent Decoder
class dslDecoder(nn.Module):
    """GRU decoder that emits one program line per step.

    Each line is a flat tensor laid out by get_tensor_layout: a function
    one-hot plus per-function discrete/float/bool parameter slices, each
    predicted by a dedicated MLP head tagged with its tensor slice.
    Per-line child heads predict which child slots expand and the latent
    codes used to decode them, giving a hierarchical decode.
    """

    def __init__(self, hidden_dim, metadata):
        super(dslDecoder, self).__init__()
        self.hidden_dim = hidden_dim
        self.metadata = metadata
        self.input_dim = metadata['tl_size']
        # Predicts root bounding-box dims straight from the latent code.
        self.bb_net = MLP(hidden_dim, hidden_dim, hidden_dim, 3)
        # Projects [line tensor | bbox dims | depth one-hot] into GRU input space.
        self.inp_net = MLP(self.input_dim + MAX_DEPTH + 3, hidden_dim, hidden_dim, hidden_dim)
        self.max_cparams = metadata['max_cparams']
        self.num_funcs = len(metadata['cparam_map'])
        self.gru = nn.GRU(hidden_dim, hidden_dim, batch_first = True)
        self.max_children = metadata['max_children']
        # Child heads consume [GRU state | program code] concatenated.
        self.child_net = MLP(hidden_dim * 2, hidden_dim//2, hidden_dim//8, self.max_children)
        on = self.max_children * hidden_dim
        self.next_code_net = MLP(hidden_dim * 2, hidden_dim, hidden_dim, on)
        tl_map = metadata['tl_map']
        # Build one output head per (function, parameter group). Each MLP is
        # annotated with the (start, end) slice of the line tensor it fills,
        # plus flags for conditioning on the discrete slice / bbox dims.
        func_net = MLP(hidden_dim, hidden_dim//2, hidden_dim//4, self.num_funcs)
        start, end = tl_map['func']
        func_net.start = start
        func_net.end = end
        func_net.line_cond = None
        func_net.bb_cond = None
        func_net.name = 'func'
        func_net.func = None
        func_net._type = 'func'
        net_list = [func_net]
        for _,func in metadata['func_map'].items():
            if len(metadata['dparam_map'][func]) > 0:
                # Track the span of this function's discrete slices so the
                # float head can condition on them.
                low = 1e8
                high = -1e8
                for i, prm in enumerate(metadata['dparam_map'][func]):
                    start, end = tl_map[f'{func}_{prm}_{i}']
                    mlp = MLP(hidden_dim, hidden_dim //2, hidden_dim//4, end-start)
                    mlp.start = start
                    mlp.end = end
                    mlp.line_cond = None
                    mlp.bb_cond = False
                    mlp.func = func
                    mlp.name = f'{func}_{prm}_{i}'
                    mlp._type = 'disc'
                    net_list.append(mlp)
                    low = min(low, start)
                    high = max(high, end)
            else:
                low = None
                high = None
            if f'{func}_f' in tl_map:
                start, end = tl_map[f'{func}_f']
                if low is None or high is None:
                    mlp = MLP(hidden_dim + 3, hidden_dim //2, hidden_dim//4, end-start)
                    mlp.line_cond = None
                else:
                    # Float head also sees the function's discrete slice.
                    mlp = MLP(hidden_dim + 3 + high-low, hidden_dim //2, hidden_dim//4, end-start)
                    mlp.line_cond = (low, high)
                mlp.start = start
                mlp.end = end
                mlp.bb_cond = True
                mlp.func = func
                mlp.name = f'{func}_f'
                mlp._type = 'f'
                net_list.append(mlp)
            if f'{func}_b' in tl_map:
                start, end = tl_map[f'{func}_b']
                mlp = MLP(hidden_dim, hidden_dim //2, hidden_dim//4, end-start)
                mlp.start = start
                mlp.end = end
                mlp.line_cond = None
                mlp.bb_cond = False
                mlp.func = func
                mlp.name = f'{func}_b'
                mlp._type = 'b'
                net_list.append(mlp)
        self.net_list = nn.ModuleList(net_list)

    def train_forward(self, inp_seq, code, _bb_dims, _hier_ind, gt_seq):
        """Teacher-forced decode of a batch of line sequences.

        Returns (out, next_codes, child_pred): predicted line tensors,
        per-line child latent codes, and child-expansion logits.
        """
        bb_dims = _bb_dims.unsqueeze(1).repeat(1,inp_seq.shape[1],1)
        hier_oh = torch.zeros(
            inp_seq.shape[0], inp_seq.shape[1], MAX_DEPTH, device=device
        )
        hier_oh[
            torch.arange(inp_seq.shape[0],device=device),
            :,
            _hier_ind
        ] = 1.0
        inp = self.inp_net(
            torch.cat(
                (inp_seq, bb_dims, hier_oh), dim=2)
        )
        gru_out, _ = self.gru(inp, code.unsqueeze(0).contiguous())
        fstart, fend = self.metadata['tl_map']['func']
        # Ground-truth command per (batch*step) row; used to route each row
        # to the correct per-function heads.
        commands = torch.argmax(gt_seq[:,:,fstart:fend], dim = 2).flatten()
        flat_out = torch.zeros(commands.shape[0], inp_seq.shape[2], device=device).float()
        flat_gt_seq = gt_seq.reshape(commands.shape[0], -1)
        flat_bb_dims = bb_dims.reshape(commands.shape[0], -1)
        flat_gru_out = gru_out.reshape(commands.shape[0], -1)
        for net in self.net_list:
            if net.func is None:
                # Func net
                flat_out[:,net.start:net.end] = net(flat_gru_out)
            else:
                cmd_inds = (commands == net.func).nonzero().flatten()
                if net.line_cond is not None:
                    # Teacher forcing: condition on the GT discrete slice.
                    line_cond = flat_gt_seq[cmd_inds, net.line_cond[0]:net.line_cond[1]]
                else:
                    line_cond = torch.zeros(cmd_inds.shape[0], 0, device=device)
                if net.bb_cond is True:
                    bb_cond = flat_bb_dims[cmd_inds,:]
                else:
                    bb_cond = torch.zeros(cmd_inds.shape[0], 0, device=device)
                flat_out[cmd_inds, net.start:net.end] = net(torch.cat((
                    flat_gru_out[cmd_inds,:], line_cond, bb_cond
                ), dim=1))
        out = flat_out.view(inp_seq.shape)
        double_enc = torch.cat((
            gru_out, code.unsqueeze(1).repeat(1, gru_out.shape[1], 1)
        ), dim = 2)
        child_pred = self.child_net(
            double_enc
        )
        next_codes = self.next_code_net(
            double_enc
        ).view(inp_seq.shape[0], inp_seq.shape[1], self.max_children, -1)
        return out, next_codes, child_pred

    def calc_loss(self, out, pchild, tar, child_tar, seq_weight, fprm_weight, child_weight):
        """Compute masked losses/accuracies for one decoded batch.

        Returns a dict mixing loss tensors (func/child/f_prm/d_prm/b_prm)
        and detached accuracy counters (*_corr / *_total).
        """
        result_map = {}
        result_map['f_prm'] = weighted_mae_loss(out, tar, fprm_weight)
        tl_map = self.metadata['tl_map']
        fstart, fend = tl_map['func']
        with torch.no_grad():
            commands = torch.argmax(tar[:,:,fstart:fend], dim = 2).flatten()
            pcommands = torch.argmax(out[:,:,fstart:fend], dim = 2).flatten()
            result_map['func_corr'] = (
                (commands == pcommands).float() * seq_weight.flatten()
            ).sum().item() * 1.0
            result_map['func_total'] = seq_weight.sum().item()
        result_map['func'] = (celoss(
            out[:,:,fstart:fend].view(-1,fend-fstart),
            commands
        ) * seq_weight.flatten()).sum()
        result_map['child'] = (closs(pchild, child_tar) * child_weight).sum()
        result_map['c_corr'] = (((pchild >= 0).float() == child_tar).float() * child_weight).sum().item()
        result_map['c_total'] = (child_weight.sum() + 1e-8).item()
        b_prm = torch.tensor(0,device=device).float()
        b_corr = 0
        b_total = 0
        d_prm = torch.tensor(0,device=device).float()
        d_corr = 0
        d_total = 0
        # Walk every parameter slice; float slices are already covered by the
        # f_prm MAE above, so only discrete ('i'/'sq'/'sym') and bool slices
        # contribute here. Rows are masked to the lines using that command.
        for key, (start, end) in tl_map.items():
            if key == 'func':
                continue
            cmd = int(key.split('_')[0])
            typ = key.split('_')[1]
            if typ == 'f':
                continue
            cmd_mask = (commands == cmd).float().flatten()
            if cmd_mask.sum() == 0:
                continue
            if typ == 'i' or typ == 'sq' or typ == 'sym':
                with torch.no_grad():
                    ktar = torch.argmax(tar[:,:,start:end], dim=2).flatten()
                    kpout = torch.argmax(out[:,:,start:end], dim=2).flatten()
                    d_corr += (
                        (kpout == ktar).float() * cmd_mask
                    ).sum().item() * 1.0
                    d_total += cmd_mask.sum().item()
                d_prm += (celoss(
                    out[:,:,start:end].view(-1, end-start),
                    ktar
                ) * cmd_mask).sum()
            elif typ == 'b':
                with torch.no_grad():
                    ktar = tar[:, :, start:end].reshape(-1, end-start)
                    kpout = (out[:,:, start:end].reshape(-1, end-start) >= 0).float()
                    b_corr += (
                        (kpout == ktar).float() * cmd_mask.unsqueeze(-1)
                    ).sum().item() * 1.0
                    b_total += cmd_mask.sum().item() * (end-start)
                b_prm += (closs(
                    out[:,:,start:end].reshape(-1, end-start),
                    ktar
                ) * cmd_mask.unsqueeze(-1)).sum()
        result_map['b_prm'] = b_prm
        result_map['b_corr'] = b_corr + 1e-8
        result_map['b_total'] = b_total + 1e-8
        result_map['d_prm'] = d_prm
        result_map['d_corr'] = d_corr + 1e-8
        result_map['d_total'] = d_total + 1e-8
        # NOTE(review): `loss_config` is not a parameter of this method — it
        # resolves as a global that must exist at runtime; confirm it is set.
        result_map['f_norm'] = (loss_config['f_prm'] * (fprm_weight.sum() + 1e-8)).item() *1.0
        return result_map

    def getStartLine(self):
        """Return the (1, 1, tl_size) start-token line (first func slot hot)."""
        l = torch.zeros(1,1,self.metadata['tl_size'],device=device).float()
        l[0,0,0] = 1.0
        return l

    def decode_line(self, line):
        """Split one flat line tensor into (command id, cont params, disc params)."""
        _cparam = torch.zeros(self.metadata['max_cparams'], device=device).float()
        _dparam = torch.zeros(self.metadata['max_dparams'], device=device).long()
        fstart, fend = self.metadata['tl_map']['func']
        cmd = line[fstart:fend].argmax().item()
        float_preds = []
        bool_preds = []
        tl_map = self.metadata['tl_map']
        if f'{cmd}_f' in tl_map:
            fstart, fend = tl_map[f'{cmd}_f']
            float_preds = line[fstart:fend].tolist()
        if f'{cmd}_b' in tl_map:
            bstart, bend = tl_map[f'{cmd}_b']
            bool_preds = line[bstart:bend].tolist()
        # Re-interleave float/bool values into the command's cparam order.
        for i,prm in enumerate(self.metadata['cparam_map'][cmd]):
            if prm == 'f':
                v = float_preds.pop(0)
            elif prm == 'b':
                v = bool_preds.pop(0)
            _cparam[i] = v
        for i, prm in enumerate(self.metadata['dparam_map'][cmd]):
            istart, iend = tl_map[f'{cmd}_{prm}_{i}']
            v = torch.argmax(line[istart:iend]).item()
            _dparam[i] = v
        return cmd, _cparam, _dparam

    def split_lines(self, lines):
        """Decode a list of line tensors into parallel func/cparam/dparam lists."""
        p_func = []
        p_cparam = []
        p_dparam = []
        for line in lines:
            _f, _c, _d = self.decode_line(line)
            p_func.append(_f)
            p_cparam.append(_c)
            p_dparam.append(_d)
        return p_func, p_cparam, p_dparam

    def eval_forward(self, inp_seq, code, code_start, bb_dims, hier_ind):
        """Single-step autoregressive decode (batch size 1).

        Unlike train_forward, each head conditions on the predictions made
        earlier in the same step (the func head runs first and routes the rest).
        Returns (out, next_codes, child_pred, h) with h the new GRU hidden state.
        """
        bb_dims = bb_dims.unsqueeze(0).unsqueeze(0).repeat(1,inp_seq.shape[1],1)
        hier_oh = torch.zeros(1, inp_seq.shape[1], MAX_DEPTH).to(device)
        hier_oh[0, :, min(hier_ind, 2)] = 1.0
        inp = self.inp_net(
            torch.cat(
                (inp_seq, bb_dims, hier_oh), dim=2)
        )
        gru_out, h = self.gru(inp, code.view(1,1,-1))
        out = torch.zeros(inp_seq.shape, device=device).float()
        commands = None
        for net in self.net_list:
            if net.func is not None:
                # Function head must have run already; skip heads for other commands.
                assert commands is not None
                if net.func != commands:
                    continue
            if net.line_cond is not None:
                line_cond = out[:,:,net.line_cond[0]:net.line_cond[1]]
            else:
                line_cond = torch.zeros(inp_seq.shape[0], inp_seq.shape[1], 0, device=device)
            if net.bb_cond is True:
                bb_cond = bb_dims
            else:
                bb_cond = torch.zeros(inp_seq.shape[0], inp_seq.shape[1], 0, device=device)
            raw_out = net(torch.cat((
                gru_out, line_cond, bb_cond
            ), dim=2))
            if net._type == 'func':
                cmd = torch.argmax(raw_out.squeeze()).item()
                out[0,0, net.start+cmd] = 1.0
                assert commands == None
                commands = cmd
            elif net._type == 'disc':
                m = torch.argmax(raw_out.squeeze()).item()
                out[0,0, net.start+m] = 1.0
            elif net._type == 'b':
                r = (raw_out.squeeze() >= 0.).float()
                out[0,0,net.start:net.end] = r
            elif net._type == 'f':
                # NOTE(review): bb_max is computed but never used — dead code?
                bb_max = bb_cond.max().item()
                r = torch.clamp(raw_out.squeeze(), 0.0, 10.)
                out[0,0,net.start:net.end] = r
        double_enc = torch.cat((
            gru_out, code_start.repeat(1, gru_out.shape[1], 1)
        ), dim = 2)
        child_pred = self.child_net(
            double_enc
        )
        next_codes = self.next_code_net(
            double_enc
        ).view(inp_seq.shape[0], inp_seq.shape[1], self.max_children, -1)
        return out, next_codes, child_pred, h

    def train_progs(self, batch, codes, loss_config):
        """Teacher-forced training over whole hierarchies.

        Processes the root programs, then repeatedly re-batches every child
        program (paired with its predicted latent code) until the queue is
        empty, summing calc_loss results across all levels.
        """
        result_map = {key:0. for key in loss_config}
        bbox_target = torch.stack([b['bbox_gt'] for b in batch], dim=0)
        bbox_pred = self.bb_net(codes)
        bbox_loss = (bbox_target - bbox_pred).abs().sum()
        result_map['bbox'] = bbox_loss
        # Queues of programs / codes / bbox dims / hierarchy depths.
        qp = batch
        qe = codes
        qbb = bbox_target
        qhi = torch.zeros(len(batch), device=device).long()
        while len(qp) > 0:
            bs = min(len(batch), len(qp))
            bprogs = qp[:bs]
            bencs = qe[:bs]
            bbb = qbb[:bs]
            bhi = qhi[:bs]
            qp = qp[bs:]
            qe = qe[bs:]
            qbb = qbb[bs:]
            qhi = qhi[bs:]
            inp_seq, tar_seq, seq_weights, fprm_weights, children, \
                child_targets, child_weights, lnext_inds, cnext_inds = getBatchDecData(bprogs)
            pout, pnext, pchild = self.train_forward(
                inp_seq, bencs, bbb, bhi, tar_seq
            )
            _result = self.calc_loss(
                pout,
                pchild,
                tar_seq,
                child_targets,
                seq_weights,
                fprm_weights,
                child_weights
            )
            for key in _result:
                if key in result_map:
                    result_map[key] += _result[key]
                else:
                    result_map[key] = _result[key]
            # Enqueue every expanded child with its predicted latent code.
            _qp = []
            _qe = []
            _qbb = []
            _qhi = []
            for ((li, lj, lk), (ci, cj)) in zip(lnext_inds, cnext_inds):
                _qp.append(children[ci][cj])
                _qe.append(pnext[li, lj, lk])
                _qbb.append(children[ci][cj]['bbox_gt'])
                _qhi.append(bhi[li]+1)
            if len(_qp) > 0:
                qe = torch.cat((qe, torch.stack(_qe)), dim = 0)
                qbb = torch.cat((qbb, torch.stack(_qbb).to(device)), dim = 0)
                qp += _qp
                qhi = torch.cat((qhi, torch.stack(_qhi)), dim = 0)
        return result_map
def writeConfigFile(args):
    """Create the experiment output directory tree and record the run config.

    Replaces the previous seventeen `os.system('mkdir ... > /dev/null 2>&1')`
    calls with os.makedirs: no shell is spawned, it is portable beyond POSIX,
    and exist_ok=True silently accepts pre-existing directories (the same
    effect the shell-redirect error suppression had).
    """
    base = f'{outpath}/{args.exp_name}'
    subdirs = [
        '',
        'plots', 'plots/train', 'plots/eval',
        'programs', 'programs/train', 'programs/val',
        'programs/test', 'programs/gt',
        'objs', 'objs/train', 'objs/val', 'objs/test', 'objs/gt',
        'models',
    ]
    for sub in subdirs:
        # makedirs also creates the outpath root on the first iteration.
        os.makedirs(os.path.join(base, sub), exist_ok=True)
    with open(f"{base}/config.txt", "w") as f:
        f.write(f"{args}\n")
def get_max_sq_len(nodes):
    """Return the longest 'func_gt' sequence across a forest of (key, node) pairs."""
    longest = 0
    queue = [node for _, node in nodes]
    while queue:
        cur = queue.pop(0)
        longest = max(longest, cur['func_gt'].shape[0])
        queue.extend(child for child in cur['children'] if len(child) > 0)
    return longest
def addTargets(node, metadata):
    """Attach all training-target tensors to a hierarchy node, recursing into
    non-empty children.

    Mutates `node` in place: builds the padded per-line tensor sequence
    ('seq'), its masks, child expansion targets, and the 'e_*' evaluation
    targets. Relies on the module-level SEQ_LEN and device.
    """
    gt_funcs = node['func_gt']
    gt_cparams = node['cparam_gt']
    gt_dparams = node['dparam_gt']
    # Line 1 is the bbox Cuboid command; its first 3 params are the bbox dims.
    node['bbox_gt'] = torch.from_numpy(node['cparam_gt'][1][:3]).float().to(device)
    seq = torch.zeros(SEQ_LEN, metadata['tl_size'], device=device).float()
    fprm_weight = torch.zeros(SEQ_LEN, metadata['tl_size'], device=device).float()
    seq_mask = torch.zeros(SEQ_LEN, device=device).float()
    seq_end = 0
    # Encode each GT line into the flat tensor layout (see get_tensor_layout).
    for i, fn in enumerate(gt_funcs.tolist()):
        seq_mask[i] = 1.0
        seq_end = i
        line = torch.zeros(metadata['tl_size'], device=device).float()
        weight = torch.zeros(metadata['tl_size'], device=device).float()
        line[fn] = 1.0
        float_vals = []
        bool_vals = []
        for j, tp in enumerate(metadata['cparam_map'][fn]):
            if tp == 'f':
                float_vals.append(gt_cparams[i][j].item())
            elif tp == 'b':
                bool_vals.append(gt_cparams[i][j].item())
            else:
                assert False, f'bad type {tp}'
        if len(float_vals) > 0:
            start, end = metadata['tl_map'][f'{fn}_f']
            line[start:end] = torch.tensor(float_vals, device=device)
            # Only float slots get MAE weight; bools/discretes use other losses.
            weight[start:end] = 1.0
        if len(bool_vals) > 0:
            start, end = metadata['tl_map'][f'{fn}_b']
            line[start:end] = torch.tensor(bool_vals, device=device)
        for j, prm in enumerate(metadata['dparam_map'][fn]):
            tar = int(gt_dparams[i][j].item())
            start, end = metadata['tl_map'][f'{fn}_{prm}_{j}']
            line[start+tar] = 1.0
        seq[i] = line
        fprm_weight[i] = weight
    node['seq'] = seq
    node['fprm_weight'] = fprm_weight
    node['seq_end'] = torch.tensor([seq_end],device=device).long()
    node['seq_mask'] = seq_mask
    # Child-expansion targets: for each line, which of its child slots have
    # non-empty programs. child_to_ln_map inverts child index -> (line, slot).
    child_target = torch.zeros(SEQ_LEN, metadata['max_children'], device=device).float()
    child_weight = torch.zeros(SEQ_LEN, metadata['max_children'], device=device).float()
    child_enc_mask = torch.zeros(SEQ_LEN, device = device).float()
    child_to_ln_map = {}
    for i, inds in enumerate(node['child_gt']):
        for j, cind in enumerate(inds):
            child_weight[i, j] = 1.0
            child_to_ln_map[cind] = (i, j)
            if len(node['children'][cind]) > 0:
                child_target[i,j] = 1.0
                child_enc_mask[i] = 1.0
    node['child_target'] = child_target
    node['child_weight'] = child_weight
    node['child_to_ln_map'] = child_to_ln_map
    node['child_enc_mask'] = child_enc_mask
    node['exp_inds'] = []
    for i, child in enumerate(node['children']):
        if len(child) > 0:
            node['exp_inds'].append(i)
            addTargets(child, metadata)
    # Evaluation targets: dense per-line float/bool/discrete tensors with
    # masks marking which slots are meaningful for each line's function.
    gt_cparams = node['cparam_gt']
    gt_dparams = node['dparam_gt']
    bool_target = torch.zeros(gt_funcs.shape[0], metadata['max_cparams'], device=device)
    bool_mask = torch.zeros(gt_funcs.shape[0], metadata['max_cparams'], device=device)
    float_target = torch.zeros(gt_funcs.shape[0], metadata['max_cparams'], device=device)
    float_mask = torch.zeros(gt_funcs.shape[0], metadata['max_cparams'], device=device)
    for i, tf in enumerate(gt_funcs):
        for j, tp in enumerate(metadata['cparam_map'][tf]):
            if tp == 'f':
                float_target[i][j] = gt_cparams[i][j].item()
                float_mask[i][j] = 1.0
            elif tp == 'b':
                bool_target[i][j] = gt_cparams[i][j].item()
                bool_mask[i][j] = 1.0
            else:
                assert False, f'bad type {tp}'
    disc_target = torch.zeros(gt_funcs.shape[0], metadata['max_dparams'], device=device).long()
    disc_mask = torch.zeros(gt_funcs.shape[0], metadata['max_dparams'], device=device)
    for i, tf in enumerate(gt_funcs):
        for j, _ in enumerate(metadata['dparam_map'][tf]):
            disc_target[i][j] = gt_dparams[i][j].item()
            disc_mask[i][j] = 1.0
    node['e_func_gt'] = torch.tensor(node['func_gt'], device=device).long()
    node['e_cparam_gt'] = torch.tensor(node['cparam_gt'], device=device).float()
    node['e_dparam_gt'] = torch.tensor(node['dparam_gt'], device=device).long()
    node['e_bool_target'] = bool_target.float()
    node['e_float_target'] = float_target.float()
    node['e_disc_target'] = disc_target.long()
    node['e_bool_mask'] = bool_mask.float()
    node['e_float_mask'] = float_mask.float()
    node['e_disc_mask'] = disc_mask.float()
def _col(samples):
    # Identity collate_fn: keep DataLoader samples as a plain list.
    return samples
def _bcol(samples):
    # Identity collate_fn for batched loaders: pass samples through unchanged.
    return samples
# Full encoder + decoder training logic for a single program (i.e. a batch)
def model_train(batch, encoder, decoder, enc_opt, dec_opt, loss_config):
    """One encode/decode step over a batch; steps both optimizers when given.

    Each batch item is (index, program node, point cloud). Returns the
    per-key weighted losses (as floats, averaged over the batch) plus the
    total 'loss'. Passing enc_opt/dec_opt as None skips the gradient step.
    """
    codes = get_pc_encodings([item[2] for item in batch], encoder)
    shape_result = decoder.train_progs([item[1] for item in batch], codes, loss_config)
    batch_size = len(batch)
    loss = 0.
    for key in loss_config:
        loss += (loss_config[key] * shape_result[key]) / batch_size
        shape_result[key] = (loss_config[key] * shape_result[key].item()) / batch_size
    can_step = torch.is_tensor(loss) and enc_opt is not None and dec_opt is not None
    if can_step:
        dec_opt.zero_grad()
        enc_opt.zero_grad()
        loss.backward()
        dec_opt.step()
        enc_opt.step()
    shape_result['loss'] = loss.item()
    return shape_result
def model_train_results(dataset, encoder, decoder, enc_opt, dec_opt, loss_config):
    """Run one epoch over `dataset`, accumulating per-batch loss/metric sums.

    Trains (modules in train mode, optimizers stepped) when both optimizers
    are provided; otherwise runs in eval mode. Returns the summed result
    dict with 'batch_count' added for normalization by the log helpers.

    Bug fix: the optimizers were previously forwarded to model_train in the
    wrong order (dec_opt passed where enc_opt is expected and vice versa),
    so each optimizer was driven under the other's name.
    """
    if enc_opt is not None and dec_opt is not None:
        decoder.train()
        encoder.train()
    else:
        decoder.eval()
        encoder.eval()
    ep_result = {}
    bc = 0.
    for batch in dataset:
        bc += 1.
        batch_result = model_train(
            batch, encoder, decoder, enc_opt, dec_opt, loss_config
        )
        for key in batch_result:
            if key not in ep_result:
                ep_result[key] = batch_result[key]
            else:
                ep_result[key] += batch_result[key]
    ep_result['batch_count'] = bc
    return ep_result
def model_eval(
    eval_train_dataset, eval_val_dataset, eval_test_dataset, encoder, decoder, exp_name, epoch, num_write, metadata
):
    """Autoregressively decode each split and collect program/geometry metrics.

    For every shape (index, GT node, point cloud): encode the point cloud,
    decode a program hierarchy, score it against the GT, then execute it
    for reconstruction metrics. Failures are counted, not raised.
    Returns {split_name: metric dict}.
    """
    eval_results = {}
    for name, dataset in [
        ('train', eval_train_dataset), ('val', eval_val_dataset), ('test', eval_test_dataset)
    ]:
        if len(dataset) == 0:
            eval_results[name] = {}
            continue
        named_results = {
            'count': 0.,
            'miss_hier_prog': 0.,
            'no_norm': 1.0
        }
        recon_sets = []
        for batch in dataset:
            assert len(batch) == 1, 'batch size 1'
            shape = batch[0]
            named_results['count'] += 1.
            code = get_pc_encodings([shape[2]], encoder)
            # Semantically-valid autoregressive decode of a program hierarchy.
            node = sv.sem_eval_prog(decoder, code.squeeze())
            try:
                shape_result = getShapeEvalResult(node, shape[1])
                shape_result['bbox'] = (node['bb_dims'] - shape[1]['bbox_gt']).abs().sum().item()
            except Exception as e:
                # Best-effort: a malformed prediction just contributes no counters.
                if VERBOSE:
                    print(f"FAILED SHAPE EVAL RESULT WITH {e}")
                shape_result = {}
            for key in shape_result:
                if key not in named_results:
                    named_results[key] = shape_result[key]
                else:
                    named_results[key] += shape_result[key]
            try:
                # Convert predicted tensors back into executable program text.
                fillProgram(
                    metadata['dsl'],
                    node,
                    metadata,
                    FUNC_PRED_FIELD,
                    CPARAM_PRED_FIELD,
                    DPARAM_PRED_FIELD,
                )
                recon_sets.append((node, shape[1], shape[0], shape[2]))
            except Exception as e:
                if VERBOSE:
                    print(f"Failed Recon Program with {e}")
                named_results[f'miss_hier_prog'] += 1.
        # For reconstruction, get metric performance
        recon_results, recon_misses = infer_recon_metrics.recon_metrics(
            recon_sets, outpath, exp_name, name, epoch, VERBOSE, num_write + 1
        )
        for key in recon_results:
            named_results[key] = recon_results[key]
        named_results[f'miss_hier_prog'] += recon_misses
        named_results[f'prog_creation_perc'] = (
            named_results[f'count'] - named_results[f'miss_hier_prog']
        ) / named_results[f'count']
        eval_results[name] = named_results
    return eval_results
def print_train_results(result, exp_name):
    """Log each TRAIN_LOG_INFO metric, normalized by its counter key."""
    pieces = []
    for name, key, norm_key in TRAIN_LOG_INFO:
        if key in result:
            pieces.append(f" {name} : {round(result[key] / (result[norm_key]+1e-8), 2)}\n")
    utils.log_print("".join(pieces), f"{outpath}/{exp_name}/log.txt")
def print_eval_results(result, exp_name):
    """Log each EVAL_LOG_INFO metric, normalized by its counter key."""
    pieces = []
    for name, key, norm_key in EVAL_LOG_INFO:
        if key in result:
            pieces.append(f" {name} : {round(result[key] / (result[norm_key]+1e-8), 4)}\n")
    utils.log_print("".join(pieces), f"{outpath}/{exp_name}/log.txt")
def make_train_plots(train_result, val_result, train_plots, aepochs, exp_name):
    """Append this epoch's train/val metrics and refresh each metric's curve."""
    for name, key, norm_key in TRAIN_LOG_INFO:
        for split, result in (('train', train_result), ('val', val_result)):
            if key not in result:
                continue
            value = result[key] / (result[norm_key] + 1e-8)
            train_plots[split].setdefault(name, []).append(value)
        # Only draw metrics that have at least one train-split point.
        if name not in train_plots['train']:
            continue
        plt.clf()
        plt.plot(aepochs, train_plots['train'][name], label='train')
        if name in train_plots['val']:
            plt.plot(aepochs, train_plots['val'][name], label='val')
        plt.legend()
        plt.grid()
        plt.savefig(f"{outpath}/{exp_name}/plots/train/{name}.png")
def make_eval_plots(eval_result, eval_plots, aepochs, exp_name):
    """Append this round's eval metrics for all splits and refresh their curves."""
    for name, key, norm_key in EVAL_LOG_INFO:
        splits = (
            ('train', eval_result['train']),
            ('val', eval_result['val']),
            ('test', eval_result['test']),
        )
        for split, result in splits:
            if key not in result:
                continue
            value = result[key] / (result[norm_key] + 1e-8)
            eval_plots[split].setdefault(name, []).append(value)
        # Only draw metrics that have at least one train-split point.
        if name not in eval_plots['train']:
            continue
        plt.clf()
        plt.plot(aepochs, eval_plots['train'][name], label='train')
        if name in eval_plots['val']:
            plt.plot(aepochs, eval_plots['val'][name], label='val')
        if name in eval_plots['test']:
            plt.plot(aepochs, eval_plots['test'][name], label='test')
        plt.legend()
        plt.grid()
        plt.savefig(f"{outpath}/{exp_name}/plots/eval/{name}.png")
# Helper function for keeping consistent train/val splits
def getInds(train_ind_file):
    """Read one index per line from a split file into a set of stripped strings."""
    with open(train_ind_file) as f:
        return {line.strip() for line in f}
def get_tensor_layout(metadata):
    """Compute the flat per-line tensor layout for the DSL.

    Returns (size, tl_map): size is the width of one encoded program line,
    and tl_map maps 'func' plus per-function parameter names to (start, end)
    column ranges inside that vector.
    """
    # Figure out size of tensor
    # map from (func, type) -> indices in tensor
    tl_map = {}
    # One-hot over all function ids occupies the first columns.
    size = len(metadata['func_map'])
    tl_map['func'] = (0, size)
    start = size
    # Shared region for discrete params, sized by the max count of each
    # discrete type ('i' cuboid idx, 'sq' face idx, 'sym' axis idx) over all
    # functions — so different functions' discrete slots overlap by design.
    size += (I_LENGTH * metadata['max_d_i_params']) \
            + (SQ_LENGTH * metadata['max_d_sq_params']) \
            + (SYM_LENGTH * metadata['max_d_sym_params'])
    for func, prms in metadata['dparam_map'].items():
        # Per-function cursors into the shared discrete region, grouped by type.
        i_start = start
        sq_start = i_start + I_LENGTH * metadata['max_d_i_params']
        sym_start = sq_start + SQ_LENGTH * metadata['max_d_sq_params']
        for i, _typ in enumerate(prms):
            # NOTE(review): assumes _typ is always one of 'i'/'sq'/'sym';
            # any other value would reuse a stale _start/opt_len — confirm
            # dparam_map never contains other type tags.
            if _typ == 'i':
                opt_len = I_LENGTH
                _start = i_start
                i_start += opt_len
            elif _typ == 'sq':
                opt_len = SQ_LENGTH
                _start = sq_start
                sq_start += opt_len
            elif _typ == 'sym':
                opt_len = SYM_LENGTH
                _start = sym_start
                sym_start += opt_len
            tl_map[f'{func}_{_typ}_{i}'] = (
                _start,
                _start + opt_len,
            )
    # Continuous ('f') and boolean ('b') params get dedicated, non-shared
    # columns appended after the discrete region, one run per function.
    for func, prms in metadata['cparam_map'].items():
        nf = 0
        nb = 0
        for prm in prms:
            if 'f' in prm:
                nf += 1
            elif 'b' in prm:
                nb += 1
        if nf > 0:
            size += nf
            tl_map[f'{func}_f'] = (size-nf, size)
        if nb > 0:
            size += nb
            tl_map[f'{func}_b'] = (size-nb, size)
    return size, tl_map
# Main entry-point of modeling logic
def run_train(
    dataset_path,
    exp_name,
    max_shapes,
    epochs,
    hidden_dim,
    eval_per,
    loss_config,
    rd_seed,
    print_per,
    num_write,
    dec_lr,
    enc_lr,
    save_per,
    category,
    batch_size
):
    """Train the point-cloud encoder + DSL decoder pair.

    Loads the pickled dataset and metadata from ``{dataset_path}_train.*``,
    splits samples by the index files under ``pc_data_splits/{category}``,
    then alternates training with periodic logging (``print_per``),
    evaluation (``eval_per``) and checkpointing (``save_per``) under
    ``{outpath}/{exp_name}``.
    """
    # Seed every RNG used (python / numpy / torch) for reproducibility.
    random.seed(rd_seed)
    numpy.random.seed(rd_seed)
    torch.manual_seed(rd_seed)
    raw_data = pickle.load(open(f"{dataset_path}_train.data", "rb"))
    metadata = pickle.load(open(f"{dataset_path}_train.meta", "rb"))
    # Derived metadata: max children per node, func-id -> name reverse map,
    # and the max count of each discrete param type across all functions.
    metadata['max_children'] = max([int(i) for i in metadata['num_cube_map'].values()])
    metadata['rev_func_map'] = {v:k for k, v in metadata['func_map'].items()}
    for key in ('i', 'sq', 'sym'):
        metadata[f'max_d_{key}_params'] = max([len([ __l for __l in _l if __l == key]) for _l in metadata['dparam_map'].values()])
    tl_size, tl_map = get_tensor_layout(metadata)
    metadata['tl_size'] = tl_size
    metadata['tl_map'] = tl_map
    all_inds = []
    all_data = []
    all_pc = []
    # NOTE(review): good_inds is always empty here, so the filtering /
    # overfit-debug branches below are dead unless it is edited by hand.
    good_inds = []
    max_sq_len = get_max_sq_len(raw_data)
    print(f"Seq len: {max_sq_len}")
    # SEQ_LEN is a module-level global consumed by dslEncoder.encode_programs.
    global SEQ_LEN
    SEQ_LEN = max_sq_len
    # Load up to max_shapes programs + their point clouds.
    for d in tqdm(raw_data):
        if len(all_inds) >= max_shapes:
            break
        if len(good_inds) > 0 and d[0] not in good_inds:
            continue
        addTargets(d[1], metadata)
        fillProgram(
            metadata['dsl'],
            d[1],
            metadata,
            'func_gt',
            'cparam_gt',
            'dparam_gt'
        )
        pc = numpy.load(f'pc_data/{category}/{d[0]}.pts.npy')
        tpc = torch.from_numpy(pc)
        all_data.append(d[1])
        all_inds.append(d[0])
        all_pc.append(tpc)
    samples = list(zip(all_inds, all_data, all_pc))
    # Fixed train/val/test membership comes from the split index files.
    train_ind_file = f'pc_data_splits/{category}/train.txt'
    val_ind_file = f'pc_data_splits/{category}/val.txt'
    test_ind_file = f'pc_data_splits/{category}/test.txt'
    train_samples = []
    val_samples = []
    test_samples = []
    train_inds = getInds(train_ind_file)
    val_inds = getInds(val_ind_file)
    test_inds = getInds(test_ind_file)
    misses = 0.
    num_parts = []
    for (ind, prog, pc) in samples:
        if ind in train_inds or ind in good_inds:
            train_samples.append((ind, prog, pc))
        elif ind in val_inds:
            val_samples.append((ind, prog, pc))
        elif ind in test_inds:
            test_samples.append((ind, prog, pc))
        else:
            misses += 1
    if len(good_inds) > 0:
        val_samples = train_samples[:1]
        test_samples = train_samples[:1]
    print(f"Samples missed: {misses}")
    train_num = len(train_samples)
    val_num = len(val_samples)
    test_num = len(test_samples)
    # Batched loaders for training; single-sample loaders for evaluation.
    train_dataset = DataLoader(
        train_samples, batch_size, shuffle=True, collate_fn = _bcol
    )
    val_dataset = DataLoader(
        val_samples, batch_size, shuffle = False, collate_fn = _bcol
    )
    num_eval = max(val_num, test_num, len(good_inds))
    eval_train_dataset = DataLoader(
        train_samples[:num_eval], 1, shuffle=False, collate_fn = _col
    )
    eval_val_dataset = DataLoader(
        val_samples[:num_eval], 1, shuffle = False, collate_fn = _col
    )
    eval_test_dataset = DataLoader(
        test_samples[:num_eval], 1, shuffle = False, collate_fn = _col
    )
    utils.log_print(f"Training size: {len(train_samples)}", f"{outpath}/{exp_name}/log.txt")
    utils.log_print(f"Validation size: {len(val_samples)}", f"{outpath}/{exp_name}/log.txt")
    utils.log_print(f"Test size: {len(test_samples)}", f"{outpath}/{exp_name}/log.txt")
    val_epochs = []
    train_epochs = []
    train_plots = {'train': {}, 'val': {}}
    eval_plots = {'train': {}, 'val': {}, 'test': {}}
    # Separate optimizers (and learning rates) for encoder and decoder.
    encoder = PCEncoder()
    decoder = dslDecoder(
        hidden_dim,
        metadata,
    )
    encoder.to(device)
    decoder.to(device)
    dec_opt = torch.optim.Adam(
        decoder.parameters(),
        lr = dec_lr,
        eps = 1e-6
    )
    enc_opt = torch.optim.Adam(
        encoder.parameters(),
        lr = enc_lr,
        eps = 1e-6
    )
    print('training ...')
    for e in range(0, epochs):
        # Persist the plot data every epoch so runs can be inspected live.
        json.dump({
            'train': train_plots,
            'eval': eval_plots,
            'train_epochs': train_epochs,
            'val_epochs': val_epochs,
        }, open(f"{outpath}/{exp_name}/res.json" ,'w'))
        decoder.epoch = e
        do_print = (e+1) % print_per == 0
        t = time.time()
        if do_print:
            utils.log_print(f"\nEpoch {e}:", f"{outpath}/{exp_name}/log.txt")
        train_result = model_train_results(
            train_dataset,
            encoder,
            decoder,
            enc_opt,
            dec_opt,
            loss_config,
        )
        if do_print:
            # Validation pass with no optimizers (None) => no updates.
            with torch.no_grad():
                val_result = model_train_results(
                    val_dataset, encoder, decoder, None, None,
                    loss_config
                )
                train_epochs.append(e)
                make_train_plots(train_result, val_result, train_plots, train_epochs, exp_name)
                utils.log_print(
                    f"Train results: ", f"{outpath}/{exp_name}/log.txt"
                )
                print_train_results(train_result, exp_name)
                utils.log_print(
                    f"Val results: ", f"{outpath}/{exp_name}/log.txt"
                )
                print_train_results(val_result, exp_name)
                utils.log_print(
                    f"    Time = {time.time() - t}", f"{outpath}/{exp_name}/log.txt"
                )
        with torch.no_grad():
            if (e+1) % eval_per == 0:
                # NOTE(review): .eval() is set here but never switched back to
                # .train() afterwards — confirm model_train_results handles it.
                decoder.eval()
                encoder.eval()
                t = time.time()
                eval_results = model_eval(
                    eval_train_dataset,
                    eval_val_dataset,
                    eval_test_dataset,
                    encoder,
                    decoder,
                    exp_name,
                    e,
                    num_write,
                    metadata
                )
                utils.log_print(f"Evaluation training set results:", f"{outpath}/{exp_name}/log.txt")
                print_eval_results(eval_results['train'], exp_name)
                utils.log_print(f"Evaluation validation set results:", f"{outpath}/{exp_name}/log.txt")
                print_eval_results(eval_results['val'], exp_name)
                utils.log_print(f"Evaluation test set results:", f"{outpath}/{exp_name}/log.txt")
                print_eval_results(eval_results['test'], exp_name)
                utils.log_print(f"Eval Time = {time.time() - t}", f"{outpath}/{exp_name}/log.txt")
                val_epochs.append(e)
                make_eval_plots(eval_results, eval_plots, val_epochs, exp_name)
            if (e+1) % save_per == 0:
                utils.log_print("Saving Models", f"{outpath}/{exp_name}/log.txt")
                torch.save(decoder.state_dict(), f"{outpath}/{exp_name}/models/decoder_{e}.pt")
                torch.save(encoder.state_dict(), f"{outpath}/{exp_name}/models/encoder_{e}.pt")
def run_eval(args):
    """Run inference with a pre-trained model over the test split.

    Loads encoder/decoder checkpoints for ``args.load_epoch``, decodes a
    program for each test point cloud, and writes predicted / ground-truth
    meshes and program text under ``{args.exp_name}/infer_output``.
    """
    raw_data = pickle.load(open(f"{args.dataset_path}_train.data", "rb"))
    metadata = pickle.load(open(f"{args.dataset_path}_train.meta", "rb"))
    # Same derived-metadata setup as run_train (keep the two in sync).
    metadata['max_children'] = max([int(i) for i in metadata['num_cube_map'].values()])
    metadata['rev_func_map'] = {v:k for k, v in metadata['func_map'].items()}
    for key in ('i', 'sq', 'sym'):
        metadata[f'max_d_{key}_params'] = max([len([ __l for __l in _l if __l == key]) for _l in metadata['dparam_map'].values()])
    tl_size, tl_map = get_tensor_layout(metadata)
    metadata['tl_size'] = tl_size
    metadata['tl_map'] = tl_map
    inds = []
    pc_data = []
    gt_progs = []
    test_ind_file = f'pc_data_splits/{args.category}/test.txt'
    test_inds = getInds(test_ind_file)
    for d in tqdm(raw_data):
        # NOTE(review): '>' collects num_gen+1 shapes; run_train's analogous
        # loop uses '>=' — confirm which bound is intended.
        if len(inds) > args.num_gen:
            break
        if d[0] not in test_inds:
            continue
        fillProgram(
            metadata['dsl'],
            d[1],
            metadata,
            'func_gt',
            'cparam_gt',
            'dparam_gt'
        )
        inds.append(d[0])
        gt_progs.append(d[1])
        pc = numpy.load(f'pc_data/{args.category}/{d[0]}.pts.npy')
        tpc = torch.from_numpy(pc).to(device)
        pc_data.append(tpc)
    encoder = PCEncoder()
    decoder = dslDecoder(
        args.hidden_dim,
        metadata,
    )
    encoder.load_state_dict(torch.load(
        f'{args.exp_name}/models/encoder_{args.load_epoch}.pt'
    ))
    decoder.load_state_dict(torch.load(
        f'{args.exp_name}/models/decoder_{args.load_epoch}.pt'
    ))
    encoder.to(device)
    decoder.to(device)
    encoder.eval()
    decoder.eval()
    outname = f'{args.exp_name}/infer_output'
    os.system(f'mkdir {outname}')
    os.system(f'mkdir {args.exp_name}/infer_output/objs')
    os.system(f'mkdir {args.exp_name}/infer_output/progs')
    for ind, pc, gtprog in tqdm(list(zip(inds, pc_data, gt_progs))):
        # Encode the point cloud, then decode a semantically-valid program.
        code = get_pc_encodings([pc], encoder)
        node = sv.sem_eval_prog(decoder, code.squeeze())
        fillProgram(
            metadata['dsl'],
            node,
            metadata,
            FUNC_PRED_FIELD,
            CPARAM_PRED_FIELD,
            DPARAM_PRED_FIELD,
        )
        # Skip shapes whose predicted or GT program fails to execute.
        try:
            pverts, pfaces = hier_execute(node)
            gtverts, gtfaces = hier_execute(gtprog)
        except Exception:
            continue
        utils.writeObj(
            pverts, pfaces, f'{outname}/objs/{ind}_pred_prog.obj'
        )
        utils.writeObj(
            gtverts, gtfaces, f'{outname}/objs/{ind}_gt_prog.obj'
        )
        utils.writeHierProg(node, 'dsl_prog', f"{outname}/progs/{ind}_pred_prog.txt")
        utils.writeHierProg(gtprog, 'dsl_prog', f"{outname}/progs/{ind}_gt_prog.txt")
        utils.writeSPC(
            pc.cpu().numpy(), f'{outname}/objs/{ind}_gt_pc.obj'
        )
if __name__ == '__main__':
    # CLI entry point: 'train' mode trains a new model, 'infer' mode runs a
    # pre-trained checkpoint (requires --load_epoch) over the test split.
    parser = argparse.ArgumentParser(description="Run Visual Program Induction model")
    parser.add_argument('-ds', '--dataset_path', help='Path to program data, e.g. data/shapemod_chair', type = str)
    parser.add_argument('-en', '--exp_name', help='name of experiment', type = str)
    parser.add_argument('-c', '--category', type = str, help = 'category of PartNet')
    parser.add_argument('-ms', '--max_shapes', default = 100000, type = int, help = 'max number of shapes to train/evaluate on ')
    parser.add_argument('-e', '--epochs', default = 2000, type = int, help = 'number of epochs to run for')
    parser.add_argument('-hd', '--hidden_dim', default = 256, type = int, help = 'hidden dimension size')
    parser.add_argument('-prp', '--print_per', default = 10, type = int, help = 'how often to print out training set statistics')
    parser.add_argument('-evp', '--eval_per', default = 50, type = int, help = 'how often to run evaluation statistics')
    parser.add_argument('-sp', '--save_per', default = 10, type = int, help = 'how often to save the model')
    parser.add_argument('-enc_lr', '--enc_lr', default = 0.0002, type = float, help = 'encoder learning rate')
    parser.add_argument('-dec_lr', '--dec_lr', default = 0.0002, type = float, help = 'decoder learning rate')
    parser.add_argument('-rd', '--rd_seed', default = 42, type = int, help = 'random seed')
    parser.add_argument('-ng', '--num_gen', default = 1000, type = int, help = 'number of shapes to generate each generation period')
    parser.add_argument('-nw', '--num_write', default = 25, type = int, help = 'number of shapes to write to .obj each evaluation period')
    parser.add_argument('-f_lw', '--f_lw', default = 50., type = float, help = 'weight on loss of continuous parameters')
    parser.add_argument('-d_lw', '--d_lw', default = 1., type = float, help = 'weight on loss of discrete parameters')
    parser.add_argument('-b_lw', '--b_lw', default = 1., type = float, help = 'weight on loss of boolean parameters')
    parser.add_argument('-c_lw', '--c_lw', default = 1., type = float, help = 'weight on loss of child predictions')
    parser.add_argument('-fn_lw', '--fn_lw', default = 1., type = float, help = 'weight on loss of function predictions')
    parser.add_argument('-b', '--batch_size', default = 32, type=int, help = 'batch size')
    parser.add_argument('-le', '--load_epoch', default = None, type=int, help = 'model epoch to load from pre-trained model')
    parser.add_argument('-m', '--mode', default = 'train', type=str, help = 'whether to train new model or generate samples from pre-trained model')
    args = parser.parse_args()
    # Per-term loss weights; note 'bbox' shares the continuous-param weight.
    # NOTE(review): this module-level loss_config is also read as a global by
    # dslDecoder.calc_loss, so calc_loss only works when invoked via this CLI.
    loss_config = {
        'd_prm': args.d_lw,
        'f_prm': args.f_lw,
        'bbox': args.f_lw,
        'b_prm': args.b_lw,
        'child': args.c_lw,
        'func': args.fn_lw,
    }
    if args.mode == 'train':
        writeConfigFile(args)
        run_train(
            dataset_path=args.dataset_path,
            exp_name=args.exp_name,
            max_shapes=args.max_shapes,
            epochs=args.epochs,
            hidden_dim=args.hidden_dim,
            eval_per=args.eval_per,
            loss_config=loss_config,
            rd_seed=args.rd_seed,
            print_per=args.print_per,
            num_write=args.num_write,
            enc_lr=args.enc_lr,
            dec_lr=args.dec_lr,
            save_per=args.save_per,
            category=args.category,
            batch_size=args.batch_size
        )
    elif args.mode == 'infer':
        run_eval(args)
    else:
        print(f"Bad mode {args.mode}")
| 53,175 | 34.450667 | 148 | py |
ShapeMOD | ShapeMOD-main/SA_lang/tasks/model_prog.py | import sys
sys.path.append("../")
sys.path.append("../../")
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
from ShapeAssembly import Program, hier_execute, make_hier_prog, ShapeAssembly
import sa_utils as utils
import recon_metrics
import gen_metrics
import argparse
import ast
import random
from torch.utils.data import DataLoader
import time
import matplotlib.pyplot as plt
from argparse import Namespace
import pickle
from make_abs_data import fillProgram, getCuboidDims
from tqdm import tqdm
import sem_valid as sv
import numpy
import json
# Module-wide configuration and constants.
MAX_LINES = 30  # hard cap on lines decoded per program node (eval_prog)
device = torch.device("cuda")
sa = ShapeAssembly()
outpath = "model_output"  # root directory for all experiment artifacts
SEQ_LEN = 0  # set globally in run_train from the dataset's max sequence length
MAX_DEPTH = 4  # maximum hierarchy depth explored during decoding
I_LENGTH = 11 # cuboid index
SQ_LENGTH = 6 # face index
SYM_LENGTH = 3 # axis index
VERBOSE = False
# Element-wise (unreduced) losses; masking/weighting is applied by callers.
closs = nn.BCEWithLogitsLoss(reduction='none')
celoss = nn.CrossEntropyLoss(reduction='none')
# Node-dict field names that hold decoder predictions.
FUNC_PRED_FIELD = 'func_pred'
CPARAM_PRED_FIELD = 'cparam_pred'
DPARAM_PRED_FIELD = 'dparam_pred'
USE_SEM_VALID = True
# Each entry is (display name, result key, normalizer key); logged metric is
# result[key] / result[norm_key].
TRAIN_LOG_INFO = [
    ('Total loss', 'loss', 'batch_count'),
    ('Func loss', 'func', 'batch_count'),
    ('Float Param loss', 'f_prm', 'batch_count'),
    ('Disc Param loss', 'd_prm', 'batch_count'),
    ('Bool Param loss', 'b_prm', 'batch_count'),
    ('BBox Param Loss', 'bbox', 'batch_count'),
    ('Child loss', 'child', 'batch_count'),
    ('KL loss', 'kl', 'batch_count'),
    ('Func Correct %', 'func_corr', 'func_total'),
    ('Disc Correct %', 'd_corr', 'd_total'),
    ('Bool Correct %', 'b_corr', 'b_total'),
    ('Child Correct %', 'c_corr', 'c_total'),
    ('Float Mean Error', 'f_prm', 'f_norm'),
]
EVAL_LOG_INFO = [
    ('CD', 'cd', 'no_norm'),
    ('Fscore-Def', 'fscore-def', 'no_norm'),
    ('Fscore-03', 'fscore-03', 'no_norm'),
    ('Fscore-05', 'fscore-05', 'no_norm'),
    ('IoU', 'iou', 'no_norm'),
    ('Prog Creation %', 'prog_creation_perc', 'no_norm'),
    ('Missing Line Number %', 'miss_ln', 'num_progs'),
    ('Extra Line Number %', 'extra_ln', 'num_progs'),
    ('Corr Child Number %', 'cn_corr', 'num_progs'),
    ('Func Correct %', 'func_corr', 'func_total'),
    ('Disc Correct %', 'd_corr', 'd_total'),
    ('Bool Correct %', 'b_corr', 'b_total'),
    ('Child Correct %', 'child_corr', 'child_total'),
    ('Float Mean Error', 'f_prm', 'num_progs'),
    ('BBox Mean Error', 'bbox', 'num_progs')
]
# Generation metrics: (display name, result key) pairs, no normalizer.
GEN_LOG_INFO = [
    ('Prog Creation %', 'prog_creation_perc'),
    ('Number of parts', 'num_parts'),
    ('Rootedness', 'rootedness'),
    ('Stability', 'stability'),
    ('Val FD', 'val_fd'),
    ('Train FD', 'train_fd'),
    ('Generalization', 'gen'),
    ('Coverage', 'cov'),
    ('Variety', 'var'),
]
def weighted_mae_loss(input, target, weight):
    """Weighted sum of absolute errors (not averaged, despite the 'mae' name)."""
    abs_err = (input - target).abs()
    return (weight * abs_err).sum()
def make_function(name, args):
    """Render a call expression string: name(arg1, arg2, ...)."""
    joined = ", ".join(str(arg) for arg in args)
    return f'{name}({joined})'
def assign(var_name, value):
    """Render an assignment statement string: var_name = value."""
    return f'{var_name} = {value}'
# Multi-layer perceptron helper: three linear layers with leaky-ReLU
# activations (slope 0.2) after the first two.
class MLP(nn.Module):
    def __init__(self, ind, hdim1, hdim2, odim):
        super(MLP, self).__init__()
        self.l1 = nn.Linear(ind, hdim1)
        self.l2 = nn.Linear(hdim1, hdim2)
        self.l3 = nn.Linear(hdim2, odim)

    def forward(self, x):
        h = F.leaky_relu(self.l1(x), negative_slope=0.2)
        h = F.leaky_relu(self.l2(h), negative_slope=0.2)
        return self.l3(h)
class Sampler(nn.Module):
    """Maps features to a latent code, optionally via VAE reparameterization.

    With variational=False, forward returns the mean directly. With
    variational=True it returns [sample, kld] concatenated along dim 1,
    where kld = 1 + logvar - mu^2 - sigma^2 per dimension (unscaled; the
    caller negates and sums it).
    """

    def __init__(self, feature_size, hidden_size):
        super(Sampler, self).__init__()
        self.mlp1 = nn.Linear(feature_size, hidden_size)
        self.mlp2mu = nn.Linear(hidden_size, feature_size)
        self.mlp2var = nn.Linear(hidden_size, feature_size)

    def forward(self, x, variational):
        hidden = torch.relu(self.mlp1(x))
        mu = self.mlp2mu(hidden)
        if not variational:
            return mu
        logvar = self.mlp2var(hidden)
        std = (logvar * 0.5).exp()
        eps = torch.randn_like(std)
        kld = mu.pow(2).add(logvar.exp()).mul(-1).add(1).add(logvar)
        sample = eps * std + mu
        return torch.cat([sample, kld], 1)
# Given a batch of hierarchical programs, return their latent codes plus the
# KL loss (0.0 when the encoder is run deterministically).
def get_encodings(batch, encoder, variational):
    hd = encoder.hidden_dim
    out = encoder.get_latent_codes(batch, variational)
    # The sampler packs [code, kld] along dim 1; split them back apart.
    enc = out[:, :hd]
    kld = out[:, hd:]
    if not variational:
        return enc, 0.0
    return enc, -kld.sum()
def getBatchEncData(progs):
    """Collate encoder inputs from (node, ind, par_ind, par_ln, par_l_cn) tuples.

    Returns stacked line sequences, sequence-end indices, child masks, and
    the index tensors locating each node and its slot in its parent.
    """
    nodes = [entry[0] for entry in progs]
    seq = torch.stack([nd['seq'] for nd in nodes], dim=0)
    seq_ends = torch.cat([nd['seq_end'] for nd in nodes], dim=0)
    child_masks = torch.stack(
        [nd['child_enc_mask'] for nd in nodes], dim=0
    ).unsqueeze(-1)
    inds, par_inds, par_lns, par_l_cns = (
        torch.tensor([entry[pos] for entry in progs]) for pos in range(1, 5)
    )
    return seq, seq_ends, child_masks, inds, par_inds, par_lns, par_l_cns
# Encodes an entire hierarchical program
class dslEncoder(nn.Module):
    """Bottom-up GRU encoder over hierarchical DSL programs.

    Each node's line sequence is fed through a GRU; slots where a line
    expands a child are replaced by the (already computed) child codes, so
    deeper levels must be encoded first.
    """
    def __init__(self, hidden_dim, metadata):
        super(dslEncoder, self).__init__()
        self.hidden_dim = hidden_dim
        self.metadata = metadata
        self.input_dim = metadata['tl_size']
        self.inp_net = MLP(self.input_dim, hidden_dim, hidden_dim, hidden_dim)
        # Learned initial hidden state for the GRU.
        self.h_start = torch.nn.Parameter(
            torch.randn(1, 1, hidden_dim, device=device, requires_grad=True)
        )
        self.gru = nn.GRU(hidden_dim, hidden_dim, batch_first = True)
        self.sampler = Sampler(hidden_dim, hidden_dim)
    def encode_programs(self, batch):
        """Return one latent vector per root program in `batch` (pre-sampler)."""
        # BFS over every node, bucketed by hierarchy level; each entry also
        # records where its resulting code must be written in the parent
        # (parent index, line, child slot).
        hier_levels = {}
        q = [(b, 0, 1e8, 1e8, 1e8) for b in batch]
        ind = 0
        while len(q) > 0:
            n, hl, pind, pln, pl_cn = q.pop(0)
            if hl not in hier_levels:
                hier_levels[hl] = [(n, ind, pind, pln, pl_cn)]
            else:
                hier_levels[hl].append((n, ind, pind, pln, pl_cn))
            c_ln_map = n['child_to_ln_map']
            for cn, child in enumerate(n['children']):
                if len(child) > 0:
                    q.append((child, hl+1, ind, c_ln_map[cn][0], c_ln_map[cn][1]))
            ind += 1
        hl_keys = list(hier_levels.keys())
        # Deepest levels first so child codes exist before their parents run.
        hl_keys.sort(reverse=True)
        # NOTE(review): relies on the module-global SEQ_LEN set by run_train.
        child_codes = torch.zeros(
            ind, SEQ_LEN, self.metadata['max_children'], self.hidden_dim
        ).float().to(device)
        for hl in hl_keys[:-1]:
            qp = hier_levels[hl]
            while len(qp) > 0:
                bs = min(len(batch), len(qp))
                bprogs = qp[:bs]
                qp = qp[bs:]
                seq, seq_ends, child_masks, inds, \
                    par_inds, par_lns, par_l_cns = getBatchEncData(bprogs)
                local_inp = self.inp_net(seq)
                # Where a line expands children, use the summed child codes
                # instead of the embedded line itself.
                child_encs = child_codes[inds].sum(dim=2)
                inp = (child_masks * child_encs) + ((1-child_masks) * local_inp)
                gru_out, _ = self.gru(inp, self.h_start.repeat(1,inp.shape[0],1))
                # Take the hidden state at each sequence's final line.
                h = gru_out[
                    torch.arange(seq_ends.shape[0], device=device),
                    seq_ends
                ]
                child_codes[par_inds, par_lns, par_l_cns] = h
        # special case last one: root level has no parent slots to write into.
        seq, seq_ends, child_masks, inds, _, _, _ = getBatchEncData(hier_levels[0])
        local_inp = self.inp_net(seq)
        child_encs = child_codes[inds].sum(dim=2)
        inp = (child_masks * child_encs) + ((1-child_masks) * local_inp)
        gru_out, _ = self.gru(inp, self.h_start.repeat(1,inp.shape[0],1))
        h = gru_out[
            torch.arange(seq_ends.shape[0], device=device),
            seq_ends
        ]
        return h
    def get_latent_codes(self, batch, variational):
        """Encode programs then (optionally variationally) sample latent codes."""
        zs = self.encode_programs(batch)
        return self.sampler(zs, variational)
def getShapeEvalResult(pred_node, gt_node):
    """Recursively compare a predicted program tree against ground truth.

    Returns a dict of accumulated counts/sums (func, bool, discrete and
    child correctness; float error; line-count mismatches) over the whole
    tree; callers normalize via the paired keys in EVAL_LOG_INFO.
    Comparisons are truncated to the shorter of the two line sequences.
    """
    # Corr line %, Func Corr %, Child Corr %, Bool Corr %, Disc Corr %, Float Mean Distance,
    # child num corr, line numc corr
    results = {}
    # Skip the GT start/end tokens ([1:-1]) when comparing line counts.
    ln = min(pred_node[FUNC_PRED_FIELD].shape[0], gt_node['e_func_gt'][1:-1].shape[0])
    if pred_node[FUNC_PRED_FIELD].shape[0] < gt_node['e_func_gt'][1:-1].shape[0]:
        results['miss_ln'] = 1.
    else:
        results['miss_ln'] = 0.
    if pred_node[FUNC_PRED_FIELD].shape[0] > gt_node['e_func_gt'][1:-1].shape[0]:
        results['extra_ln'] = 1.
    else:
        results['extra_ln'] = 0.
    results['num_progs'] = 1.
    func_pred = pred_node[FUNC_PRED_FIELD][:ln].cpu()
    cparam_pred = pred_node[CPARAM_PRED_FIELD][:ln].cpu()
    dparam_pred = pred_node[DPARAM_PRED_FIELD][:ln].cpu()
    func_target = gt_node['e_func_gt'][1:1+ln].cpu()
    float_target = gt_node['e_float_target'][1:ln+1].cpu()
    float_mask = gt_node['e_float_mask'][1:ln+1].cpu()
    disc_target = gt_node['e_disc_target'][1:ln+1].cpu()
    disc_mask = gt_node['e_disc_mask'][1:ln+1].cpu()
    bool_target = gt_node['e_bool_target'][1:ln+1].cpu()
    bool_mask = gt_node['e_bool_mask'][1:ln+1].cpu()
    results['func_corr'] = (
        func_pred == func_target
    ).float().sum().item()
    results['func_total'] = ln
    # Boolean params live in the continuous-param tensor (cparam_pred).
    results['b_corr'] = (
        (cparam_pred == bool_target).float() * bool_mask
    ).sum().item()
    results['b_total'] = (bool_mask.sum() + 1e-8).item()
    results['d_corr'] = (
        (dparam_pred == disc_target).float() * disc_mask
    ).sum().item()
    results['d_total'] = (disc_mask.sum() + 1e-8).item()
    # Mean absolute float error for this node (already normalized per node).
    results['f_prm'] = ((
        ((cparam_pred - float_target).abs() * float_mask).sum()
    ) / (float_mask.sum() + 1e-8)).item()
    cn = min(len(pred_node['children']), len(gt_node['children']))
    if len(pred_node['children']) == len(gt_node['children']):
        results['cn_corr'] = 1.
    else:
        results['cn_corr'] = 0.
    results['child_corr'] = 0.
    results['child_total'] = cn
    for pred_child, gt_child in zip(pred_node['children'][:cn], gt_node['children'][:cn]):
        # A child slot is "correct" when both sides agree on expand vs leaf.
        if len(pred_child) == 0 and len(gt_child) == 0:
            results['child_corr'] += 1.
        elif len(pred_child) > 0 and len(gt_child) > 0:
            results['child_corr'] += 1.
            # Recurse and fold the child's counts into ours; child results
            # always carry the same key set, so += is safe here.
            child_results = getShapeEvalResult(pred_child, gt_child)
            for key in child_results:
                results[key] += child_results[key]
    return results
def getBatchDecData(progs):
    """Collate decoder training data for a batch of program nodes.

    Builds teacher-forcing input/target sequences (shifted by one line),
    per-line loss weights, child-expansion targets, and the index pairs
    linking each expanded child slot (lnext_inds, into the predicted next
    codes) to the corresponding child node (cnext_inds, into `children`).
    """
    seq = torch.stack([p['seq'] for p in progs],dim=0)
    inp_seq = seq[:,:-1,:]
    tar_seq = seq[:,1:,:]
    seq_weight = torch.stack([p['seq_mask'] for p in progs],dim=0)[:,1:]
    fprm_weight = torch.stack([p['fprm_weight'] for p in progs], dim=0)[:, 1:]
    children = [p['children'] for p in progs]
    child_target = torch.stack([p['child_target'] for p in progs], dim=0)[:, 1:]
    child_weight = torch.stack([p['child_weight'] for p in progs], dim=0)[:, 1:]
    # Positions where a real (weighted) child expansion is targeted.
    lnext_inds = ((child_target.bool()) & child_weight.bool()).nonzero().tolist()
    cnext_inds = []
    for i in range(len(progs)):
        for j in progs[i]["exp_inds"]:
            cnext_inds.append([i,j])
    # NOTE(review): lnext_inds and cnext_inds are later zipped pairwise in
    # train_progs — assumes both enumerate expansions in the same order.
    return inp_seq, tar_seq, seq_weight, fprm_weight, children, \
        child_target, child_weight, lnext_inds, cnext_inds
# GRU recurrent Decoder
class dslDecoder(nn.Module):
def __init__(self, hidden_dim, metadata):
super(dslDecoder, self).__init__()
self.hidden_dim = hidden_dim
self.metadata = metadata
self.input_dim = metadata['tl_size']
self.bb_net = MLP(hidden_dim, hidden_dim, hidden_dim, 3)
self.inp_net = MLP(self.input_dim + MAX_DEPTH + 3, hidden_dim, hidden_dim, hidden_dim)
self.max_cparams = metadata['max_cparams']
self.num_funcs = len(metadata['cparam_map'])
self.gru = nn.GRU(hidden_dim, hidden_dim, batch_first = True)
self.max_children = metadata['max_children']
self.child_net = MLP(hidden_dim * 2, hidden_dim//2, hidden_dim//8, self.max_children)
on = self.max_children * hidden_dim
self.next_code_net = MLP(hidden_dim * 2, hidden_dim, hidden_dim, on)
tl_map = metadata['tl_map']
func_net = MLP(hidden_dim, hidden_dim//2, hidden_dim//4, self.num_funcs)
start, end = tl_map['func']
func_net.start = start
func_net.end = end
func_net.line_cond = None
func_net.bb_cond = None
func_net.name = 'func'
func_net.func = None
func_net._type = 'func'
net_list = [func_net]
for _,func in metadata['func_map'].items():
if len(metadata['dparam_map'][func]) > 0:
low = 1e8
high = -1e8
for i, prm in enumerate(metadata['dparam_map'][func]):
start, end = tl_map[f'{func}_{prm}_{i}']
mlp = MLP(hidden_dim, hidden_dim //2, hidden_dim//4, end-start)
mlp.start = start
mlp.end = end
mlp.line_cond = None
mlp.bb_cond = False
mlp.func = func
mlp.name = f'{func}_{prm}_{i}'
mlp._type = 'disc'
net_list.append(mlp)
low = min(low, start)
high = max(high, end)
else:
low = None
high = None
if f'{func}_f' in tl_map:
start, end = tl_map[f'{func}_f']
if low is None or high is None:
mlp = MLP(hidden_dim + 3, hidden_dim //2, hidden_dim//4, end-start)
mlp.line_cond = None
else:
mlp = MLP(hidden_dim + 3 + high-low, hidden_dim //2, hidden_dim//4, end-start)
mlp.line_cond = (low, high)
mlp.start = start
mlp.end = end
mlp.bb_cond = True
mlp.func = func
mlp.name = f'{func}_f'
mlp._type = 'f'
net_list.append(mlp)
if f'{func}_b' in tl_map:
start, end = tl_map[f'{func}_b']
mlp = MLP(hidden_dim, hidden_dim //2, hidden_dim//4, end-start)
mlp.start = start
mlp.end = end
mlp.line_cond = None
mlp.bb_cond = False
mlp.func = func
mlp.name = f'{func}_b'
mlp._type = 'b'
net_list.append(mlp)
self.net_list = nn.ModuleList(net_list)
def train_forward(self, inp_seq, code, _bb_dims, _hier_ind, gt_seq):
bb_dims = _bb_dims.unsqueeze(1).repeat(1,inp_seq.shape[1],1)
hier_oh = torch.zeros(
inp_seq.shape[0], inp_seq.shape[1], MAX_DEPTH, device=device
)
hier_oh[
torch.arange(inp_seq.shape[0],device=device),
:,
_hier_ind
] = 1.0
inp = self.inp_net(
torch.cat(
(inp_seq, bb_dims, hier_oh), dim=2)
)
gru_out, _ = self.gru(inp, code.unsqueeze(0).contiguous())
fstart, fend = self.metadata['tl_map']['func']
commands = torch.argmax(gt_seq[:,:,fstart:fend], dim = 2).flatten()
flat_out = torch.zeros(commands.shape[0], inp_seq.shape[2], device=device).float()
flat_gt_seq = gt_seq.reshape(commands.shape[0], -1)
flat_bb_dims = bb_dims.reshape(commands.shape[0], -1)
flat_gru_out = gru_out.reshape(commands.shape[0], -1)
for net in self.net_list:
if net.func is None:
# Func net
flat_out[:,net.start:net.end] = net(flat_gru_out)
else:
cmd_inds = (commands == net.func).nonzero().flatten()
if net.line_cond is not None:
line_cond = flat_gt_seq[cmd_inds, net.line_cond[0]:net.line_cond[1]]
else:
line_cond = torch.zeros(cmd_inds.shape[0], 0, device=device)
if net.bb_cond is True:
bb_cond = flat_bb_dims[cmd_inds,:]
else:
bb_cond = torch.zeros(cmd_inds.shape[0], 0, device=device)
flat_out[cmd_inds, net.start:net.end] = net(torch.cat((
flat_gru_out[cmd_inds,:], line_cond, bb_cond
), dim=1))
out = flat_out.view(inp_seq.shape)
double_enc = torch.cat((
gru_out, code.unsqueeze(1).repeat(1, gru_out.shape[1], 1)
), dim = 2)
child_pred = self.child_net(
double_enc
)
next_codes = self.next_code_net(
double_enc
).view(inp_seq.shape[0], inp_seq.shape[1], self.max_children, -1)
return out, next_codes, child_pred
def calc_loss(self, out, pchild, tar, child_tar, seq_weight, fprm_weight, child_weight):
result_map = {}
result_map['f_prm'] = weighted_mae_loss(out, tar, fprm_weight)
tl_map = self.metadata['tl_map']
fstart, fend = tl_map['func']
with torch.no_grad():
commands = torch.argmax(tar[:,:,fstart:fend], dim = 2).flatten()
pcommands = torch.argmax(out[:,:,fstart:fend], dim = 2).flatten()
result_map['func_corr'] = (
(commands == pcommands).float() * seq_weight.flatten()
).sum().item() * 1.0
result_map['func_total'] = seq_weight.sum().item()
result_map['func'] = (celoss(
out[:,:,fstart:fend].view(-1,fend-fstart),
commands
) * seq_weight.flatten()).sum()
result_map['child'] = (closs(pchild, child_tar) * child_weight).sum()
result_map['c_corr'] = (((pchild >= 0).float() == child_tar).float() * child_weight).sum().item()
result_map['c_total'] = (child_weight.sum() + 1e-8).item()
b_prm = torch.tensor(0,device=device).float()
b_corr = 0
b_total = 0
d_prm = torch.tensor(0,device=device).float()
d_corr = 0
d_total = 0
for key, (start, end) in tl_map.items():
if key == 'func':
continue
cmd = int(key.split('_')[0])
typ = key.split('_')[1]
if typ == 'f':
continue
cmd_mask = (commands == cmd).float().flatten()
if cmd_mask.sum() == 0:
continue
if typ == 'i' or typ == 'sq' or typ == 'sym':
with torch.no_grad():
ktar = torch.argmax(tar[:,:,start:end], dim=2).flatten()
kpout = torch.argmax(out[:,:,start:end], dim=2).flatten()
d_corr += (
(kpout == ktar).float() * cmd_mask
).sum().item() * 1.0
d_total += cmd_mask.sum().item()
d_prm += (celoss(
out[:,:,start:end].view(-1, end-start),
ktar
) * cmd_mask).sum()
elif typ == 'b':
with torch.no_grad():
ktar = tar[:, :, start:end].reshape(-1, end-start)
kpout = (out[:,:, start:end].reshape(-1, end-start) >= 0).float()
b_corr += (
(kpout == ktar).float() * cmd_mask.unsqueeze(-1)
).sum().item() * 1.0
b_total += cmd_mask.sum().item() * (end-start)
b_prm += (closs(
out[:,:,start:end].reshape(-1, end-start),
ktar
) * cmd_mask.unsqueeze(-1)).sum()
result_map['b_prm'] = b_prm
result_map['b_corr'] = b_corr + 1e-8
result_map['b_total'] = b_total + 1e-8
result_map['d_prm'] = d_prm
result_map['d_corr'] = d_corr + 1e-8
result_map['d_total'] = d_total + 1e-8
result_map['f_norm'] = (loss_config['f_prm'] * (fprm_weight.sum() + 1e-8)).item() *1.0
return result_map
def getStartLine(self):
l = torch.zeros(1,1,self.metadata['tl_size'],device=device).float()
l[0,0,0] = 1.0
return l
def decode_line(self, line):
_cparam = torch.zeros(self.metadata['max_cparams'], device=device).float()
_dparam = torch.zeros(self.metadata['max_dparams'], device=device).long()
fstart, fend = self.metadata['tl_map']['func']
cmd = line[fstart:fend].argmax().item()
float_preds = []
bool_preds = []
tl_map = self.metadata['tl_map']
if f'{cmd}_f' in tl_map:
fstart, fend = tl_map[f'{cmd}_f']
float_preds = line[fstart:fend].tolist()
if f'{cmd}_b' in tl_map:
bstart, bend = tl_map[f'{cmd}_b']
bool_preds = line[bstart:bend].tolist()
for i,prm in enumerate(self.metadata['cparam_map'][cmd]):
if prm == 'f':
v = float_preds.pop(0)
elif prm == 'b':
v = bool_preds.pop(0)
_cparam[i] = v
for i, prm in enumerate(self.metadata['dparam_map'][cmd]):
istart, iend = tl_map[f'{cmd}_{prm}_{i}']
v = torch.argmax(line[istart:iend]).item()
_dparam[i] = v
return cmd, _cparam, _dparam
def split_lines(self, lines):
p_func = []
p_cparam = []
p_dparam = []
for line in lines:
_f, _c, _d = self.decode_line(line)
p_func.append(_f)
p_cparam.append(_c)
p_dparam.append(_d)
return p_func, p_cparam, p_dparam
def eval_forward(self, inp_seq, code, code_start, bb_dims, hier_ind):
bb_dims = bb_dims.unsqueeze(0).unsqueeze(0).repeat(1,inp_seq.shape[1],1)
hier_oh = torch.zeros(1, inp_seq.shape[1], MAX_DEPTH).to(device)
hier_oh[0, :, min(hier_ind, 2)] = 1.0
inp = self.inp_net(
torch.cat(
(inp_seq, bb_dims, hier_oh), dim=2)
)
gru_out, h = self.gru(inp, code.view(1,1,-1))
out = torch.zeros(inp_seq.shape, device=device).float()
commands = None
for net in self.net_list:
if net.func is not None:
assert commands is not None
if net.func != commands:
continue
if net.line_cond is not None:
line_cond = out[:,:,net.line_cond[0]:net.line_cond[1]]
else:
line_cond = torch.zeros(inp_seq.shape[0], inp_seq.shape[1], 0, device=device)
if net.bb_cond is True:
bb_cond = bb_dims
else:
bb_cond = torch.zeros(inp_seq.shape[0], inp_seq.shape[1], 0, device=device)
raw_out = net(torch.cat((
gru_out, line_cond, bb_cond
), dim=2))
if net._type == 'func':
cmd = torch.argmax(raw_out.squeeze()).item()
out[0,0, net.start+cmd] = 1.0
assert commands == None
commands = cmd
elif net._type == 'disc':
m = torch.argmax(raw_out.squeeze()).item()
out[0,0, net.start+m] = 1.0
elif net._type == 'b':
r = (raw_out.squeeze() >= 0.).float()
out[0,0,net.start:net.end] = r
elif net._type == 'f':
bb_max = bb_cond.max().item()
r = torch.clamp(raw_out.squeeze(), 0.0, 10.)
out[0,0,net.start:net.end] = r
double_enc = torch.cat((
gru_out, code_start.repeat(1, gru_out.shape[1], 1)
), dim = 2)
child_pred = self.child_net(
double_enc
)
next_codes = self.next_code_net(
double_enc
).view(inp_seq.shape[0], inp_seq.shape[1], self.max_children, -1)
return out, next_codes, child_pred, h
def eval_prog(self, code, node = None):
if node is None:
bb_dims = self.bb_net(code)
node = {
'depth': 0,
'bb_dims': bb_dims
}
if node['depth'] > MAX_DEPTH:
node.pop('depth')
return
h = code.view(1,1, -1)
h_start = h.clone()
inp = self.getStartLine()
out_lines = []
children = []
for i in range(MAX_LINES):
inp, pnext, pchild, h = self.eval_forward(
inp, h, h_start, node['bb_dims'], node['depth']
)
clean_out = inp.squeeze()
fstart, fend = self.metadata['tl_map']['func']
func_ind = torch.argmax(clean_out[fstart:fend]).item()
if func_ind == 1:
break
out_lines.append(clean_out)
child_pred = pchild[0][0]
next_codes = pnext[0][0]
_, _c, _ = self.decode_line(clean_out)
cube_dims = getCuboidDims(
self.metadata['dsl'].library[
self.metadata['rev_func_map'][func_ind]
],
_c,
self.metadata['jparam_map'][func_ind],
node['bb_dims']
)
cube_dims = torch.tensor(cube_dims,device=child_pred.device)
for i in range(self.metadata['num_cube_map'][func_ind]):
if child_pred[i].item() >= 0.0:
child = {
'depth': node['depth']+1,
'bb_dims': cube_dims[i]
}
children.append(child)
self.eval_prog(next_codes[i], child)
else:
children.append({})
node['children'] = children
out_funcs, out_cprms, out_dprms = self.split_lines(out_lines)
node[FUNC_PRED_FIELD] = torch.tensor(out_funcs)
node[CPARAM_PRED_FIELD] = torch.stack(out_cprms) if len(out_cprms) > 0 else torch.tensor([])
node[DPARAM_PRED_FIELD] = torch.stack(out_dprms) if len(out_dprms) > 0 else torch.tensor([])
return node
    def train_progs(self, batch, codes, loss_config):
        """Teacher-forced training over a batch of hierarchical programs.

        Processes the hierarchy breadth-first with a work queue: each pass
        decodes up to len(batch) nodes, then enqueues their expanded children
        together with the predicted child codes. Accumulates per-loss sums
        into result_map (keys from loss_config plus any extras calc_loss adds).
        """
        result_map = {key:0. for key in loss_config}
        bbox_target = torch.stack([b['bbox_gt'] for b in batch], dim=0)
        bbox_pred = self.bb_net(codes)
        # L1 bbox regression loss against ground-truth root dims.
        bbox_loss = (bbox_target - bbox_pred).abs().sum()
        result_map['bbox'] = bbox_loss
        # Work queues: programs, encodings, bbox targets, hierarchy depths.
        qp = batch
        qe = codes
        qbb = bbox_target
        qhi = torch.zeros(len(batch), device=device).long()
        while len(qp) > 0:
            # Process at most len(batch) queue entries per decoder pass.
            bs = min(len(batch), len(qp))
            bprogs = qp[:bs]
            bencs = qe[:bs]
            bbb = qbb[:bs]
            bhi = qhi[:bs]
            qp = qp[bs:]
            qe = qe[bs:]
            qbb = qbb[bs:]
            qhi = qhi[bs:]
            inp_seq, tar_seq, seq_weights, fprm_weights, children, \
            child_targets, child_weights, lnext_inds, cnext_inds = getBatchDecData(bprogs)
            pout, pnext, pchild = self.train_forward(
                inp_seq, bencs, bbb, bhi, tar_seq
            )
            _result = self.calc_loss(
                pout,
                pchild,
                tar_seq,
                child_targets,
                seq_weights,
                fprm_weights,
                child_weights
            )
            for key in _result:
                if key in result_map:
                    result_map[key] += _result[key]
                else:
                    result_map[key] = _result[key]
            # Collect children to expand: lnext_inds locates the predicted
            # next-code tensor, cnext_inds locates the ground-truth child node.
            _qp = []
            _qe = []
            _qbb = []
            _qhi = []
            for ((li, lj, lk), (ci, cj)) in zip(lnext_inds, cnext_inds):
                _qp.append(children[ci][cj])
                _qe.append(pnext[li, lj, lk])
                _qbb.append(children[ci][cj]['bbox_gt'])
                _qhi.append(bhi[li]+1)
            if len(_qp) > 0:
                qe = torch.cat((qe, torch.stack(_qe)), dim = 0)
                qbb = torch.cat((qbb, torch.stack(_qbb).to(device)), dim = 0)
                qp += _qp
                qhi = torch.cat((qhi, torch.stack(_qhi)), dim = 0)
        return result_map
def writeConfigFile(args):
    """Create the experiment output directory tree and record the run config.

    Directories are rooted at the module-level `outpath`. Safe to call
    repeatedly: existing directories are left untouched.

    :param args: argparse.Namespace for this run; its repr is written to
        config.txt so the experiment settings are recoverable later.
    """
    exp_root = os.path.join(outpath, args.exp_name)
    subdirs = [
        'plots/train', 'plots/eval', 'plots/gen',
        'programs/train', 'programs/val', 'programs/gen', 'programs/gt',
        'objs/train', 'objs/val', 'objs/gen', 'objs/gt',
        'models',
    ]
    # os.makedirs replaces the previous per-directory `os.system('mkdir ...')`
    # calls: portable, no shell subprocess per directory, and intermediate
    # directories (outpath, exp_root, plots/, programs/, objs/) are created
    # automatically.
    for sub in subdirs:
        os.makedirs(os.path.join(exp_root, sub), exist_ok=True)
    with open(os.path.join(exp_root, "config.txt"), "w") as f:
        f.write(f"{args}\n")
def get_max_sq_len(nodes):
    """Return the longest 'func_gt' sequence length over a forest of program nodes.

    :param nodes: iterable of (index, node) pairs; each node is a dict with a
        'func_gt' array/tensor (length = number of program lines) and a
        'children' list whose entries are child nodes or empty dicts (leaves).
    :return: maximum sequence length across all recursively reachable nodes
        (0 when `nodes` is empty).
    """
    from collections import deque
    longest = 0
    # Breadth-first traversal; deque gives O(1) popleft where the previous
    # list.pop(0) was O(n) per pop.
    queue = deque(node for _, node in nodes)
    while queue:
        node = queue.popleft()
        longest = max(longest, node['func_gt'].shape[0])
        # Empty dicts mark absent children and carry no sequence of their own.
        queue.extend(child for child in node['children'] if len(child) > 0)
    return longest
def addTargets(node, metadata):
    """Recursively attach training-target tensors to a program-hierarchy node.

    Builds, per node: the flattened teacher-forcing sequence ('seq') and its
    masks/weights, child expansion targets, and per-line 'e_*' evaluation
    targets for continuous (float/bool) and discrete parameters.

    Relies on module globals `device` and `SEQ_LEN` (SEQ_LEN is set by
    run_train before this is called).
    """
    gt_funcs = node['func_gt']
    gt_cparams = node['cparam_gt']
    gt_dparams = node['dparam_gt']
    # First three continuous params of line 1 are the bbox dims — TODO confirm
    # this matches the DSL's bbox line convention.
    node['bbox_gt'] = torch.from_numpy(node['cparam_gt'][1][:3]).float().to(device)
    seq = torch.zeros(SEQ_LEN, metadata['tl_size'], device=device).float()
    fprm_weight = torch.zeros(SEQ_LEN, metadata['tl_size'], device=device).float()
    seq_mask = torch.zeros(SEQ_LEN, device=device).float()
    seq_end = 0
    for i, fn in enumerate(gt_funcs.tolist()):
        seq_mask[i] = 1.0
        seq_end = i
        # One flat tensor line: one-hot function + params laid out per tl_map.
        line = torch.zeros(metadata['tl_size'], device=device).float()
        weight = torch.zeros(metadata['tl_size'], device=device).float()
        line[fn] = 1.0
        float_vals = []
        bool_vals = []
        for j, tp in enumerate(metadata['cparam_map'][fn]):
            if tp == 'f':
                float_vals.append(gt_cparams[i][j].item())
            elif tp == 'b':
                bool_vals.append(gt_cparams[i][j].item())
            else:
                assert False, f'bad type {tp}'
        if len(float_vals) > 0:
            start, end = metadata['tl_map'][f'{fn}_f']
            line[start:end] = torch.tensor(float_vals, device=device)
            # Only float params get reconstruction weight in fprm_weight.
            weight[start:end] = 1.0
        if len(bool_vals) > 0:
            start, end = metadata['tl_map'][f'{fn}_b']
            line[start:end] = torch.tensor(bool_vals, device=device)
        for j, prm in enumerate(metadata['dparam_map'][fn]):
            # Discrete params become one-hot within their layout slot.
            tar = int(gt_dparams[i][j].item())
            start, end = metadata['tl_map'][f'{fn}_{prm}_{j}']
            line[start+tar] = 1.0
        seq[i] = line
        fprm_weight[i] = weight
    node['seq'] = seq
    node['fprm_weight'] = fprm_weight
    node['seq_end'] = torch.tensor([seq_end],device=device).long()
    node['seq_mask'] = seq_mask
    # Child-expansion targets: for each line slot, which child positions exist
    # (weight) and which are actually expanded sub-programs (target).
    child_target = torch.zeros(SEQ_LEN, metadata['max_children'], device=device).float()
    child_weight = torch.zeros(SEQ_LEN, metadata['max_children'], device=device).float()
    child_enc_mask = torch.zeros(SEQ_LEN, device = device).float()
    child_to_ln_map = {}
    for i, inds in enumerate(node['child_gt']):
        for j, cind in enumerate(inds):
            child_weight[i, j] = 1.0
            child_to_ln_map[cind] = (i, j)
            if len(node['children'][cind]) > 0:
                child_target[i,j] = 1.0
                child_enc_mask[i] = 1.0
    node['child_target'] = child_target
    node['child_weight'] = child_weight
    node['child_to_ln_map'] = child_to_ln_map
    node['child_enc_mask'] = child_enc_mask
    node['exp_inds'] = []
    # Recurse into non-empty children, remembering which indices expand.
    for i, child in enumerate(node['children']):
        if len(child) > 0:
            node['exp_inds'].append(i)
            addTargets(child, metadata)
    gt_cparams = node['cparam_gt']
    gt_dparams = node['dparam_gt']
    # Evaluation-time targets/masks, padded to max param counts per line.
    bool_target = torch.zeros(gt_funcs.shape[0], metadata['max_cparams'], device=device)
    bool_mask = torch.zeros(gt_funcs.shape[0], metadata['max_cparams'], device=device)
    float_target = torch.zeros(gt_funcs.shape[0], metadata['max_cparams'], device=device)
    float_mask = torch.zeros(gt_funcs.shape[0], metadata['max_cparams'], device=device)
    for i, tf in enumerate(gt_funcs):
        # NOTE(review): `tf` here is a raw element of gt_funcs (presumably a
        # numpy scalar) used as a dict key, unlike the .tolist() loop above.
        for j, tp in enumerate(metadata['cparam_map'][tf]):
            if tp == 'f':
                float_target[i][j] = gt_cparams[i][j].item()
                float_mask[i][j] = 1.0
            elif tp == 'b':
                bool_target[i][j] = gt_cparams[i][j].item()
                bool_mask[i][j] = 1.0
            else:
                assert False, f'bad type {tp}'
    disc_target = torch.zeros(gt_funcs.shape[0], metadata['max_dparams'], device=device).long()
    disc_mask = torch.zeros(gt_funcs.shape[0], metadata['max_dparams'], device=device)
    for i, tf in enumerate(gt_funcs):
        for j, _ in enumerate(metadata['dparam_map'][tf]):
            disc_target[i][j] = gt_dparams[i][j].item()
            disc_mask[i][j] = 1.0
    node['e_func_gt'] = torch.tensor(node['func_gt'], device=device).long()
    node['e_cparam_gt'] = torch.tensor(node['cparam_gt'], device=device).float()
    node['e_dparam_gt'] = torch.tensor(node['dparam_gt'], device=device).long()
    node['e_bool_target'] = bool_target.float()
    node['e_float_target'] = float_target.float()
    node['e_disc_target'] = disc_target.long()
    node['e_bool_mask'] = bool_mask.float()
    node['e_float_mask'] = float_mask.float()
    node['e_disc_mask'] = disc_mask.float()
def _col(samples):
return samples
def _bcol(samples):
progs = [s[1] for s in samples]
return progs
# Full encoder + decoder training logic for a single program (i.e. a batch)
def model_train(batch, encoder, decoder, enc_opt, dec_opt, loss_config):
    """Run one encoder+decoder training step on a single batch of programs.

    When both optimizers are provided, performs a backward pass and update;
    with either optimizer None this acts as a (no-grad-free) evaluation pass.

    :param loss_config: dict of loss name -> weight; 'kl' enables variational
        encoding.
    :return: dict of per-loss weighted scalar values (batch-normalized), plus
        'loss' — note 'loss' is only set on the update path (inside the if).
    """
    codes, kl_loss = get_encodings(batch, encoder, 'kl' in loss_config)
    shape_result = decoder.train_progs(batch, codes, loss_config)
    if 'kl' in loss_config:
        shape_result['kl'] = kl_loss
    loss = 0.
    for key in loss_config:
        # Accumulate the weighted tensor into `loss`, then overwrite the map
        # entry with its detached scalar (assumes each entry is a tensor with
        # .item() — TODO confirm for all loss_config keys).
        loss += (loss_config[key] * shape_result[key]) / len(batch)
        shape_result[key] = (loss_config[key] * shape_result[key].item()) / len(batch)
    if torch.is_tensor(loss) and enc_opt is not None and dec_opt is not None:
        dec_opt.zero_grad()
        enc_opt.zero_grad()
        loss.backward()
        dec_opt.step()
        enc_opt.step()
        shape_result['loss'] = loss.item()
    return shape_result
def model_train_results(dataset, encoder, decoder, enc_opt, dec_opt, loss_config):
    """Run one epoch over `dataset`, summing per-loss results across batches.

    If both optimizers are provided the models are put in train mode and
    updated per batch; otherwise they run in eval mode with no updates.

    :return: dict mapping loss name -> summed value over all batches, plus
        'batch_count' with the number of batches processed.
    """
    training = enc_opt is not None and dec_opt is not None
    if training:
        decoder.train()
        encoder.train()
    else:
        decoder.eval()
        encoder.eval()
    ep_result = {}
    bc = 0.
    for batch in dataset:
        bc += 1.
        # BUGFIX: the optimizers were previously passed positionally in
        # swapped order (dec_opt into the enc_opt slot and vice versa); that
        # only happened to work because model_train steps both symmetrically.
        # Keywords make the binding explicit and correct.
        batch_result = model_train(
            batch, encoder, decoder,
            enc_opt=enc_opt, dec_opt=dec_opt,
            loss_config=loss_config,
        )
        for key in batch_result:
            ep_result[key] = ep_result.get(key, 0.) + batch_result[key]
    ep_result['batch_count'] = bc
    return ep_result
def model_eval(
    eval_train_dataset, eval_val_dataset, encoder, decoder, exp_name, epoch, num_write, metadata
):
    """Reconstruction evaluation over train and val splits.

    For each shape: encode (deterministically), decode back into a program,
    score the program against ground truth, then fill + collect it for the
    reconstruction metrics pass (which also writes objs/programs to disk).

    :return: {'train': {...metrics...}, 'val': {...}}; empty dict for an
        empty split.
    """
    eval_results = {}
    for name, dataset in [('train', eval_train_dataset), ('val', eval_val_dataset)]:
        if len(dataset) == 0:
            eval_results[name] = {}
            continue
        named_results = {
            'count': 0.,
            'miss_hier_prog': 0.,
            'no_norm': 1.0
        }
        recon_sets = []
        for batch in dataset:
            assert len(batch) == 1, 'batch size 1'
            shape = batch[0]
            named_results['count'] += 1.
            # Always get maximum likelihood estimation (i.e. mean) of shape encoding at eval time
            code, _ = get_encodings([shape[1]], encoder, variational=False)
            if USE_SEM_VALID:
                node = sv.sem_eval_prog(decoder, code.squeeze())
            else:
                node = decoder.eval_prog(code.squeeze())
            try:
                shape_result = getShapeEvalResult(node, shape[1])
                shape_result['bbox'] = (node['bb_dims'] - shape[1]['bbox_gt']).abs().sum().item()
            except Exception as e:
                # Best-effort: a failed per-shape eval contributes nothing.
                if VERBOSE:
                    print(f"FAILED SHAPE EVAL RESULT WITH {e}")
                shape_result = {}
            for key in shape_result:
                if key not in named_results:
                    named_results[key] = shape_result[key]
                else:
                    named_results[key] += shape_result[key]
            try:
                # Resolve predicted fields into an executable program in-place.
                fillProgram(
                    metadata['dsl'],
                    node,
                    metadata,
                    FUNC_PRED_FIELD,
                    CPARAM_PRED_FIELD,
                    DPARAM_PRED_FIELD,
                )
                recon_sets.append((node, shape[1], shape[0]))
            except Exception as e:
                if VERBOSE:
                    print(f"Failed Recon Program with {e}")
                named_results[f'miss_hier_prog'] += 1.
        # For reconstruction, get metric performance
        recon_results, recon_misses = recon_metrics.recon_metrics(
            recon_sets, outpath, exp_name, name, epoch, VERBOSE, num_write + 1
        )
        for key in recon_results:
            named_results[key] = recon_results[key]
        named_results[f'miss_hier_prog'] += recon_misses
        named_results[f'prog_creation_perc'] = (
            named_results[f'count'] - named_results[f'miss_hier_prog']
        ) / named_results[f'count']
        eval_results[name] = named_results
    return eval_results
def model_gen(decoder, num_gen, metadata, exp_name, epoch, num_write, train_samps, val_samps,
              hidden_dim=None):
    """Sample `num_gen` latent codes, decode them into programs, and score
    the unconditional generations.

    :param hidden_dim: dimensionality of the sampled latent codes. Defaults
        to the module-level `args.hidden_dim` for backward compatibility —
        the original implementation read that global unconditionally, which
        made the function unusable outside the CLI script.
    :return: dict of generation metrics, including 'prog_creation_perc'.
    """
    if hidden_dim is None:
        hidden_dim = args.hidden_dim
    gen_progs = []
    gen_prog_fails = 0.
    # Also generate a set of unconditional ShapeAssembly Programs
    for i in range(num_gen):
        code = torch.randn(hidden_dim).to(device)
        if USE_SEM_VALID:
            node = sv.sem_eval_prog(decoder, code.squeeze())
        else:
            node = decoder.eval_prog(code.squeeze())
        try:
            fillProgram(
                metadata['dsl'],
                node,
                metadata,
                FUNC_PRED_FIELD,
                CPARAM_PRED_FIELD,
                DPARAM_PRED_FIELD,
            )
            gen_progs.append(node)
        except Exception as e:
            # Decoded node could not be turned into a valid program.
            gen_prog_fails += 1.
            if VERBOSE:
                print(f"Failed generating new program with {e}")
    # Get metrics for unconditional generations
    gen_results, gen_misses = gen_metrics.gen_metrics(
        gen_progs, outpath, exp_name, epoch, VERBOSE, num_write, train_samps, val_samps
    )
    gen_results['prog_creation_perc'] = (num_gen - gen_misses - gen_prog_fails) / (num_gen + 1e-8)
    return gen_results
def print_train_results(result, exp_name):
    """Log normalized training metrics (per TRAIN_LOG_INFO) to the exp log."""
    lines = []
    for name, key, norm_key in TRAIN_LOG_INFO:
        if key in result:
            value = round(result[key] / (result[norm_key]+1e-8), 2)
            lines.append(f" {name} : {value}\n")
    utils.log_print("".join(lines), f"{outpath}/{exp_name}/log.txt")
def print_eval_results(result, exp_name):
    """Log normalized evaluation metrics (per EVAL_LOG_INFO) to the exp log."""
    lines = []
    for name, key, norm_key in EVAL_LOG_INFO:
        if key in result:
            value = round(result[key] / (result[norm_key]+1e-8), 2)
            lines.append(f" {name} : {value}\n")
    utils.log_print("".join(lines), f"{outpath}/{exp_name}/log.txt")
def print_gen_results(result, exp_name):
    """Log generation metrics (per GEN_LOG_INFO, unnormalized) to the exp log."""
    lines = []
    for name, key in GEN_LOG_INFO:
        if key in result:
            lines.append(f" {name} : {round(result[key], 2)}\n")
    utils.log_print("".join(lines), f"{outpath}/{exp_name}/log.txt")
def make_train_plots(train_result, val_result, train_plots, aepochs, exp_name):
    """Append this epoch's normalized train/val losses and redraw each plot."""
    for name, key, norm_key in TRAIN_LOG_INFO:
        for split, result in (('train', train_result), ('val', val_result)):
            if key not in result:
                continue
            point = result[key] / (result[norm_key]+1e-8)
            train_plots[split].setdefault(name, []).append(point)
        # Nothing to draw unless the train curve exists for this metric.
        if name not in train_plots['train']:
            continue
        plt.clf()
        plt.plot(aepochs, train_plots['train'][name], label='train')
        if name in train_plots['val']:
            plt.plot(aepochs, train_plots['val'][name], label='val')
        plt.legend()
        plt.grid()
        plt.savefig(f"{outpath}/{exp_name}/plots/train/{name}.png")
def make_eval_plots(eval_result, eval_plots, aepochs, exp_name):
    """Append this epoch's normalized eval metrics and redraw each plot."""
    for name, key, norm_key in EVAL_LOG_INFO:
        for split in ('train', 'val'):
            result = eval_result[split]
            if key not in result:
                continue
            point = result[key] / (result[norm_key]+1e-8)
            eval_plots[split].setdefault(name, []).append(point)
        # Nothing to draw unless the train curve exists for this metric.
        if name not in eval_plots['train']:
            continue
        plt.clf()
        plt.plot(aepochs, eval_plots['train'][name], label='train')
        if name in eval_plots['val']:
            plt.plot(aepochs, eval_plots['val'][name], label='val')
        plt.legend()
        plt.grid()
        plt.savefig(f"{outpath}/{exp_name}/plots/eval/{name}.png")
def make_gen_plots(gen_result, gen_plots, aepochs, exp_name):
    """Append this epoch's generation metrics and redraw each plot."""
    for name, key in GEN_LOG_INFO:
        if key not in gen_result:
            continue
        gen_plots.setdefault(name, []).append(gen_result[key])
        plt.clf()
        plt.plot(aepochs, gen_plots[name])
        plt.grid()
        plt.savefig(f"{outpath}/{exp_name}/plots/gen/{name}.png")
# Helper function for keeping consistent train/val splits
def getInds(train_ind_file):
    """Read one shape index per line from `train_ind_file` into a set of
    stripped strings (used to keep train/val splits consistent)."""
    with open(train_ind_file) as f:
        return {line.strip() for line in f}
def get_tensor_layout(metadata):
    """Compute the flat per-line tensor layout for the DSL token space.

    Layout: [one-hot function | shared discrete-param region | per-function
    continuous params]. Returns (total_size, tl_map) where tl_map maps keys
    like 'func', '{func}_{type}_{i}', '{func}_f', '{func}_b' to
    (start, end) index ranges.
    """
    # Figure out size of tensor
    # map from (func, type) -> indices in tensor
    tl_map = {}
    size = len(metadata['func_map'])
    tl_map['func'] = (0, size)
    start = size
    # Discrete-param region is sized for the worst case of each type and is
    # SHARED across functions (every function's i/sq/sym slots begin at the
    # same offsets) — only one function is active per line, so slots can alias.
    size += (I_LENGTH * metadata['max_d_i_params']) \
        + (SQ_LENGTH * metadata['max_d_sq_params']) \
        + (SYM_LENGTH * metadata['max_d_sym_params'])
    for func, prms in metadata['dparam_map'].items():
        i_start = start
        sq_start = i_start + I_LENGTH * metadata['max_d_i_params']
        sym_start = sq_start + SQ_LENGTH * metadata['max_d_sq_params']
        for i, _typ in enumerate(prms):
            if _typ == 'i':
                opt_len = I_LENGTH
                _start = i_start
                i_start += opt_len
            elif _typ == 'sq':
                opt_len = SQ_LENGTH
                _start = sq_start
                sq_start += opt_len
            elif _typ == 'sym':
                opt_len = SYM_LENGTH
                _start = sym_start
                sym_start += opt_len
            tl_map[f'{func}_{_typ}_{i}'] = (
                _start,
                _start + opt_len,
            )
    # Continuous params are appended per function at the tail, grouped into
    # float ('f') and bool ('b') ranges. NOTE(review): membership uses
    # substring checks ('f' in prm) rather than equality — presumably the
    # type strings can carry suffixes; confirm against cparam_map contents.
    for func, prms in metadata['cparam_map'].items():
        nf = 0
        nb = 0
        for prm in prms:
            if 'f' in prm:
                nf += 1
            elif 'b' in prm:
                nb += 1
        if nf > 0:
            size += nf
            tl_map[f'{func}_f'] = (size-nf, size)
        if nb > 0:
            size += nb
            tl_map[f'{func}_b'] = (size-nb, size)
    return size, tl_map
# Main entry-point of modeling logic
def run_train(
    dataset_path,
    exp_name,
    max_shapes,
    epochs,
    hidden_dim,
    eval_per,
    variational,
    loss_config,
    rd_seed,
    print_per,
    num_gen,
    num_write,
    num_eval,
    dec_lr,
    enc_lr,
    gen_per,
    save_per,
    category,
    batch_size
):
    """Main training entry point: load data, build targets, train the
    encoder/decoder VAE, and periodically evaluate / generate / checkpoint.

    NOTE(review): `variational` is accepted but never read in this body —
    variational behavior is driven by 'kl' in loss_config instead; confirm
    the parameter can be retired.
    """
    random.seed(rd_seed)
    numpy.random.seed(rd_seed)
    torch.manual_seed(rd_seed)
    # --- Data + metadata loading; derive layout info used by the networks.
    raw_data = pickle.load(open(f"{dataset_path}_train.data", "rb"))
    metadata = pickle.load(open(f"{dataset_path}_train.meta", "rb"))
    metadata['max_children'] = max([int(i) for i in metadata['num_cube_map'].values()])
    metadata['rev_func_map'] = {v:k for k, v in metadata['func_map'].items()}
    for key in ('i', 'sq', 'sym'):
        metadata[f'max_d_{key}_params'] = max([len([ __l for __l in _l if __l == key]) for _l in metadata['dparam_map'].values()])
    tl_size, tl_map = get_tensor_layout(metadata)
    metadata['tl_size'] = tl_size
    metadata['tl_map'] = tl_map
    all_inds = []
    all_data = []
    # NOTE(review): good_inds stays empty here, so the branches that test it
    # below are effectively dead in this configuration.
    good_inds = []
    # SEQ_LEN is a module-level global consumed by addTargets.
    max_sq_len = get_max_sq_len(raw_data)
    print(f"Seq len: {max_sq_len}")
    global SEQ_LEN
    SEQ_LEN = max_sq_len
    # --- Attach training targets and executable programs to each shape.
    for d in raw_data:
        if len(all_inds) >= max_shapes:
            break
        if len(good_inds) > 0 and d[0] not in good_inds:
            continue
        addTargets(d[1], metadata)
        fillProgram(
            metadata['dsl'],
            d[1],
            metadata,
            'func_gt',
            'cparam_gt',
            'dparam_gt'
        )
        all_data.append(d[1])
        all_inds.append(d[0])
    inds_and_progs = list(zip(all_inds, all_data))
    samples = [(i, d) for i,d in inds_and_progs]
    # --- Split by the fixed index files so train/val is reproducible.
    train_ind_file = f'data_splits/{category}/train.txt'
    val_ind_file = f'data_splits/{category}/val.txt'
    train_samples = []
    val_samples = []
    train_inds = getInds(train_ind_file)
    val_inds = getInds(val_ind_file)
    misses = 0.
    # NOTE(review): num_parts is never used below.
    num_parts = []
    for (ind, prog) in samples:
        if ind in train_inds or ind in good_inds:
            train_samples.append((ind, prog))
        elif ind in val_inds:
            val_samples.append((ind, prog))
        else:
            misses += 1
    if len(good_inds) > 0:
        val_samples = train_samples[:1]
    print(f"Samples missed: {misses}")
    train_num = len(train_samples)
    val_num = len(val_samples)
    train_surf_samples, val_surf_samples = gen_metrics.get_gt_surf_samples(category, num_gen)
    # --- Data loaders: batched for training, single-sample for evaluation.
    train_dataset = DataLoader(
        train_samples, batch_size, shuffle=True, collate_fn = _bcol
    )
    eval_train_dataset = DataLoader(
        train_samples[:num_eval], 1, shuffle=False, collate_fn = _col
    )
    val_dataset = DataLoader(
        val_samples, batch_size, shuffle = False, collate_fn = _bcol
    )
    eval_val_dataset = DataLoader(
        val_samples[:num_eval//2], 1, shuffle = False, collate_fn = _col
    )
    utils.log_print(f"Training size: {len(train_samples)}", f"{outpath}/{exp_name}/log.txt")
    utils.log_print(f"Validation size: {len(val_samples)}", f"{outpath}/{exp_name}/log.txt")
    val_epochs = []
    train_epochs = []
    gen_epochs = []
    train_plots = {'train': {}, 'val': {}}
    eval_plots = {'train': {}, 'val': {}}
    gen_plots = {}
    # --- Models and optimizers.
    encoder = dslEncoder(hidden_dim, metadata)
    decoder = dslDecoder(
        hidden_dim,
        metadata,
    )
    encoder.to(device)
    decoder.to(device)
    dec_opt = torch.optim.Adam(
        decoder.parameters(),
        lr = dec_lr,
        eps = 1e-6
    )
    enc_opt = torch.optim.Adam(
        encoder.parameters(),
        lr = enc_lr,
        eps = 1e-6
    )
    print('training ...')
    for e in range(0, epochs):
        # Dump all accumulated plot data every epoch so runs are resumable/inspectable.
        json.dump({
            'train': train_plots,
            'eval': eval_plots,
            'gen': gen_plots,
            'train_epochs': train_epochs,
            'val_epochs': val_epochs,
            'gen_epochs': gen_epochs
        }, open(f"{outpath}/{exp_name}/res.json" ,'w'))
        decoder.epoch = e
        do_print = (e+1) % print_per == 0
        t = time.time()
        if do_print:
            utils.log_print(f"\nEpoch {e}:", f"{outpath}/{exp_name}/log.txt")
        train_result = model_train_results(
            train_dataset,
            encoder,
            decoder,
            enc_opt,
            dec_opt,
            loss_config,
        )
        if do_print:
            # Validation pass: no optimizers (eval mode), and drop the KL
            # term since no sampling is wanted.
            with torch.no_grad():
                val_result = model_train_results(
                    val_dataset, encoder, decoder, None, None,
                    {k:v for k,v in loss_config.items() if k != 'kl'}
                )
            train_epochs.append(e)
            make_train_plots(train_result, val_result, train_plots, train_epochs, exp_name)
            utils.log_print(
                f"Train results: ", f"{outpath}/{exp_name}/log.txt"
            )
            print_train_results(train_result, exp_name)
            utils.log_print(
                f"Val results: ", f"{outpath}/{exp_name}/log.txt"
            )
            print_train_results(val_result, exp_name)
            utils.log_print(
                f" Time = {time.time() - t}", f"{outpath}/{exp_name}/log.txt"
            )
        with torch.no_grad():
            # Periodic reconstruction evaluation.
            if (e+1) % eval_per == 0:
                decoder.eval()
                encoder.eval()
                t = time.time()
                eval_results = model_eval(
                    eval_train_dataset,
                    eval_val_dataset,
                    encoder,
                    decoder,
                    exp_name,
                    e,
                    num_write,
                    metadata
                )
                utils.log_print(f"Evaluation training set results:", f"{outpath}/{exp_name}/log.txt")
                print_eval_results(eval_results['train'], exp_name)
                utils.log_print(f"Evaluation validation set results:", f"{outpath}/{exp_name}/log.txt")
                print_eval_results(eval_results['val'], exp_name)
                utils.log_print(f"Eval Time = {time.time() - t}", f"{outpath}/{exp_name}/log.txt")
                val_epochs.append(e)
                make_eval_plots(eval_results, eval_plots, val_epochs, exp_name)
            # Periodic unconditional generation evaluation.
            if (e+1) % gen_per == 0:
                gen_epochs.append(e)
                t = time.time()
                gen_result = model_gen(
                    decoder, num_gen, metadata, exp_name, e, num_write,
                    train_surf_samples,
                    val_surf_samples
                )
                utils.log_print(f"Unconditional Generated Results:", f"{outpath}/{exp_name}/log.txt")
                print_gen_results(gen_result, exp_name)
                make_gen_plots(gen_result, gen_plots, gen_epochs, exp_name)
                utils.log_print(f"Gen Time = {time.time() - t}", f"{outpath}/{exp_name}/log.txt")
            # Periodic checkpointing.
            if (e+1) % save_per == 0:
                utils.log_print("Saving Models", f"{outpath}/{exp_name}/log.txt")
                torch.save(decoder.state_dict(), f"{outpath}/{exp_name}/models/decoder_{e}.pt")
                torch.save(encoder.state_dict(), f"{outpath}/{exp_name}/models/encoder_{e}.pt")
def make_gens(args):
    """Load a pre-trained decoder and write `args.num_gen` novel generations.

    For each sampled latent code, decodes a program (with semantic-validity
    checking), writes the mesh and both program representations to
    '{exp_name}/gen_output', retrying until num_gen successes.
    """
    # Rebuild the same metadata-derived layout used at train time.
    metadata = pickle.load(open(f"{args.dataset_path}_train.meta", "rb"))
    metadata['max_children'] = max([int(i) for i in metadata['num_cube_map'].values()])
    metadata['rev_func_map'] = {v:k for k, v in metadata['func_map'].items()}
    for key in ('i', 'sq', 'sym'):
        metadata[f'max_d_{key}_params'] = max([len([ __l for __l in _l if __l == key]) for _l in metadata['dparam_map'].values()])
    tl_size, tl_map = get_tensor_layout(metadata)
    metadata['tl_size'] = tl_size
    metadata['tl_map'] = tl_map
    decoder = dslDecoder(
        args.hidden_dim,
        metadata,
    )
    decoder.load_state_dict(torch.load(
        f'{args.exp_name}/models/decoder_{args.load_epoch}.pt'
    ))
    decoder.to(device)
    decoder.eval()
    i = 0
    outname = f'{args.exp_name}/gen_output'
    os.system(f'mkdir {outname}')
    # Loop until num_gen successful generations; failures only print.
    while (i < args.num_gen):
        code = torch.randn(args.hidden_dim).to(device)
        node = sv.sem_eval_prog(decoder, code)
        try:
            fillProgram(
                metadata['dsl'],
                node,
                metadata,
                FUNC_PRED_FIELD,
                CPARAM_PRED_FIELD,
                DPARAM_PRED_FIELD
            )
            # Two textual views of the program: ShapeAssembly + learned DSL.
            sa_lines = utils.sagetHierProgLines(node)
            od_lines = utils.getHierProgLines(node, 'dsl_prog')
            verts, faces = hier_execute(node)
            utils.writeObj(verts, faces, f"{outname}/gen_{i}.obj")
            with open(f'{outname}/prog_sa_{i}.txt', 'w') as f:
                for line in sa_lines:
                    f.write(f'{line}\n')
            with open(f'{outname}/prog_od_{i}.txt', 'w') as f:
                for line in od_lines:
                    f.write(f'{line}\n')
            # Only count a generation once everything wrote successfully.
            i += 1
        except Exception as e:
            print(f"failed {i} with {e}")
if __name__ == '__main__':
    # CLI entry point: parse run configuration, assemble the loss-weight
    # config, then either train a new model or sample from a saved one.
    parser = argparse.ArgumentParser(description="Run Novel Shape Generation model")
    parser.add_argument('-ds', '--dataset_path', help='Path to program data, e.g. data/shapemod_chair', type = str)
    parser.add_argument('-en', '--exp_name', help='name of experiment', type = str)
    parser.add_argument('-c', '--category', type = str, help = 'category of PartNet')
    parser.add_argument('-ms', '--max_shapes', default = 100000, type = int, help = 'max number of shapes to train/evaluate on ')
    parser.add_argument('-e', '--epochs', default = 5000, type = int, help = 'number of epochs to run for')
    parser.add_argument('-hd', '--hidden_dim', default = 256, type = int, help = 'hidden dimension size')
    parser.add_argument('-prp', '--print_per', default = 20, type = int, help = 'how often to print out training set statistics')
    parser.add_argument('-evp', '--eval_per', default = 100, type = int, help = 'how often to run evaluation statistics')
    parser.add_argument('-gp', '--gen_per', default = 100, type = int, help = 'how often to run novel generation statistics')
    parser.add_argument('-v', '--variational', default = "True", type = str, help = 'whether model should be a VAE or AE')
    parser.add_argument('-enc_lr', '--enc_lr', default = 0.0002, type = float, help = 'encoder learning rate')
    parser.add_argument('-dec_lr', '--dec_lr', default = 0.0002, type = float, help = 'decoder learning rate')
    parser.add_argument('-rd', '--rd_seed', default = 42, type = int, help = 'random seed')
    parser.add_argument('-ng', '--num_gen', default = 1000, type = int, help = 'number of shapes to generate each generation period')
    parser.add_argument('-nw', '--num_write', default = 25, type = int, help = 'number of shapes to write to .obj each evaluation period')
    parser.add_argument('-ne', '--num_eval', default = 200, type = int, help = 'number of shapes to run eval statistics on each evaluation period')
    parser.add_argument('-f_lw', '--f_lw', default = 50., type = float, help = 'weight on loss of continuous parameters')
    parser.add_argument('-d_lw', '--d_lw', default = 1., type = float, help = 'weight on loss of discrete parameters')
    parser.add_argument('-b_lw', '--b_lw', default = 1., type = float, help = 'weight on loss of boolean parameters')
    parser.add_argument('-c_lw', '--c_lw', default = 1., type = float, help = 'weight on loss of child predictions')
    parser.add_argument('-fn_lw', '--fn_lw', default = 1., type = float, help = 'weight on loss of function predictions')
    parser.add_argument('-kl_lw', '--kl_lw', default = 0.1, type = float, help = 'KL divergence loss weight')
    parser.add_argument('-b', '--batch_size', default = 64, type=int, help = 'batch size')
    parser.add_argument('-le', '--load_epoch', default = None, type=int, help = 'model epoch to load from pre-trained model')
    parser.add_argument('-m', '--mode', default = 'train', type=str, help = 'whether to train new model or generate samples from pre-trained model')
    args = parser.parse_args()
    # --variational is taken as a string and parsed into a Python bool.
    args.variational = ast.literal_eval(args.variational)
    # Map loss names to CLI weights; note bbox reuses the float-param weight.
    loss_config = {
        'd_prm': args.d_lw,
        'f_prm': args.f_lw,
        'bbox': args.f_lw,
        'b_prm': args.b_lw,
        'child': args.c_lw,
        'func': args.fn_lw,
    }
    if args.variational:
        loss_config['kl'] = args.kl_lw
    if args.mode == 'train':
        writeConfigFile(args)
        run_train(
            dataset_path=args.dataset_path,
            exp_name=args.exp_name,
            max_shapes=args.max_shapes,
            epochs=args.epochs,
            hidden_dim=args.hidden_dim,
            eval_per=args.eval_per,
            variational=args.variational,
            loss_config=loss_config,
            rd_seed=args.rd_seed,
            print_per=args.print_per,
            num_gen=args.num_gen,
            num_write=args.num_write,
            num_eval=args.num_eval,
            enc_lr=args.enc_lr,
            dec_lr=args.dec_lr,
            gen_per=args.gen_per,
            # There is no --save_per flag; checkpointing reuses the gen period.
            save_per=args.gen_per,
            category=args.category,
            batch_size=args.batch_size
        )
    elif args.mode == 'gen':
        make_gens(args)
    else:
        print(f"Bad mode {args.mode}")
| 61,405 | 34.826138 | 167 | py |
ShapeMOD | ShapeMOD-main/SA_lang/tasks/recon_metrics.py | from ShapeAssembly import hier_execute
import sa_utils as utils
import torch
import os
import sys
import math
import faiss
import numpy as np
device = torch.device("cuda")
NUM_SAMPS = 10000  # surface points sampled per mesh for Chamfer/F-score
V_DIM = 64  # voxel grid resolution per axis (grid is V_DIM^3)
CD_MULT = 500.  # scale factor applied to reported Chamfer distances
# Voxel-center coordinates of the V_DIM^3 grid, normalized to roughly [-1, 1]^3.
voxel_inds = ((np.indices((V_DIM, V_DIM, V_DIM)).T + .5) / (V_DIM//2)) -1.
# Same grid flattened to (V_DIM^3, 3) for batched point-in-cuboid tests.
flat_voxel_inds = torch.from_numpy(voxel_inds.reshape(-1, 3)).float()
class SimpChamferLoss(torch.nn.Module):
    """Chamfer distance + F-score metrics between 3D point clouds, backed by
    a GPU faiss index for nearest-neighbor queries.

    Despite the name this is used as a metric helper, not a trainable loss.
    """
    def __init__(self, device):
        """
        :param device: unused; kept for backward compatibility with callers.
        """
        super(SimpChamferLoss, self).__init__()
        self.dimension = 3  # point clouds are 3D
        self.gpu_id = torch.cuda.current_device()
        self.res = faiss.StandardGpuResources()

    def build_nn_index(self, database):
        """
        :param database: numpy array of Nx3
        :return: Faiss index, moved onto the GPU (docstring previously said CPU)
        """
        index_cpu = faiss.IndexFlatL2(self.dimension)
        index = faiss.index_cpu_to_gpu(self.res, self.gpu_id, index_cpu)
        index.add(database)
        return index

    def search_nn(self, index, query, k):
        """Euclidean (not squared) distances from each query point to its k
        nearest database points.

        BUGFIX: `k` was previously ignored and the search hard-coded to 1;
        all existing callers pass k=1, so their behavior is unchanged.
        """
        D, I = index.search(query, k)
        return np.sqrt(D)

    def getAvgDist(self, index, query):
        """90th-percentile nearest-other-neighbor distance of `query` against
        `index`; used as the adaptive 'def' F-score threshold."""
        # k=2 because when query == database, the closest hit is each point itself.
        D, I = index.search(query, 2)
        m_d = math.sqrt(np.percentile(D[:, 1], 90))
        return m_d

    def calc_metrics(self, predict_pc, gt_pc, threshes):
        """
        :param predict_pc: 1x3xM tensor in GPU (only batch index 0 is used)
        :param gt_pc: 1x3xN tensor in GPU
        :param threshes: distance thresholds; the string 'def' selects an
            adaptive threshold derived from the GT point spacing.
        :return: [chamfer_distance] + one F-score per threshold
        """
        predict_pc_np = np.ascontiguousarray(
            torch.transpose(predict_pc.data.clone(), 1, 2).cpu().numpy()
        )  # BxMx3
        gt_pc_np = np.ascontiguousarray(
            torch.transpose(gt_pc.data.clone(), 1, 2).cpu().numpy()
        )  # BxNx3
        index_predict = self.build_nn_index(predict_pc_np[0])
        index_gt = self.build_nn_index(gt_pc_np[0])
        fwd_dist = self.search_nn(index_gt, predict_pc_np[0], 1)
        bwd_dist = self.search_nn(index_predict, gt_pc_np[0], 1)
        # Symmetric Chamfer distance: average of the two directional means.
        cd = (fwd_dist.mean() / 2) + (bwd_dist.mean() / 2)
        # NOTE(review): `ones` is sized from fwd_dist only, so the recall term
        # implicitly assumes M == N (both clouds sampled with NUM_SAMPS points).
        ones = np.ones(fwd_dist.shape)
        fscores = []
        for thresh in threshes:
            if thresh == 'def':
                thresh = self.getAvgDist(index_gt, gt_pc_np[0])
            precision = (100 / ones.shape[0]) * np.sum(ones[fwd_dist <= thresh])
            recall = (100 / ones.shape[0]) * np.sum(ones[bwd_dist <= thresh])
            fs = (2 * precision * recall) / (precision + recall + 1e-8)
            fscores.append(fs)
        return [cd] + fscores
class SimpCPUChamferLoss(torch.nn.Module):
    """CPU-only variant of SimpChamferLoss: identical Chamfer/F-score math,
    but the faiss index stays on the CPU (no GPU resources required)."""
    def __init__(self):
        super(SimpCPUChamferLoss, self).__init__()
        self.dimension = 3  # point clouds are 3D

    def build_nn_index(self, database):
        """
        :param database: numpy array of Nx3
        :return: Faiss index, in CPU
        """
        index = faiss.IndexFlatL2(self.dimension)
        index.add(database)
        return index

    def search_nn(self, index, query, k):
        """Euclidean (not squared) distances from each query point to its k
        nearest database points.

        BUGFIX: `k` was previously ignored and the search hard-coded to 1;
        all existing callers pass k=1, so their behavior is unchanged.
        """
        D, I = index.search(query, k)
        return np.sqrt(D)

    def getAvgDist(self, index, query):
        """90th-percentile nearest-other-neighbor distance of `query` against
        `index`; used as the adaptive 'def' F-score threshold."""
        # k=2 because when query == database, the closest hit is each point itself.
        D, I = index.search(query, 2)
        m_d = math.sqrt(np.percentile(D[:, 1], 90))
        return m_d

    def calc_metrics(self, predict_pc, gt_pc, threshes):
        """
        :param predict_pc: 1x3xM tensor (only batch index 0 is used)
        :param gt_pc: 1x3xN tensor
        :param threshes: distance thresholds; the string 'def' selects an
            adaptive threshold derived from the GT point spacing.
        :return: [chamfer_distance] + one F-score per threshold
        """
        predict_pc_np = np.ascontiguousarray(
            torch.transpose(predict_pc.data.clone(), 1, 2).cpu().numpy()
        )  # BxMx3
        gt_pc_np = np.ascontiguousarray(
            torch.transpose(gt_pc.data.clone(), 1, 2).cpu().numpy()
        )  # BxNx3
        index_predict = self.build_nn_index(predict_pc_np[0])
        index_gt = self.build_nn_index(gt_pc_np[0])
        fwd_dist = self.search_nn(index_gt, predict_pc_np[0], 1)
        bwd_dist = self.search_nn(index_predict, gt_pc_np[0], 1)
        # Symmetric Chamfer distance: average of the two directional means.
        cd = (fwd_dist.mean() / 2) + (bwd_dist.mean() / 2)
        # NOTE(review): `ones` is sized from fwd_dist only, so the recall term
        # implicitly assumes M == N (both clouds sampled with NUM_SAMPS points).
        ones = np.ones(fwd_dist.shape)
        fscores = []
        for thresh in threshes:
            if thresh == 'def':
                thresh = self.getAvgDist(index_gt, gt_pc_np[0])
            precision = (100 / ones.shape[0]) * np.sum(ones[fwd_dist <= thresh])
            recall = (100 / ones.shape[0]) * np.sum(ones[bwd_dist <= thresh])
            fs = (2 * precision * recall) / (precision + recall + 1e-8)
            fscores.append(fs)
        return [cd] + fscores
# Module-level metric helper; the CPU variant is used so importing this module
# does not require GPU faiss resources.
chamfer = SimpCPUChamferLoss()
def voxelize(cube_params):
    """Boolean occupancy of one oriented cuboid over the global voxel grid.

    `cube_params` is a flat tensor packing (per its usage here) dims [0:3],
    center [3:6] and the three rotation-matrix rows [6:15]. A voxel counts as
    occupied when its center's box SDF is within 1/V_DIM of the surface.
    """
    dims = cube_params[:3]
    center = cube_params[3:6]
    rot = torch.stack((
        cube_params[6:9],
        cube_params[9:12],
        cube_params[12:15]
    ))
    # Express every voxel center in the cuboid's local frame.
    local_pts = (rot @ (flat_voxel_inds - center.unsqueeze(0)).T).T
    # Axis-aligned box SDF in the local frame (half-extents = dims / 2).
    sdf = (local_pts.abs() - (dims.unsqueeze(0) * .5)).max(dim=1).values
    return (sdf <= (1. / V_DIM)).bool()
def shape_voxelize(cubes):
    """Union occupancy grid over a list of cuboids.

    :param cubes: objects exposing getParams() -> flat cuboid parameter tensor
        (the layout consumed by voxelize()).
    :return: bool tensor of V_DIM**3 voxel occupancies.
    """
    # BUGFIX: the grid size was hard-coded as 64**3; use V_DIM**3 so the
    # resolution constant only needs changing in one place (V_DIM is 64
    # today, so behavior is identical).
    voxels = torch.zeros(V_DIM ** 3).bool().to(flat_voxel_inds.device)
    for cube in cubes:
        vox_occs = voxelize(cube.getParams().to(flat_voxel_inds.device))
        voxels = voxels | vox_occs
    return voxels
def getSampMetrics(verts, faces, gt_verts, gt_faces):
    """Sample both meshes and compute Chamfer distance plus F-scores.

    Returns a dict with 'cd' (scaled by CD_MULT) and F-scores at thresholds
    0.05, 0.03, and the adaptive 'def' threshold.
    """
    pred_samps = torch.clamp(
        utils.sample_surface(faces, verts.unsqueeze(0), NUM_SAMPS, False)[0],
        -1, 1
    )
    gt_samps = torch.clamp(
        utils.sample_surface(gt_faces, gt_verts.unsqueeze(0), NUM_SAMPS, False)[0],
        -1, 1
    )
    cd, fs05, fs03, fs_def = chamfer.calc_metrics(
        pred_samps.T.unsqueeze(0).float(),
        gt_samps.T.unsqueeze(0).float(),
        [.05, .03, 'def']
    )
    return {
        'cd': cd * CD_MULT,
        'fscore-05': fs05,
        'fscore-03': fs03,
        'fscore-def': fs_def,
    }
def getShapeIoU(cubes, gt_cubes):
    """Voxel IoU (as a percentage) between two cuboid sets."""
    pred_vox = shape_voxelize(cubes)
    gt_vox = shape_voxelize(gt_cubes)
    inter = (pred_vox & gt_vox).sum().item()
    union = (pred_vox | gt_vox).sum().item()
    return 100 * (inter / union)
def recon_metrics(
    recon_sets, outpath, exp_name, name, epoch, VERBOSE, num_gen
):
    """Score reconstructed programs against ground truth and write sample
    outputs to disk.

    :param recon_sets: iterable of (pred_prog, gt_prog, prog_ind) triples.
    :param num_gen: max number of reconstructions to write as obj/program files.
    :return: (results, misses) where results maps each metric name to its
        mean over successfully evaluated shapes (0. when none succeeded) and
        misses counts shapes whose predicted program failed to execute.
    """
    misses = 0.
    results = {
        'iou': [],
        'cd': [],
        'fscore-def': [],
        'fscore-03': [],
        'fscore-05': [],
    }
    count = 0
    for prog, gt_prog, prog_ind in recon_sets:
        gt_verts, gt_faces, gt_cubes = hier_execute(gt_prog, return_all = True)
        try:
            verts, faces, cubes = hier_execute(prog, return_all = True)
            assert not torch.isnan(verts).any(), 'saw nan vert'
        except Exception as e:
            # Predicted program failed to execute at all — count as a miss.
            misses += 1.
            if VERBOSE:
                print(f"failed recon metrics for {prog_ind} with {e}")
            continue
        # NOTE(review): directory listing happens once per shape; could be
        # hoisted out of the loop if gt objects are never written mid-call.
        gt_objs = os.listdir(f"{outpath}/{exp_name}/objs/gt/")
        # Metric failures are best-effort: each metric is collected independently.
        try:
            sm = getSampMetrics(verts, faces, gt_verts, gt_faces)
            for k, v in sm.items():
                if v is not None:
                    results[k].append(v)
        except Exception as e:
            if VERBOSE:
                print(f"failed Samp Metrics for {prog_ind} with {e}")
        try:
            iou = getShapeIoU(cubes, gt_cubes)
            if iou is not None:
                results['iou'].append(iou)
        except Exception as e:
            if VERBOSE:
                print(f"failed Shape Iou for {prog_ind} with {e}")
        # Only the first num_gen shapes get written to disk.
        if count >= num_gen:
            continue
        if f"{prog_ind}.obj" not in gt_objs:
            utils.writeObj(gt_verts, gt_faces, f"{outpath}/{exp_name}/objs/gt/{prog_ind}.obj")
            if 'dsl_prog' in gt_prog:
                utils.writeHierProg(gt_prog, 'dsl_prog', f"{outpath}/{exp_name}/programs/gt/{prog_ind}.txt")
            else:
                utils.sawriteHierProg(gt_prog, f"{outpath}/{exp_name}/programs/gt/{prog_ind}.txt")
        try:
            utils.writeObj(
                verts, faces, f"{outpath}/{exp_name}/objs/{name}/{epoch}_{prog_ind}.obj"
            )
            if 'dsl_prog' in prog:
                utils.writeHierProg(
                    prog, 'dsl_prog', f"{outpath}/{exp_name}/programs/{name}/{epoch}_{prog_ind}.txt"
                )
            else:
                utils.sawriteHierProg(
                    prog, f"{outpath}/{exp_name}/programs/{name}/{epoch}_{prog_ind}.txt"
                )
            count += 1
        except Exception as e:
            if VERBOSE:
                print(f"Failed writing prog/obj for {prog_ind} with {e}")
    # Reduce per-shape lists to means (0. when a metric never succeeded).
    for key in results:
        if len(results[key]) > 0:
            res = torch.tensor(results[key]).mean().item()
        else:
            res = 0.
        results[key] = res
    return results, misses
def simp_recon_metrics(
    recon_sets
):
    """Compute per-pair reconstruction metrics without writing anything to disk.

    Each entry of recon_sets is (prog, gt_prog). Unlike recon_metrics, every
    pair contributes exactly one entry per metric (None when that pair
    failed), so the output lists stay aligned with the input pairs.

    Returns a dict mapping metric name -> list with one entry per pair.
    """
    results = {
        'iou': [],
        'cd': [],
        'fscore-def': [],
        'fscore-03': [],
        'fscore-05': [],
    }
    for prog, gt_prog in recon_sets:
        # Default every metric to None so a failed pair still keeps the
        # output lists aligned with recon_sets.
        _results = {k: None for k in results}
        try:
            gt_verts, gt_faces, gt_cubes = hier_execute(gt_prog, return_all = True)
            verts, faces, cubes = hier_execute(prog, return_all = True)
            assert not torch.isnan(verts).any(), 'saw nan vert'
            sm = getSampMetrics(verts, faces, gt_verts, gt_faces)
            for k, v in sm.items():
                _results[k] = v.item()
            iou = getShapeIoU(cubes, gt_cubes)
            _results['iou'] = iou
        except Exception as e:
            # Fixed message typo: previously printed "Faile with ...".
            print(f"Failed with {e}")
        for k in _results:
            results[k].append(_results[k])
    return results
| 9,981 | 30.488959 | 108 | py |
ShapeMOD | ShapeMOD-main/SA_lang/tasks/gen_metrics.py | from ShapeAssembly import hier_execute, ShapeAssembly, make_hier_prog
import sa_utils as utils
import torch
import os
import sys
from valid import check_stability, check_rooted
from pointnet_fd import get_fd
from recon_metrics import chamfer, CD_MULT
from tqdm import tqdm
import faiss
import numpy as np
NUM_SAMPS = 2500
CD_NUM_SAMPS = 1024
device = torch.device("cuda")
#device = torch.device("cpu")
MAX_ROOTED_STABLE = 200
MAX_VARI = 200
sa = ShapeAssembly()
class CDPairs(torch.nn.Module):
    """Pairwise chamfer-distance helper backed by faiss GPU nearest-neighbor search.

    For each source point cloud it finds the closest target cloud by symmetric
    chamfer distance and returns the mean of those minima (used by the
    gen/cov/var generation metrics below).
    """
    def __init__(self, device, mem = 100 * 1024 * 1024):
        super(CDPairs, self).__init__()
        # faiss resources are bound to the currently active CUDA device.
        self.gpu_id = torch.cuda.current_device()
        self.res = faiss.StandardGpuResources()
        self.res.noTempMemory()
        self.res.setTempMemory(mem)
        self.dimension = 3

    def build_nn_index(self, database):
        """
        :param database: numpy array of Nx3
        :return: Faiss flat-L2 index, moved onto the GPU
        """
        index_cpu = faiss.IndexFlatL2(self.dimension)
        index = faiss.index_cpu_to_gpu(self.res, self.gpu_id, index_cpu)
        index.add(database)
        return index

    def search_nn(self, index, query, k):
        # faiss IndexFlatL2 returns squared distances; take the sqrt here.
        D, I = index.search(query, k)
        return np.sqrt(D)

    # B x D x 3 inputs
    def calc_cd(self, source, target, skip_same=False):
        """Mean over source clouds of the chamfer distance to the nearest
        target cloud. skip_same excludes the i == j pairing (used when
        comparing a set of clouds against itself, e.g. the variance metric)."""
        source = np.ascontiguousarray(source.cpu().numpy())
        target = np.ascontiguousarray(target.cpu().numpy())
        d_source = []
        d_target = []
        for i in range(source.shape[0]):
            d_source.append(self.build_nn_index(source[i]))
        for i in range(target.shape[0]):
            d_target.append(self.build_nn_index(target[i]))
        min_dists = []
        for i in tqdm(range(source.shape[0])):
            min_dist = 1e8
            for j in range(target.shape[0]):
                if i == j and skip_same:
                    continue
                # Symmetric chamfer: mean NN distance in both directions.
                fwd_dist = self.search_nn(d_target[j], source[i], 1)
                bwd_dist = self.search_nn(d_source[i], target[j], 1)
                dist = fwd_dist.mean() + bwd_dist.mean()
                if dist < min_dist:
                    min_dist = dist
            min_dists.append(min_dist)
        return torch.tensor(min_dists).mean().item()


# Shared module-level instance used by gen_metrics and the __main__ benchmark.
cdpairs = CDPairs(device)
def getGTSamples(progs, num):
    """Execute the first `num` ground-truth programs and surface-sample each mesh.

    Programs that fail to sample are skipped (with a printed message);
    execution failures propagate. Returns a list of point-sample tensors.
    """
    collected = []
    for gt_prog in tqdm(progs[:num]):
        mesh_verts, mesh_faces = hier_execute(gt_prog)
        mesh_verts = mesh_verts.to(device)
        mesh_faces = mesh_faces.to(device)
        try:
            pts = utils.sample_surface(
                mesh_faces, mesh_verts.unsqueeze(0), NUM_SAMPS, False
            ).squeeze()
        except Exception as e:
            print(f"Failed GT samples with {e}")
        else:
            collected.append(pts)
    return collected
def getSamples(meshes, VERBOSE, num_samps = NUM_SAMPS):
    """Surface-sample each (verts, faces) mesh in `meshes`.

    Meshes that fail to sample are skipped (reported only when VERBOSE).
    Returns a list of point-sample tensors, one per successful mesh.
    """
    sampled = []
    for mesh_verts, mesh_faces in tqdm(meshes):
        mesh_verts = mesh_verts.to(device)
        mesh_faces = mesh_faces.to(device)
        try:
            pts = utils.sample_surface(
                mesh_faces, mesh_verts.unsqueeze(0), num_samps, False
            ).squeeze()
        except Exception as e:
            if VERBOSE:
                print(f"couldn't sample with {e}")
        else:
            sampled.append(pts)
    return sampled
def getInds(train_ind_file):
    """Read one whitespace-stripped shape index per line into a set.

    Helper for keeping consistent train/val splits across runs.
    """
    with open(train_ind_file) as f:
        return {line.strip() for line in f}
def get_gt_surf_samples(category, num = 1000):
    """Load (or build and cache) ground-truth meshes for a category's train and
    val splits, then surface-sample each of them.

    Executed meshes are cached as objs under gen_comps/gt_{train,val}/{category}/
    so later calls skip re-executing the ShapeAssembly programs.

    Returns (train_surf_samples, val_surf_samples): lists of point-sample
    tensors, one per shape, for up to `num` shapes per split.
    """
    train_ind_file = f'data_splits/{category}/train.txt'
    val_ind_file = f'data_splits/{category}/val.txt'
    # os.makedirs creates intermediate directories and is a no-op when they
    # already exist -- replaces the previous chain of os.system('mkdir ...')
    # calls (which shelled out and printed errors once the dirs existed).
    os.makedirs(f'gen_comps/gt_val/{category}', exist_ok=True)
    os.makedirs(f'gen_comps/gt_train/{category}', exist_ok=True)
    gt_train_files = set(os.listdir(f'gen_comps/gt_train/{category}/'))
    gt_val_files = set(os.listdir(f'gen_comps/gt_val/{category}/'))
    train_inds = list(getInds(train_ind_file))
    val_inds = list(getInds(val_ind_file))
    train_samples = []
    val_samples = []
    for ind in tqdm(train_inds[:num]):
        if ind+'.obj' in gt_train_files:
            # Cached: load the previously executed mesh.
            verts, faces = utils.loadObj(f'gen_comps/gt_train/{category}/{ind}.obj')
            verts = torch.tensor(verts).float().to(device)
            faces = torch.tensor(faces).long().to(device)
        else:
            # Execute the ShapeAssembly program and cache the result.
            lines = sa.load_lines(f'data/{category}/{ind}.txt')
            hier_prog = make_hier_prog(lines)
            verts, faces = hier_execute(hier_prog)
            utils.writeObj(verts, faces, f'gen_comps/gt_train/{category}/{ind}.obj')
        train_samples.append((verts, faces))
    for ind in tqdm(val_inds[:num]):
        if ind+'.obj' in gt_val_files:
            verts, faces = utils.loadObj(f'gen_comps/gt_val/{category}/{ind}.obj')
            verts = torch.tensor(verts).float().to(device)
            faces = torch.tensor(faces).long().to(device)
        else:
            lines = sa.load_lines(f'data/{category}/{ind}.txt')
            hier_prog = make_hier_prog(lines)
            verts, faces = hier_execute(hier_prog)
            utils.writeObj(verts, faces, f'gen_comps/gt_val/{category}/{ind}.obj')
        val_samples.append((verts, faces))
    train_surf_samples = getSamples(
        train_samples[:num], True
    )
    val_surf_samples = getSamples(
        val_samples[:num], True
    )
    return train_surf_samples, val_surf_samples
def getMinDist(p, samples):
    """Smallest chamfer distance from point set `p` to any set in `samples`."""
    best = 1e8
    # p never changes inside the loop, so reshape it to (1, 3, N) once.
    query = p.squeeze().T.unsqueeze(0).cpu()
    for cand in samples:
        d = chamfer.calc_metrics(
            query,
            cand.squeeze().T.unsqueeze(0).cpu(),
            []
        )[0]
        best = min(best, d.item())
    return best
def gen_metrics(
    gen_progs, outpath, exp_name, epoch, VERBOSE, num_write, train_samps, val_samps
):
    """Evaluate a batch of generated programs.

    Executes each program, writes the first num_write obj/program files,
    checks rootedness/stability on the first MAX_ROOTED_STABLE shapes, then
    computes nearest-neighbor chamfer metrics (gen/cov/var) against the
    train/val surface samples and Frechet distances (val_fd/train_fd).

    Returns (results, misses): results maps metric name -> scalar; misses
    counts programs whose execution failed.
    """
    misses = 0.
    results = {
        'num_parts': [],
        'rootedness': [],
        'stability': [],
        'gen': [],
        'cov': [],
        'var': [],
    }
    samples = []
    print("DOING ROOTED AND STABLE")
    for i, prog in enumerate(gen_progs):
        try:
            verts, faces = hier_execute(prog)
            assert not torch.isnan(verts).any(), 'saw nan vert'
            if i < num_write:
                utils.writeObj(verts, faces, f"{outpath}/{exp_name}/objs/gen/{epoch}_{i}.obj")
                if 'dsl_prog' in prog:
                    utils.writeHierProg(prog, 'dsl_prog', f"{outpath}/{exp_name}/programs/gen/{epoch}_{i}.txt")
                else:
                    utils.sawriteHierProg(prog, f"{outpath}/{exp_name}/programs/gen/{epoch}_{i}.txt")
            # Each cuboid contributes 8 vertices, so this is the part count.
            results['num_parts'].append(verts.shape[0] / 8.0)
            samples.append((verts, faces))
        except Exception as e:
            misses += 1.
            if VERBOSE:
                print(f"failed gen metrics for {i} with {e}")
            continue
        # Physical checks are expensive (pybullet sim), so cap them.
        if i < MAX_ROOTED_STABLE:
            try:
                if check_rooted(verts, faces):
                    results['rootedness'].append(1.)
                else:
                    results['rootedness'].append(0.)
                if check_stability(verts, faces):
                    results['stability'].append(1.)
                else:
                    results['stability'].append(0.)
            except Exception as e:
                if VERBOSE:
                    print(f"failed rooted/stable with {e}")
    # Collapse each per-shape list to its mean (0.0 when empty).
    for key in results:
        if len(results[key]) > 0:
            res = torch.tensor(results[key]).mean().item()
        else:
            res = 0.
        results[key] = res
    print("GETTING SAMPLES")
    gen_samps = getSamples(samples, VERBOSE)
    try:
        assert len(gen_samps) > 0, 'no gen samps'
        # gen: generated -> train, cov: val -> generated,
        # var: generated -> generated (skip_same avoids the i == j pair).
        print("CALC GEN")
        gen = cdpairs.calc_cd(
            torch.stack([g[:CD_NUM_SAMPS] for g in gen_samps[:MAX_VARI]]),
            torch.stack([g[:CD_NUM_SAMPS] for g in train_samps[:MAX_VARI]]),
            False
        )
        print("CALC COV")
        cov = cdpairs.calc_cd(
            torch.stack([g[:CD_NUM_SAMPS] for g in val_samps[:MAX_VARI]]),
            torch.stack([g[:CD_NUM_SAMPS] for g in gen_samps[:MAX_VARI]]),
            False
        )
        print("CALC VAR")
        var = cdpairs.calc_cd(
            torch.stack([g[:CD_NUM_SAMPS] for g in gen_samps[:MAX_VARI]]),
            torch.stack([g[:CD_NUM_SAMPS] for g in gen_samps[:MAX_VARI]]),
            True
        )
        results['gen'] = gen
        results['cov'] = cov
        results['var'] = var
    except Exception as e:
        # Pessimistic fallback values when the NN comparison fails.
        results['gen'] = 1.0
        results['cov'] = 1.0
        results['var'] = 1.0
        if VERBOSE:
            print(f"failed NN comparisons with {e}")
    try:
        results['val_fd'] = get_fd(
            [g.cpu() for g in gen_samps],
            [v.cpu() for v in val_samps],
            None)
    except Exception as e:
        results['val_fd'] = 100.
        if VERBOSE:
            print(f"failed getting val variance with {e}")
    try:
        results['train_fd'] = get_fd(
            [g.cpu() for g in gen_samps],
            [t.cpu() for t in train_samps],
            None)
    except Exception as e:
        results['train_fd'] = 100.
        if VERBOSE:
            print(f"failed getting train variance with {e}")
    return results, misses
if __name__ == '__main__':
    # Quick benchmark: execute the first argv[1] chair programs and time
    # CDPairs.calc_cd on their surface samples compared against themselves
    # (skip_same=True so the i == j pairing is excluded).
    from ShapeAssembly import ShapeAssembly, make_hier_prog, hier_execute
    import sys
    import time
    sa = ShapeAssembly()
    meshes = []
    for ind in list(os.listdir('data/chair'))[:int(sys.argv[1])]:
        lines = sa.load_lines(f'data/chair/{ind}')
        hier_prog = make_hier_prog(lines)
        verts, faces = hier_execute(hier_prog)
        meshes.append((verts, faces))
    gen_samps = getSamples(meshes, True)
    t = time.time()
    gen = cdpairs.calc_cd(torch.stack(gen_samps), torch.stack(gen_samps), True)
    print(gen)
    print(time.time() - t)
| 10,126 | 30.548287 | 111 | py |
ShapeMOD | ShapeMOD-main/SA_lang/tasks/valid.py | import argparse
from functools import reduce
import numpy as np
import os
import trimesh as tm
from trimesh.collision import CollisionManager
from trimesh.creation import box
import pickle
from tqdm import tqdm
import pybullet as p
import pybullet_data
from trimesh.util import concatenate as meshconcat
import xml.etree.ElementTree as xml
import json
import shutil
import sa_utils as utils
from sa_utils import sample_surface
import torch
import time
import random
import string
def check_rooted(verts, faces):
    """Return True when every part of the (y-up) mesh is connected,
    directly or transitively, to the ground plane.

    Parts are the connected components of the mesh; two parts (or a part and
    a synthetic ground slab) count as touching when their minimum distance is
    below 2% of the mesh scale. Reachability is checked with a DFS from the
    ground.
    """
    # Load up the mesh
    mesh = tm.Trimesh(vertices=verts, faces=faces)
    # Switch from y-up to z-up
    mesh.vertices = mesh.vertices[:, [0, 2, 1]]
    mesh.fix_normals()
    # Find the height of the ground plane
    z_ground = mesh.bounds[0][2]
    # Extract the individual cuboid parts
    comps = mesh.split().tolist()
    # Also create a thin box for the ground plane
    ground = box(
        extents = [10, 10, 0.01],
        transform = [
            [1, 0, 0, 0],
            [0, 1, 0, 0],
            [0, 0, 1, z_ground - 0.01/2],
            [0, 0, 0, 1]
        ]
    )
    comps.insert(0, ground)
    # Detect (approximate) intersections between parts
    # collision_dist = 0.005 * mesh.scale
    # collision_dist = 0.01 * mesh.scale
    collision_dist = 0.02 * mesh.scale
    adjacencies = {comp_index : [] for comp_index in range(len(comps))}
    manager = CollisionManager()
    for i in range(len(comps)-1):
        manager.add_object(str(i), comps[i])
        for j in range(i+1, len(comps)):
            dist = manager.min_distance_single(comps[j])
            if (dist < collision_dist):
                adjacencies[i].append(j)
                adjacencies[j].append(i)
        manager.remove_object(str(i))
    # Run a DFS starting from the ground, check if everything is reachable
    visited = [False for comp in comps]
    stack = [0] # Index of 'ground'
    while len(stack) > 0:
        nindex = stack.pop()
        visited[nindex] = True
        for cindex in adjacencies[nindex]:
            if not visited[cindex]:
                stack.append(cindex)
    return all(visited)
def obj2urdf(verts, faces, output_dir, density=1):
    """Convert a (y-up) mesh into one URDF assembly per connected component.

    Parts (mesh connected components) closer than 2% of the mesh scale are
    considered touching and joined with fixed joints along a DFS spanning
    tree. For each component, writes assembly_{i}.urdf plus an
    assembly_{i}.json with the simulation start position / volume / height /
    base extent, and exports every part as part_{j}.stl into output_dir.
    `density` scales part volume into the URDF mass entries.
    """
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    # Load up the mesh
    mesh = tm.Trimesh(vertices=verts, faces=faces)
    # Switch from y-up to z-up
    mesh.vertices = mesh.vertices[:, [0, 2, 1]]
    mesh.fix_normals()
    # Extract the individual cuboid parts
    comps = mesh.split().tolist()
    # Detect (approximate) intersections between parts, to use for building joints
    # collision_dist = 0.005 * mesh.scale
    # collision_dist = 0.01 * mesh.scale
    collision_dist = 0.02 * mesh.scale
    adjacencies = {comp_index : [] for comp_index in range(len(comps))}
    manager = CollisionManager()
    for i in range(len(comps)-1):
        manager.add_object(str(i), comps[i])
        for j in range(i+1, len(comps)):
            dist = manager.min_distance_single(comps[j])
            if (dist < collision_dist):
                adjacencies[i].append(j)
                adjacencies[j].append(i)
        manager.remove_object(str(i))
    # Compute connected components
    conn_comps = []
    visited = [False for _ in comps]
    while not all(visited):
        conn_comp = set([])
        start_idx = visited.index(False)
        stack = [start_idx]
        while len(stack) > 0:
            idx = stack.pop()
            visited[idx] = True
            conn_comp.add(idx)
            for nidx in adjacencies[idx]:
                if not visited[nidx]:
                    stack.append(nidx)
        conn_comps.append(list(conn_comp))
    # We export one URDF object file per connected component
    for i,conn_comp in enumerate(conn_comps):
        # Re-center this connected component
        ccmesh = meshconcat([comps[j] for j in conn_comp])
        c = ccmesh.centroid
        transmat = [
            [1, 0, 0, -c[0]],
            [0, 1, 0, -c[1]],
            [0, 0, 1, -c[2]],
            [0, 0, 0, 1]
        ]
        for j in conn_comp:
            comps[j].apply_transform(transmat)
        ccmesh.apply_transform(transmat)
        # Also, record where to start this mesh in the simulation
        # That's the x,y coords of the centroid, and -bbox bottom for the z (so it sits on the ground)
        # And the bbox diagonal (we use this for error thresholding)
        metadata = {
            'start_pos': [c[0], c[1], -ccmesh.bounds[0][2]],
            'volume': ccmesh.volume,
            'height': abs(ccmesh.bounds[0][2] - ccmesh.bounds[1][2]),
            'base': min(abs(ccmesh.bounds[0][0] - ccmesh.bounds[1][0]), abs(ccmesh.bounds[0][1] - ccmesh.bounds[1][1]))
        }
        with open(f'{output_dir}/assembly_{i}.json', 'w') as f:
            f.write(json.dumps(metadata))
        # Build a directed tree by DFS
        root_idx = conn_comp[0]
        root = {'id': root_idx, 'children': []}
        fringe = [root]
        visited = set([root['id']])
        while len(fringe) > 0:
            node = fringe.pop()
            for neighbor in adjacencies[node['id']]:
                if not (neighbor in visited):
                    child_node = {'id': neighbor, 'children': []}
                    node['children'].append(child_node)
                    visited.add(child_node['id'])
                    fringe.append(child_node)
        # Build up the URDF data structure
        urdf_root = xml.Element('robot')
        urdf_root.set('name', 'part_graph_shape')
        # Links: one per part, with visual + collision geometry and a mass
        # proportional to its volume.
        for j in conn_comp:
            comp = comps[j]
            link = xml.SubElement(urdf_root, 'link')
            link.set('name', f'part_{j}')
            visual = xml.SubElement(link, 'visual')
            geometry = xml.SubElement(visual, 'geometry')
            mesh = xml.SubElement(geometry, 'mesh')
            mesh.set('filename', f'{output_dir}/part_{j}.stl')
            material = xml.SubElement(visual, 'material')
            material.set('name', 'gray')
            color = xml.SubElement(material, 'color')
            color.set('rgba', '0.5 0.5 0.5 1')
            collision = xml.SubElement(link, 'collision')
            geometry = xml.SubElement(collision, 'geometry')
            mesh = xml.SubElement(geometry, 'mesh')
            mesh.set('filename', f'{output_dir}/part_{j}.stl')
            inertial = xml.SubElement(link, 'inertial')
            mass = xml.SubElement(inertial, 'mass')
            mass.set('value', str(comp.volume * density))
            inertia = xml.SubElement(inertial, 'inertia')
            inertia.set('ixx', '1.0')
            inertia.set('ixy', '0.0')
            inertia.set('ixz', '0.0')
            inertia.set('iyy', '1.0')
            inertia.set('iyz', '0.0')
            inertia.set('izz', '1.0')
        # Joints: fixed joints along the DFS spanning tree edges.
        fringe = [root]
        while len(fringe) > 0:
            node = fringe.pop()
            for child_node in node['children']:
                joint = xml.SubElement(urdf_root, 'joint')
                joint.set('name', f'{node["id"]}_to_{child_node["id"]}')
                joint.set('type', 'fixed')
                parent = xml.SubElement(joint, 'parent')
                parent.set('link', f'part_{node["id"]}')
                child = xml.SubElement(joint, 'child')
                child.set('link', f'part_{child_node["id"]}')
                origin = xml.SubElement(joint, 'origin')
                origin.set('xyz', '0 0 0')
                fringe.append(child_node)
        # Save URDF file to disk
        # Have to make sure to split it into multiple lines, otherwise Bullet's URDF parser will
        # throw an error trying to load really large files as a single line...
        xmlstring = xml.tostring(urdf_root, encoding='unicode')
        xmlstring = '>\n'.join(xmlstring.split('>'))
        with open(f'{output_dir}/assembly_{i}.urdf', 'w') as f:
            f.write(xmlstring)
    # Write the parts to disk as STL files for the URDF to refer to
    for i,comp in enumerate(comps):
        comp.export(f'{output_dir}/part_{i}.stl')
def check_stability(verts, faces, gui=False):
    """Simulate the mesh in pybullet and report whether it stays put.

    The mesh is converted to fixed-joint URDF assemblies (one per connected
    component, via obj2urdf), loaded onto a ground plane, and nudged with a
    small upward impulse. The shape is stable only when every component's
    final height stays within 2.5% of its own height of where it started.
    Unrooted shapes are rejected immediately.

    verts/faces: y-up mesh geometry. gui: run the sim in a pybullet GUI
    window (slower, for debugging). Returns a bool.
    """
    # First, check if the file is even rooted.
    # If it's not rooted, it can't be stable
    if not check_rooted(verts, faces):
        return False
    # Start up the simulation
    mode = p.GUI if gui else p.DIRECT
    physicsClient = p.connect(mode)
    p.setGravity(0, 0, -9.8)
    # Load the ground plane
    p.setAdditionalSearchPath(pybullet_data.getDataPath())
    # print(pybullet_data.getDataPath())
    planeId = p.loadURDF("plane.urdf")
    # Convert the object to a URDF assembly, load it up
    # There may be more than one URDF, if the object had more than one connected component
    rand_str = ''.join(random.choices(string.ascii_lowercase, k=12))
    obj2urdf(verts, faces, f'tmp/{rand_str}')
    objIds = []
    startPositions = {}
    volumes = {}
    heights = {}
    bases = {}
    for urdf in [f for f in os.listdir(f'tmp/{rand_str}') if os.path.splitext(f)[1] == '.urdf']:
        with open(f'tmp/{rand_str}/{os.path.splitext(urdf)[0]}.json', 'r') as f:
            data = json.loads(f.read())
        startPos = data['start_pos']
        startOrientation = p.getQuaternionFromEuler([0,0,0])
        objId = p.loadURDF(f"tmp/{rand_str}/{urdf}",startPos, startOrientation)
        objIds.append(objId)
        startPositions[objId] = startPos
        volumes[objId] = data['volume']
        heights[objId] = data['height']
        bases[objId] = data['base']
    shutil.rmtree(f'tmp/{rand_str}')
    # Disable collisions between all objects (we only want collisions between objects and the ground)
    # That's because we want to check if the different components are *independently* stable, and
    # having them hit each other might muck up that judgment
    for i in range(0, len(objIds)-1):
        ni = p.getNumJoints(objIds[i])
        for j in range(i+1, len(objIds)):
            nj = p.getNumJoints(objIds[j])
            for k in range(-1, ni):
                for l in range(-1, nj):
                    p.setCollisionFilterPair(objIds[i], objIds[j], k, l, False)
    # (Removed an unused mid-function `import math` left over from older
    # perturbation code.)
    # See if objects are stable from a small drop
    for objId in objIds:
        s = volumes[objId]
        b = bases[objId]
        v = s * b
        # 800, 4, 4, 4
        p.applyExternalForce(objId, -1, (0, 0, 400*v), startPositions[objId], p.WORLD_FRAME)
        #p.applyExternalTorque(objId, -1, (0, 4*v, 0), p.WORLD_FRAME)
        #p.applyExternalTorque(objId, -1, (4*v, 0, 0), p.WORLD_FRAME)
        #p.applyExternalTorque(objId, -1, (0, 0, 80*v), p.WORLD_FRAME)
    # Run simulation
    if gui:
        for i in range(600):
            p.stepSimulation()
            time.sleep(1./600.)
    else:
        for i in range(1000):
            p.stepSimulation()
    for objId in objIds:
        endPos, _ = p.getBasePositionAndOrientation(objId)
        zend = endPos[2]
        zstart = startPositions[objId][2]
        zdiff = abs(zstart - zend)
        # Reject if the component moved vertically by more than 2.5% of its height.
        if zdiff > abs(0.025 * heights[objId]):
            p.disconnect()
            return False
    p.disconnect()
    return True
| 11,194 | 35.825658 | 119 | py |
ShapeMOD | ShapeMOD-main/SA_lang/tasks/pointnet2/setup.py | from __future__ import division, absolute_import, with_statement, print_function
from setuptools import setup, find_packages
from torch.utils.cpp_extension import BuildExtension, CUDAExtension
import glob
try:
import builtins
except:
import __builtin__ as builtins
# Flag consumed by the pointnet2 package (see pointnet2_utils.py) so that
# importing it from setup.py does not require the compiled _ext module.
builtins.__POINTNET2_SETUP__ = True
import pointnet2

# C++/CUDA extension sources and headers for the pointnet2._ext module.
_ext_src_root = "pointnet2/_ext-src"
_ext_sources = glob.glob("{}/src/*.cpp".format(_ext_src_root)) + glob.glob(
    "{}/src/*.cu".format(_ext_src_root)
)
_ext_headers = glob.glob("{}/include/*".format(_ext_src_root))

requirements = ["etw_pytorch_utils==1.1.1", "h5py", "pprint", "enum34", "future"]

setup(
    name="pointnet2",
    version=pointnet2.__version__,
    author="Erik Wijmans",
    packages=find_packages(),
    install_requires=requirements,
    ext_modules=[
        CUDAExtension(
            name="pointnet2._ext",
            sources=_ext_sources,
            extra_compile_args={
                "cxx": ["-O2", "-I{}".format("{}/include".format(_ext_src_root))],
                "nvcc": ["-O2", "-I{}".format("{}/include".format(_ext_src_root))],
            },
        )
    ],
    cmdclass={"build_ext": BuildExtension},
)
| 1,175 | 28.4 | 83 | py |
ShapeMOD | ShapeMOD-main/SA_lang/tasks/pointnet2/utils/pointnet2_utils.py | from __future__ import (
division,
absolute_import,
with_statement,
print_function,
unicode_literals,
)
import torch
from torch.autograd import Function
import torch.nn as nn
import etw_pytorch_utils as pt_utils
import sys
try:
import builtins
except:
import __builtin__ as builtins
try:
import pointnet2._ext as _ext
except ImportError:
if not getattr(builtins, "__POINTNET2_SETUP__", False):
raise ImportError(
"Could not import _ext module.\n"
"Please see the setup instructions in the README: "
"https://github.com/erikwijmans/Pointnet2_PyTorch/blob/master/README.rst"
)
if False:
# Workaround for type hints without depending on the `typing` module
from typing import *
class RandomDropout(nn.Module):
    """Feature dropout whose drop probability is re-sampled uniformly from
    [0, p) on every forward pass (no activation rescaling)."""

    def __init__(self, p=0.5, inplace=False):
        super(RandomDropout, self).__init__()
        self.p = p
        self.inplace = inplace

    def forward(self, X):
        theta = torch.Tensor(1).uniform_(0, self.p)[0]
        # BUG FIX: the original passed `self.train` -- the nn.Module.train
        # *method*, which is always truthy -- instead of the boolean
        # `self.training` flag, so dropout stayed active in eval mode.
        return pt_utils.feature_dropout_no_scaling(X, theta, self.training, self.inplace)
class FurthestPointSampling(Function):
    """autograd.Function wrapper around the CUDA furthest-point-sampling kernel.

    Not differentiable: backward returns None gradients.
    """

    @staticmethod
    def forward(ctx, xyz, npoint):
        # type: (Any, torch.Tensor, int) -> torch.Tensor
        r"""
        Uses iterative furthest point sampling to select a set of npoint features that have the largest
        minimum distance

        Parameters
        ----------
        xyz : torch.Tensor
            (B, N, 3) tensor where N > npoint
        npoint : int32
            number of features in the sampled set

        Returns
        -------
        torch.Tensor
            (B, npoint) tensor containing the set
        """
        return _ext.furthest_point_sampling(xyz, npoint)

    @staticmethod
    def backward(xyz, a=None):
        # NOTE(review): the first parameter here is really the autograd ctx
        # despite its `xyz` name; sampled indices carry no gradient.
        return None, None


# Functional alias used throughout the pointnet2 modules.
furthest_point_sample = FurthestPointSampling.apply
class GatherOperation(Function):
    """autograd.Function wrapper around the CUDA point-gather kernel.

    Differentiable w.r.t. `features` (gradients are scattered back through
    the gathered indices); `idx` gets no gradient.
    """

    @staticmethod
    def forward(ctx, features, idx):
        # type: (Any, torch.Tensor, torch.Tensor) -> torch.Tensor
        r"""

        Parameters
        ----------
        features : torch.Tensor
            (B, C, N) tensor

        idx : torch.Tensor
            (B, npoint) tensor of the features to gather

        Returns
        -------
        torch.Tensor
            (B, C, npoint) tensor
        """

        _, C, N = features.size()

        # N is needed in backward to size the scattered gradient tensor.
        ctx.for_backwards = (idx, C, N)

        return _ext.gather_points(features, idx)

    @staticmethod
    def backward(ctx, grad_out):
        idx, C, N = ctx.for_backwards

        grad_features = _ext.gather_points_grad(grad_out.contiguous(), idx, N)

        return grad_features, None


gather_operation = GatherOperation.apply
class ThreeNN(Function):
    """autograd.Function wrapper around the CUDA 3-nearest-neighbor kernel.

    Not differentiable: backward returns None gradients.
    """

    @staticmethod
    def forward(ctx, unknown, known):
        # type: (Any, torch.Tensor, torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]
        r"""
            Find the three nearest neighbors of unknown in known
        Parameters
        ----------
        unknown : torch.Tensor
            (B, n, 3) tensor of known features
        known : torch.Tensor
            (B, m, 3) tensor of unknown features

        Returns
        -------
        dist : torch.Tensor
            (B, n, 3) l2 distance to the three nearest neighbors
        idx : torch.Tensor
            (B, n, 3) index of 3 nearest neighbors
        """
        dist2, idx = _ext.three_nn(unknown, known)

        # The kernel returns squared distances; convert to true l2 here.
        return torch.sqrt(dist2), idx

    @staticmethod
    def backward(ctx, a=None, b=None):
        return None, None


three_nn = ThreeNN.apply
class ThreeInterpolate(Function):
    """Differentiable weighted interpolation of features from 3 neighbors (CUDA)."""

    @staticmethod
    def forward(ctx, features, idx, weight):
        # type(Any, torch.Tensor, torch.Tensor, torch.Tensor) -> Torch.Tensor
        r"""
            Performs weight linear interpolation on 3 features
        Parameters
        ----------
        features : torch.Tensor
            (B, c, m) Features descriptors to be interpolated from
        idx : torch.Tensor
            (B, n, 3) three nearest neighbors of the target features in features
        weight : torch.Tensor
            (B, n, 3) weights

        Returns
        -------
        torch.Tensor
            (B, c, n) tensor of the interpolated features
        """
        B, c, m = features.size()
        n = idx.size(1)

        # m is needed in backward to size the scattered gradient tensor.
        ctx.three_interpolate_for_backward = (idx, weight, m)

        return _ext.three_interpolate(features, idx, weight)

    @staticmethod
    def backward(ctx, grad_out):
        # type: (Any, torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]
        r"""
        Parameters
        ----------
        grad_out : torch.Tensor
            (B, c, n) tensor with gradients of outputs

        Returns
        -------
        grad_features : torch.Tensor
            (B, c, m) tensor with gradients of features

        None

        None
        """
        idx, weight, m = ctx.three_interpolate_for_backward

        grad_features = _ext.three_interpolate_grad(
            grad_out.contiguous(), idx, weight, m
        )

        return grad_features, None, None


three_interpolate = ThreeInterpolate.apply
class GroupingOperation(Function):
    """autograd.Function wrapper around the CUDA feature-grouping kernel.

    Differentiable w.r.t. `features`; `idx` gets no gradient.
    """

    @staticmethod
    def forward(ctx, features, idx):
        # type: (Any, torch.Tensor, torch.Tensor) -> torch.Tensor
        r"""

        Parameters
        ----------
        features : torch.Tensor
            (B, C, N) tensor of features to group
        idx : torch.Tensor
            (B, npoint, nsample) tensor containing the indicies of features to group with

        Returns
        -------
        torch.Tensor
            (B, C, npoint, nsample) tensor
        """
        B, nfeatures, nsample = idx.size()
        _, C, N = features.size()

        # N is needed in backward to size the scattered gradient tensor.
        ctx.for_backwards = (idx, N)

        return _ext.group_points(features, idx)

    @staticmethod
    def backward(ctx, grad_out):
        # type: (Any, torch.tensor) -> Tuple[torch.Tensor, torch.Tensor]
        r"""

        Parameters
        ----------
        grad_out : torch.Tensor
            (B, C, npoint, nsample) tensor of the gradients of the output from forward

        Returns
        -------
        torch.Tensor
            (B, C, N) gradient of the features
        None
        """
        idx, N = ctx.for_backwards

        grad_features = _ext.group_points_grad(grad_out.contiguous(), idx, N)

        return grad_features, None


grouping_operation = GroupingOperation.apply
class BallQuery(Function):
    """autograd.Function wrapper around the CUDA ball-query kernel.

    Not differentiable: backward returns None gradients.
    """

    @staticmethod
    def forward(ctx, radius, nsample, xyz, new_xyz):
        # type: (Any, float, int, torch.Tensor, torch.Tensor) -> torch.Tensor
        r"""

        Parameters
        ----------
        radius : float
            radius of the balls
        nsample : int
            maximum number of features in the balls
        xyz : torch.Tensor
            (B, N, 3) xyz coordinates of the features
        new_xyz : torch.Tensor
            (B, npoint, 3) centers of the ball query

        Returns
        -------
        torch.Tensor
            (B, npoint, nsample) tensor with the indicies of the features that form the query balls
        """
        return _ext.ball_query(new_xyz, xyz, radius, nsample)

    @staticmethod
    def backward(ctx, a=None):
        return None, None, None, None


ball_query = BallQuery.apply
class QueryAndGroup(nn.Module):
    r"""
    Groups with a ball query of radius

    Parameters
    ---------
    radius : float32
        Radius of ball
    nsample : int32
        Maximum number of features to gather in the ball
    """

    def __init__(self, radius, nsample, use_xyz=True):
        # type: (QueryAndGroup, float, int, bool) -> None
        super(QueryAndGroup, self).__init__()
        self.radius, self.nsample, self.use_xyz = radius, nsample, use_xyz

    def forward(self, xyz, new_xyz, features=None):
        # type: (QueryAndGroup, torch.Tensor, torch.Tensor, torch.Tensor) -> Tuple[torch.Tensor]
        r"""
        Parameters
        ----------
        xyz : torch.Tensor
            xyz coordinates of the features (B, N, 3)
        new_xyz : torch.Tensor
            centriods (B, npoint, 3)
        features : torch.Tensor
            Descriptors of the features (B, C, N)

        Returns
        -------
        new_features : torch.Tensor
            (B, 3 + C, npoint, nsample) tensor
        """

        idx = ball_query(self.radius, self.nsample, xyz, new_xyz)
        xyz_trans = xyz.transpose(1, 2).contiguous()
        grouped_xyz = grouping_operation(xyz_trans, idx)  # (B, 3, npoint, nsample)
        # Re-center each group on its query point (coordinates become
        # relative to the ball center).
        grouped_xyz -= new_xyz.transpose(1, 2).unsqueeze(-1)

        if features is not None:
            grouped_features = grouping_operation(features, idx)
            if self.use_xyz:
                new_features = torch.cat(
                    [grouped_xyz, grouped_features], dim=1
                )  # (B, C + 3, npoint, nsample)
            else:
                new_features = grouped_features
        else:
            assert (
                self.use_xyz
            ), "Cannot have not features and not use xyz as a feature!"
            new_features = grouped_xyz

        return new_features
class GroupAll(nn.Module):
    r"""
    Groups all features into a single group (the degenerate grouping used
    when npoint is None).

    Parameters
    ---------
    use_xyz : bool
        Prepend the xyz coordinates to the feature channels
    """

    def __init__(self, use_xyz=True):
        # type: (GroupAll, bool) -> None
        super(GroupAll, self).__init__()
        self.use_xyz = use_xyz

    def forward(self, xyz, new_xyz, features=None):
        # type: (GroupAll, torch.Tensor, torch.Tensor, torch.Tensor) -> Tuple[torch.Tensor]
        r"""
        Parameters
        ----------
        xyz : torch.Tensor
            xyz coordinates of the features (B, N, 3)
        new_xyz : torch.Tensor
            Ignored
        features : torch.Tensor
            Descriptors of the features (B, C, N)

        Returns
        -------
        new_features : torch.Tensor
            (B, C + 3, 1, N) tensor
        """
        coords = xyz.transpose(1, 2).unsqueeze(2)  # (B, 3, 1, N)
        if features is None:
            return coords
        feats = features.unsqueeze(2)  # (B, C, 1, N)
        if not self.use_xyz:
            return feats
        return torch.cat([coords, feats], dim=1)  # (B, 3 + C, 1, N)
| 10,413 | 26.049351 | 103 | py |
ShapeMOD | ShapeMOD-main/SA_lang/tasks/pointnet2/utils/linalg_utils.py | from __future__ import (
division,
absolute_import,
with_statement,
print_function,
unicode_literals,
)
import torch
from enum import Enum
import numpy as np
# Selects which axis of X/Z holds the coordinate dimension d:
# d_second -> X is (B, N, d); d_first -> X is (B, d, N).
PDist2Order = Enum("PDist2Order", "d_first d_second")


def pdist2(X, Z=None, order=PDist2Order.d_second):
    # type: (torch.Tensor, torch.Tensor, PDist2Order) -> torch.Tensor
    r""" Calculates the pairwise squared distance between X and Z

    D[b, i, j] = ||X[b, i] - Z[b, j]||^2 -- note this is the *squared* l2
    distance (the |R + S - 2G| expansion below takes no square root).

    Parameters
    ---------
    X : torch.Tensor
        X is a (B, N, d) tensor. There are B batches, and N vectors of dimension d
        (or (B, d, N) with order=PDist2Order.d_first)
    Z: torch.Tensor
        Z is a (B, M, d) (or (B, d, M)) tensor. If Z is None, then Z = X

    Returns
    -------
    torch.Tensor
        Distance matrix is size (B, N, M)
    """
    # BUG FIX: the original used np.matmul on torch tensors, which either
    # fails or silently round-trips through numpy arrays depending on the
    # numpy/torch versions; torch.matmul keeps everything a torch tensor.
    if order == PDist2Order.d_second:
        if X.dim() == 2:
            X = X.unsqueeze(0)
        if Z is None:
            Z = X
            G = torch.matmul(X, Z.transpose(-2, -1))
            S = (X * X).sum(-1, keepdim=True)
            R = S.transpose(-2, -1)
        else:
            if Z.dim() == 2:
                Z = Z.unsqueeze(0)
            G = torch.matmul(X, Z.transpose(-2, -1))
            S = (X * X).sum(-1, keepdim=True)
            R = (Z * Z).sum(-1, keepdim=True).transpose(-2, -1)
    else:
        if X.dim() == 2:
            X = X.unsqueeze(0)
        if Z is None:
            Z = X
            G = torch.matmul(X.transpose(-2, -1), Z)
            R = (X * X).sum(-2, keepdim=True)
            S = R.transpose(-2, -1)
        else:
            if Z.dim() == 2:
                Z = Z.unsqueeze(0)
            G = torch.matmul(X.transpose(-2, -1), Z)
            S = (X * X).sum(-2, keepdim=True).transpose(-2, -1)
            R = (Z * Z).sum(-2, keepdim=True)

    # abs() guards against tiny negative values from floating-point error.
    return torch.abs(R + S - 2 * G).squeeze(0)
def pdist2_slow(X, Z=None):
    """Reference O(B*N*M) pairwise Euclidean distance.

    X is (B, d, N) and Z is (B, d, M) (Z defaults to X); returns a
    (B, N, M) tensor of l2 distances between column vectors.
    """
    if Z is None:
        Z = X
    batches, n_pts, m_pts = X.size(0), X.size(2), Z.size(2)
    D = torch.zeros(batches, n_pts, m_pts)
    for b in range(batches):
        for row in range(n_pts):
            for col in range(m_pts):
                D[b, row, col] = torch.dist(X[b, :, row], Z[b, :, col])
    return D
if __name__ == "__main__":
    # Smoke test. Note pdist2 returns *squared* distances (no sqrt in its
    # expansion) while pdist2_slow uses torch.dist (true l2), so the final
    # printed difference between the two is expected to be nonzero.
    X = torch.randn(2, 3, 5)
    Z = torch.randn(2, 3, 3)
    print(pdist2(X, order=PDist2Order.d_first))
    print(pdist2_slow(X))
    print(torch.dist(pdist2(X, order=PDist2Order.d_first), pdist2_slow(X)))
| 2,335 | 26.482353 | 83 | py |
ShapeMOD | ShapeMOD-main/SA_lang/tasks/pointnet2/utils/pointnet2_modules.py | from __future__ import (
division,
absolute_import,
with_statement,
print_function,
unicode_literals,
)
import torch
import torch.nn as nn
import torch.nn.functional as F
import etw_pytorch_utils as pt_utils
from pointnet2.utils import pointnet2_utils
if False:
# Workaround for type hints without depending on the `typing` module
from typing import *
class _PointnetSAModuleBase(nn.Module):
    """Shared forward pass for PointNet++ set-abstraction modules.

    Subclasses are responsible for populating self.npoint, self.groupers and
    self.mlps before forward is called.
    """

    def __init__(self):
        super(_PointnetSAModuleBase, self).__init__()
        self.npoint = None
        self.groupers = None
        self.mlps = None

    def forward(self, xyz, features=None):
        # type: (_PointnetSAModuleBase, torch.Tensor, torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]
        r"""
        Parameters
        ----------
        xyz : torch.Tensor
            (B, N, 3) tensor of the xyz coordinates of the features
        features : torch.Tensor
            (B, N, C) tensor of the descriptors of the the features

        Returns
        -------
        new_xyz : torch.Tensor
            (B, npoint, 3) tensor of the new features' xyz
        new_features : torch.Tensor
            (B, \sum_k(mlps[k][-1]), npoint) tensor of the new_features descriptors
        """

        new_features_list = []

        xyz_flipped = xyz.transpose(1, 2).contiguous()
        # Pick npoint centroids by furthest point sampling; when npoint is
        # None (GroupAll case) there are no centroids.
        new_xyz = (
            pointnet2_utils.gather_operation(
                xyz_flipped, pointnet2_utils.furthest_point_sample(xyz, self.npoint)
            )
            .transpose(1, 2)
            .contiguous()
            if self.npoint is not None
            else None
        )

        # One grouper/MLP pair per scale; results are concatenated channel-wise.
        for i in range(len(self.groupers)):
            new_features = self.groupers[i](
                xyz, new_xyz, features
            )  # (B, C, npoint, nsample)

            new_features = self.mlps[i](new_features)  # (B, mlp[-1], npoint, nsample)
            # Max-pool over the nsample axis of each local region.
            new_features = F.max_pool2d(
                new_features, kernel_size=[1, new_features.size(3)]
            )  # (B, mlp[-1], npoint, 1)
            new_features = new_features.squeeze(-1)  # (B, mlp[-1], npoint)

            new_features_list.append(new_features)

        return new_xyz, torch.cat(new_features_list, dim=1)
class PointnetSAModuleMSG(_PointnetSAModuleBase):
    r"""Pointnet set abstraction layer with multiscale grouping

    Parameters
    ----------
    npoint : int
        Number of features
    radii : list of float32
        list of radii to group with
    nsamples : list of int32
        Number of samples in each ball query
    mlps : list of list of int32
        Spec of the pointnet before the global max_pool for each scale
    bn : bool
        Use batchnorm
    """

    def __init__(self, npoint, radii, nsamples, mlps, bn=True, use_xyz=True):
        # type: (PointnetSAModuleMSG, int, List[float], List[int], List[List[int]], bool, bool) -> None
        super(PointnetSAModuleMSG, self).__init__()

        assert len(radii) == len(nsamples) == len(mlps)

        self.npoint = npoint
        self.groupers = nn.ModuleList()
        self.mlps = nn.ModuleList()
        for i in range(len(radii)):
            radius = radii[i]
            nsample = nsamples[i]
            # npoint=None means "group everything" (no sampling).
            self.groupers.append(
                pointnet2_utils.QueryAndGroup(radius, nsample, use_xyz=use_xyz)
                if npoint is not None
                else pointnet2_utils.GroupAll(use_xyz)
            )
            # BUG FIX: copy before mutating. The original did
            # `mlp_spec = mlps[i]; mlp_spec[0] += 3`, mutating the caller's
            # list in place (so building the module twice from the same spec
            # added 3 to the input width twice).
            mlp_spec = list(mlps[i])
            if use_xyz:
                # The grouper prepends the 3 xyz channels to the features.
                mlp_spec[0] += 3

            self.mlps.append(pt_utils.SharedMLP(mlp_spec, bn=bn))
class PointnetSAModule(PointnetSAModuleMSG):
    r"""Pointnet set abstraction layer (single-scale convenience wrapper
    around PointnetSAModuleMSG)

    Parameters
    ----------
    npoint : int
        Number of features
    radius : float
        Radius of ball
    nsample : int
        Number of samples in the ball query
    mlp : list
        Spec of the pointnet before the global max_pool
    bn : bool
        Use batchnorm
    """

    def __init__(
        self, mlp, npoint=None, radius=None, nsample=None, bn=True, use_xyz=True
    ):
        # type: (PointnetSAModule, List[int], int, float, int, bool, bool) -> None
        # Delegates to the multiscale module with single-element lists.
        super(PointnetSAModule, self).__init__(
            mlps=[mlp],
            npoint=npoint,
            radii=[radius],
            nsamples=[nsample],
            bn=bn,
            use_xyz=use_xyz,
        )
class PointnetFPModule(nn.Module):
    r"""Propagates the features of one set to another

    Parameters
    ----------
    mlp : list
        Pointnet module parameters
    bn : bool
        Use batchnorm
    """

    def __init__(self, mlp, bn=True):
        # type: (PointnetFPModule, List[int], bool) -> None
        super(PointnetFPModule, self).__init__()
        self.mlp = pt_utils.SharedMLP(mlp, bn=bn)

    def forward(self, unknown, known, unknow_feats, known_feats):
        # type: (PointnetFPModule, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor) -> torch.Tensor
        r"""
        Parameters
        ----------
        unknown : torch.Tensor
            (B, n, 3) tensor of the xyz positions of the unknown features
        known : torch.Tensor
            (B, m, 3) tensor of the xyz positions of the known features
        unknow_feats : torch.Tensor
            (B, C1, n) tensor of the features to be propagated to
        known_feats : torch.Tensor
            (B, C2, m) tensor of features to be propagated
        Returns
        -------
        new_features : torch.Tensor
            (B, mlp[-1], n) tensor of the features of the unknown features
        """
        if known is not None:
            # inverse-distance-weighted interpolation from the 3 nearest
            # known points
            dist, idx = pointnet2_utils.three_nn(unknown, known)
            dist_recip = 1.0 / (dist + 1e-8)  # guard against zero distance
            norm = torch.sum(dist_recip, dim=2, keepdim=True)
            weight = dist_recip / norm
            interpolated_feats = pointnet2_utils.three_interpolate(
                known_feats, idx, weight
            )
        else:
            # No known xyz: broadcast the global feature across all unknown
            # points. BUGFIX: the original built the target shape with
            # `known_feats.size()[0:2] + [unknown.size(1)]`, which raises
            # TypeError (torch.Size is a tuple; tuple + list is invalid).
            interpolated_feats = known_feats.expand(
                *known_feats.size()[0:2], unknown.size(1)
            )
        if unknow_feats is not None:
            new_features = torch.cat(
                [interpolated_feats, unknow_feats], dim=1
            )  # (B, C2 + C1, n)
        else:
            new_features = interpolated_feats
        new_features = new_features.unsqueeze(-1)
        new_features = self.mlp(new_features)
        return new_features.squeeze(-1)
if __name__ == "__main__":
    # Quick smoke test: build a two-scale SA module, run a forward pass on
    # random points, then backprop a gradient of all ones.
    # NOTE(review): requires a CUDA device; `Variable` has been a no-op
    # wrapper since PyTorch 0.4 — presumably kept for old-version support.
    from torch.autograd import Variable
    torch.manual_seed(1)
    torch.cuda.manual_seed_all(1)
    xyz = Variable(torch.randn(2, 9, 3).cuda(), requires_grad=True)
    xyz_feats = Variable(torch.randn(2, 9, 6).cuda(), requires_grad=True)
    test_module = PointnetSAModuleMSG(
        npoint=2, radii=[5.0, 10.0], nsamples=[6, 3], mlps=[[9, 3], [9, 6]]
    )
    test_module.cuda()
    print(test_module(xyz, xyz_feats))
    # test_module = PointnetFPModule(mlp=[6, 6])
    # test_module.cuda()
    # from torch.autograd import gradcheck
    # inputs = (xyz, xyz, None, xyz_feats)
    # test = gradcheck(test_module, inputs, eps=1e-6, atol=1e-4)
    # print(test)
    for _ in range(1):
        _, new_features = test_module(xyz, xyz_feats)
        # seed the backward pass with an all-ones upstream gradient
        new_features.backward(torch.cuda.FloatTensor(*new_features.size()).fill_(1))
        print(new_features)
        print(xyz.grad)
LINDA_DSS | LINDA_DSS-master/learning.py |
# update for tensorflow
from __future__ import absolute_import, division, print_function, unicode_literals
import pandas as pd
import numpy as np
import seaborn as sns
import random as rn
import re
import warnings
import csv
import tensorflow as tf
# Force TensorFlow to single thread
# Multiple threads are a potential source of non-reproducible research results
session_conf = tf.compat.v1.ConfigProto( intra_op_parallelism_threads=1, inter_op_parallelism_threads=1 )
# tf.set_random_seed() will make random number generation in the TensorFlow backend
# have a well defined initial state
# more details: https://www.tensorflow.org/api_docs/python/tf/set_random_seed
tf.compat.v1.set_random_seed(515)
# keras / deep learning libraries
from tensorflow import keras
from tensorflow.keras.models import Sequential
from tensorflow.keras.models import model_from_json
from tensorflow.keras.layers import Dense
from tensorflow.keras.optimizers import Nadam
from tensorflow.keras.callbacks import TensorBoard
from tensorflow.keras.utils import plot_model
# callbacks
from tensorflow.keras import backend as K
from tensorflow.keras.callbacks import EarlyStopping
from tensorflow.keras.callbacks import ModelCheckpoint
from tensorflow.keras.callbacks import ReduceLROnPlateau
from matplotlib import pyplot as plt
from matplotlib.pyplot import figure
import matplotlib.image as mpimg
import pylab as pl
from pylab import savefig
plt.style.use('seaborn-deep')
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import OneHotEncoder, StandardScaler,MinMaxScaler
# Bayesian networks
from sklearn.preprocessing import KBinsDiscretizer
from pylab import *
import pyAgrum as gum
import pyAgrum.lib.notebook as gnb
# for classification purposes
from pyAgrum.lib.bn2roc import showROC
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
from sklearn.metrics import roc_curve, auc
from matplotlib import pyplot as plt
from matplotlib.pyplot import figure
import matplotlib.image as mpimg
import pylab as pl
from pylab import savefig
plt.style.use('seaborn-deep')
# RECALL -----------------------------------------------------------------------------
#
def recall_m(y_true, y_pred):
    """Compute recall (TP / possible positives) with Keras backend ops.

    Parameters
    ----------
    y_true : tensor
        Ground-truth labels.
    y_pred : tensor
        Predictions from the black-box model.

    Returns
    -------
    tensor
        The recall value.
    """
    true_pos = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    possible_pos = K.sum(K.round(K.clip(y_true, 0, 1)))
    # K.epsilon() guards against division by zero
    return true_pos / (possible_pos + K.epsilon())
# PRECISION ---------------------------------------------------------------------------
#
def precision_m(y_true, y_pred):
    """Compute precision (TP / predicted positives) with Keras backend ops.

    Parameters
    ----------
    y_true : tensor
        Ground-truth labels.
    y_pred : tensor
        Predictions from the black-box model.

    Returns
    -------
    tensor
        The precision value.
    """
    true_pos = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    predicted_pos = K.sum(K.round(K.clip(y_pred, 0, 1)))
    # K.epsilon() guards against division by zero
    return true_pos / (predicted_pos + K.epsilon())
# F1 ------------------------------------------------------------------------------------
# Computes the F1 measure of an evaluation setting
# y_true: list of groundtruth labels
# y_pred: list of predictions from blackbox
def f1_m(y_true, y_pred):
    """Compute the F1 score (harmonic mean of precision and recall).

    Delegates to precision_m / recall_m; K.epsilon() guards the division.
    """
    p = precision_m(y_true, y_pred)
    r = recall_m(y_true, y_pred)
    return 2 * ((p * r) / (p + r + K.epsilon()))
# CREATE_MODEL --------------------------------------------------------------------------
# creates a neural network model with a certain number of hidden layers and a certain
# number of neurons in each layer.
# input_dim: an integer specifying the number of input neurons
# output_dim: an integer specifying the number of output neurons (the number of labels)
# hidden_layers: an integer specifying the number of hidden layers
# loss_func: the loss function of the model. By default, it is applied the 'categorical_crossentropy'
# optim: the optimisation algorithm used in the model. By default it is used the 'nadam' algorithm
# metrics: a list of strings specifying the metrics to be evaluated ('accuracy', 'f1', 'recall','precision')
def create_model(input_dim, output_dim, nodes, hidden_layers=1, loss_func='categorical_crossentropy', optim='nadam', metrics=None, name='model'):
    """Build and compile a fully-connected softmax classifier.

    Parameters
    ----------
    input_dim : int
        Number of input neurons.
    output_dim : int
        Number of output neurons (the number of labels).
    nodes : int
        Number of neurons in the input and each hidden layer.
    hidden_layers : int, optional
        Number of hidden layers (default 1).
    loss_func : str, optional
        Loss function (default 'categorical_crossentropy').
    optim : str or optimizer, optional
        Optimisation algorithm; 'nadam' (default) is replaced by a
        configured Nadam instance.
    metrics : list of str, optional
        Metrics to evaluate (default ['accuracy']).
    name : str, optional
        Name of the generated model.

    Returns
    -------
    keras Sequential
        The compiled model.
    """
    # avoid the mutable-default-argument pitfall (a shared list default)
    if metrics is None:
        metrics = ['accuracy']
    model = Sequential(name=name)
    model.add(Dense(nodes, input_dim=input_dim, activation='relu'))  # input layer
    for _ in range(hidden_layers):  # hidden layers
        model.add(Dense(nodes, activation='relu'))
    model.add(Dense(output_dim, activation='softmax'))  # output layer
    if (optim == "nadam"):  # Compile model
        # NOTE(review): `lr` is the legacy keyword; newer Keras expects
        # `learning_rate` — confirm against the installed TF version.
        optim = keras.optimizers.Nadam(lr=0.0001, beta_1=0.9, beta_2=0.999)
    model.compile(loss=loss_func, optimizer=optim,
                  metrics=metrics)
    return model
# GRID_SEARCH -----------------------------------------------------------------------------
# Generates a set of models with different configurations, ranging from an
# initial number of neurons to a maximum number of neurons
# start_nodes: an integer specifying the initial number of neurons to generate a model from
# max_nodes: an integer specifying the maximum number of neurons to generate a model from
# max_hlayers: an integer specifying the maximum number of hidden layers to generate a model from
# debug: boolean that acts as a flag. If True, it displays the characteristics of each model
# metrics: a list of strings with the metrics to be evaluated
def grid_search_model_generator(n_features, n_classes, start_nodes=1, max_nodes=12, max_hlayers=5, debug=False, metrics=None):
    """Generate candidate MLP models over a grid of widths and depths.

    Parameters
    ----------
    n_features : int
        Number of input neurons.
    n_classes : int
        Number of output neurons (labels).
    start_nodes : int, optional
        Smallest hidden width to try (default 1).
    max_nodes : int, optional
        Largest hidden width to try (default 12).
    max_hlayers : int, optional
        Maximum number of hidden layers to try (default 5).
    debug : bool, optional
        If True, print each generated model's summary.
    metrics : list of str, optional
        Metrics passed through to ``create_model`` (default ['accuracy']).

    Returns
    -------
    list
        The generated (compiled) Keras models.
    """
    # avoid the mutable-default-argument pitfall (a shared list default)
    if metrics is None:
        metrics = ['accuracy']
    models = []
    # generate different models with different neurons and different hidden layers
    for neurons in range(start_nodes, max_nodes + 1):
        for hidden_layer in range(1, max_hlayers + 1):
            model_name = "model_h" + str(hidden_layer) + "_N" + str(neurons)
            model = create_model(n_features, n_classes, neurons, hidden_layer, name=model_name, metrics=metrics)
            models.append(model)  # add the generated model to the list
    # plot general information for each model
    if debug:
        for model in models:
            model.summary()
    return models
# PERFORM_GRID_SEARCH -------------------------------------------------------------------
# given a list of models with different configurations, fit the data to the models,
# and evaluate the model. This function returns a list of training histories for each model
# models: list of models
# X_train:
# Y_train:
# X_validation:
# Y_validation:
# X_test:
# Y_test:
# batch_size:
# epochs:
def perform_grid_search( models, path, dataset_name, X_train, Y_train, X_validation, Y_validation, X_test, Y_test, batch_size, epochs ):
    """Train and evaluate every candidate model in *models*.

    Each model is fitted with early stopping and best-model checkpointing,
    then evaluated on the train and test splits (results are printed).

    Parameters
    ----------
    models : list
        Candidate Keras models (e.g. from grid_search_model_generator).
    path : str
        Root directory; checkpoints go under ``path + "training/" + dataset_name``.
    dataset_name : str
        Dataset identifier used in the checkpoint path.
    X_train, Y_train, X_validation, Y_validation, X_test, Y_test : np.ndarray
        Feature/label splits.
    batch_size : int
        Mini-batch size used for training.
    epochs : int
        Maximum number of training epochs.

    Returns
    -------
    dict
        Maps each model's name to ``[history_callback, fitted_model]``.
    """
    HISTORY_DICT = {}
    # define the callbacks to take into consideration during training
    # stop training when val_loss has not improved for 10 consecutive epochs
    early_stop = EarlyStopping(monitor='val_loss', patience=10, verbose=1, mode='min')
    # save the best model (lowest val_loss) after every epoch
    model_checkpoint = ModelCheckpoint(path + "training/" + dataset_name + "/model_{epoch:02d}-{val_loss:.2f}.h5", monitor='val_loss', verbose=0, save_best_only=True, mode='min')
    callbacks_list = [early_stop, model_checkpoint]
    # grid search over each model
    for model in models:
        print('MODEL NAME:', model.name)
        history_callback = model.fit(X_train, Y_train, batch_size = batch_size, epochs = epochs, verbose=0, validation_data=(X_validation, Y_validation), callbacks=callbacks_list)
        score_test = model.evaluate( X_test, Y_test, verbose=0 )
        score_train = model.evaluate( X_train, Y_train )
        print('Test loss: ', format(score_test[0], '.4f'), '\tTrain loss: ', format(score_train[0], '.4f') )
        print('Test accuracy: ', format(score_test[1], '.4f'), '\tTrain accu: ', format(score_train[1], '.4f') )
        print('Abs accuracy: ', format( np.abs( score_test[1] - score_train[1] ), '.4f'))
        print('Abs loss: ', format( np.abs( score_test[0] - score_train[0] ), '.4f'))
        print('\n###########################################################\n')
        HISTORY_DICT[model.name] = [history_callback, model]
    return HISTORY_DICT
# SAVE_MODEL -----------------------------------------------------------------------------
# saves a trained model into a json and hdf5 file
# model: model to be saved
# model_name: string with model name
# path: string with path to save
def save_model(model, model_name, path):
    """Persist a trained Keras model: architecture to JSON, weights to HDF5.

    Parameters
    ----------
    model : keras model
        The trained model to persist.
    model_name : str
        Base name for the output files (suffixed with ``_DUO``).
    path : str
        Output directory (must end with a path separator).
    """
    json_path = path + model_name + "_DUO.json"
    weights_path = path + model_name + "_DUO.h5"
    # architecture as JSON
    with open(json_path, "w") as json_file:
        json_file.write(model.to_json())
    # weights as HDF5
    model.save_weights(weights_path)
    print("Saving files:")
    print(json_path)
    print(weights_path)
    print("Model saved to disk")
# SAVE_MODEL_HISTORY -------------------------------------------------------------------
# saves a trained model into a csv file
# model_hist: history of the model to be saved
# model_name: string with model name
# path: string with path to save
def save_model_history(model_hist, model_name, path):
    """Persist a model's training history to a CSV file (one metric per row).

    Parameters
    ----------
    model_hist : keras History
        The history object returned by ``model.fit`` (its ``.history``
        dict is written out).
    model_name : str
        Base name for the output file (suffixed with ``_hist.csv``).
    path : str
        Output directory (must end with a path separator).
    """
    hist_path = path + model_name + "_hist.csv"
    # with-block guarantees the handle is closed even on error
    with open(hist_path, "w") as file:
        w = csv.writer(file)
        for key, val in model_hist.history.items():
            w.writerow([key, val])
    # BUGFIX: the original printed the weights filename (_DUO.h5) instead
    # of the history file actually written
    print(hist_path)
    print("Model history saved to disk")
# LOAD_MODEL_HISTORY ------------------------------------------
# loads a saved model history into memory
# model_name: the name of the model
# path: path to model history
def load_model_history(model_name, path):
    """Load a training history previously written by save_model_history.

    Parameters
    ----------
    model_name : str
        Base name of the history file (``<model_name>_hist.csv``).
    path : str
        Directory containing the file (must end with a path separator).

    Returns
    -------
    dict
        Maps each metric name to its list of per-epoch float values.
    """
    model_hist_loaded = {}
    # with-block closes the handle (the original leaked the open file)
    with open(path + model_name + "_hist.csv", "r") as f:
        content = f.read()
    for line in content.split("\n"):
        if len(line) == 0:
            continue
        # each row looks like: metric,"[v1, v2, ...]"
        metric = line.split(",\"[")[0]
        values_str = line.split(",\"[")[1].replace("]\"", "").split(", ")
        model_hist_loaded[metric] = [float(val_str) for val_str in values_str]
    return model_hist_loaded
# LOAD_MODEL ------------------------------------------
# loads a saved model into memory
# model_name: the name of the model
# path: path to model history
def load_model(model_name, path):
    """Rebuild a model saved by save_model (JSON architecture + HDF5 weights).

    Parameters
    ----------
    model_name : str
        Base name of the saved files (suffixed with ``_DUO``).
    path : str
        Directory containing the files (must end with a path separator).

    Returns
    -------
    keras model
        The reconstructed model with its weights restored.
    """
    with open(path + model_name + "_DUO.json", 'r') as json_file:
        architecture = json_file.read()
    # rebuild the architecture, then load weights into the new model
    restored = model_from_json(architecture)
    restored.load_weights(path + model_name + "_DUO.h5")
    print("Loaded model from disk")
    return restored
def plot_model_history(model_history, metric):
    """Plot the train vs. validation curve of *metric* from a history dict."""
    key = metric.lower()
    plt.plot(model_history[key], label='train')
    plt.plot(model_history["val_" + key], label='validation')
    plt.ylabel(metric)
    plt.xlabel('Number of Epochs')
    plt.ylim([0, 1])
    plt.legend()
    plt.show()
def plot_ROC_Curve(model, X, Y, n_classes):
    """Plot one ROC curve (with its AUC) per class for a fitted classifier.

    Parameters
    ----------
    model : object
        Fitted classifier exposing ``predict`` (per-class scores).
    X : np.ndarray
        Feature matrix.
    Y : np.ndarray
        One-hot encoded ground-truth labels.
    n_classes : int
        Number of classes to plot.
    """
    scores = model.predict(X)
    # per-class ROC curve and area under it
    fpr, tpr, roc_auc = {}, {}, {}
    for c in range(n_classes):
        fpr[c], tpr[c], _ = roc_curve(Y[:, c], scores[:, c])
        roc_auc[c] = auc(fpr[c], tpr[c])
    # one figure per class
    for c in range(n_classes):
        plt.figure()
        plt.plot(fpr[c], tpr[c], label='ROC curve (area = %0.2f)' % roc_auc[c])
        plt.plot([0, 1], [0, 1], 'k--')  # chance diagonal
        plt.xlim([0.0, 1.0])
        plt.ylim([0.0, 1.05])
        plt.xlabel('False Positive Rate')
        plt.ylabel('True Positive Rate')
        plt.title('Receiver operating characteristic example')
        plt.legend(loc="lower right")
        plt.show()
# ENCODE_DATA --------------------------------------------------------------------------
# Applies one hot encoder to data
# data: a dataframe
# class_var: string with class variable name
def encode_data(data, class_var):
    """One-hot encode the class column of *data* and min-max scale the rest.

    Parameters
    ----------
    data : pd.DataFrame
        Dataset containing feature columns plus the class column.
    class_var : str
        Name of the class column.

    Returns
    -------
    tuple
        (X_scaled, Y, encoder, scaler): scaled features, one-hot labels,
        and the fitted encoder/scaler so transforms can later be inverted.
    """
    feature_names = data.drop([class_var], axis=1).columns.tolist()
    features = data[feature_names].values
    labels = data[class_var].values
    # numerical one-hot encoding of the class labels
    enc = OneHotEncoder()
    Y = enc.fit_transform(labels[:, np.newaxis]).toarray()
    # scale each feature to [0, 1]; helps neural-network convergence
    scaler = MinMaxScaler()
    X_scaled = scaler.fit_transform(features)
    return X_scaled, Y, enc, scaler
# LOAD_TRAINING_DATA ---------------------------------------------------------------------
# loads into a multiarray format a training set previously saved in a .csv file
# dataset_path: string containing the path where the files will be saved
def load_training_data(dataset_path):
    """Load the train/test/validation splits written by
    generate_save_training_data.

    Parameters
    ----------
    dataset_path : str
        Path of the original dataset; the split files share its prefix
        (``_Xtrain.csv`` .. ``_Yvalidation.csv``).

    Returns
    -------
    tuple of np.ndarray
        (X_train, Y_train, X_test, Y_test, X_validation, Y_validation)
    """
    base = dataset_path.replace(".csv", "")
    # header=None: the split files are written by np.savetxt, which emits
    # no header row — the original read_csv call (header inferred)
    # silently consumed the first sample of every split as column names.
    X_train = pd.read_csv(base + "_Xtrain.csv", header=None, index_col=False).values
    X_test = pd.read_csv(base + "_Xtest.csv", header=None, index_col=False).values
    X_validation = pd.read_csv(base + "_Xvalidation.csv", header=None, index_col=False).values
    Y_train = pd.read_csv(base + "_Ytrain.csv", header=None, index_col=False).values
    Y_test = pd.read_csv(base + "_Ytest.csv", header=None, index_col=False).values
    Y_validation = pd.read_csv(base + "_Yvalidation.csv", header=None, index_col=False).values
    return X_train, Y_train, X_test, Y_test, X_validation, Y_validation
# GENERATE_SAVE_TRAINING_DATA ------------------------------------------------------------
#
# dataset_path: string containing the path where the files will be saved
# X: NxM matrix representing the training data
# Y: NxC matrix representing the OneHotEconder of C classes
def generate_save_training_data( dataset_path, X, Y):
    """Split (X, Y) into train/validation/test sets and save each split as CSV.

    The data is split 70/15/15 (train/validation/test) with a fixed random
    state for reproducibility, and each split is written with np.savetxt
    (no header row) next to *dataset_path*.

    Parameters
    ----------
    dataset_path : str
        Path of the original dataset; used as prefix for the generated
        ``_Xtrain.csv`` .. ``_Yvalidation.csv`` files.
    X : np.ndarray
        Feature matrix (N samples x M features).
    Y : np.ndarray
        One-hot encoded class matrix (N samples x C classes).
    """
    # generate train, test and validation sets (70% train, then the
    # remaining 30% is halved into validation and test)
    X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.3, random_state=515)
    X_validation, X_test, Y_validation, Y_test = train_test_split(X_test, Y_test, test_size=0.5, random_state=515)
    np.savetxt(dataset_path.replace(".csv", "") + "_Xtrain.csv", X_train, delimiter=",")
    np.savetxt(dataset_path.replace(".csv", "") + "_Xtest.csv", X_test, delimiter=",")
    np.savetxt(dataset_path.replace(".csv", "") + "_Xvalidation.csv", X_validation, delimiter=",")
    np.savetxt(dataset_path.replace(".csv", "") + "_Ytrain.csv", Y_train, delimiter=",")
    np.savetxt(dataset_path.replace(".csv", "") + "_Ytest.csv", Y_test, delimiter=",")
    np.savetxt(dataset_path.replace(".csv", "") + "_Yvalidation.csv", Y_validation, delimiter=",")
##############################################################################
# BAYESIAN NETWORK EXPLANATIONS #
##############################################################################
def compute_perm_range(feat, variance = 0.25):
    """Compute the clamped permutation interval around a feature value.

    Features are min-max scaled to [0, 1], so the interval
    [feat - variance, feat + variance] is clipped to that domain.
    (The original docstring documented a nonexistent *samples* parameter.)

    Parameters
    ----------
    feat : float
        Value of the feature to be permuted.
    variance : float, optional
        Half-width of the permutation interval (default is 0.25).

    Returns
    -------
    min_range : float
        Minimum value the feature can be permuted to (>= 0).
    max_range : float
        Maximum value the feature can be permuted to (<= 1).
    """
    # clip to the scaled feature domain [0, 1]
    min_range = max(feat - variance, 0)
    max_range = min(feat + variance, 1)
    return min_range, max_range
# PERMUTE_SINGLE_FEATURES_____________________________________________________________
#
def permute_single_features(my_array, samples=300, variance=0.25):
    """Generate a local neighbourhood by permuting one feature at a time.

    For each feature, draws uniform samples within
    [feature - variance, feature + variance] clipped to [0, 1]; every
    returned vector differs from the original in at most one feature.

    Parameters
    ----------
    my_array : np.array
        The datapoint to be locally explained (scaled to [0, 1]).
    samples : int, optional
        Total number of permutations to generate (default is 300),
        distributed evenly across features.
    variance : float, optional
        Half-width of the per-feature permutation interval (default 0.25).

    Returns
    -------
    permutations : list of list
        The original vector followed by the per-feature permutations.
    """
    permutations = []
    # keep a controlled number of decimal places
    my_array = np.round(my_array, 4)
    # BUGFIX: take a real copy — the original aliased the array
    # (`backup = my_array`), so mutations leaked across features and
    # permutations accumulated changes in several features at once.
    my_array_backup = my_array.copy()
    num_features = my_array.shape[0]
    # the first entry is the unmodified original vector
    permutations.append(my_array_backup.tolist())
    perms_per_feature = int(round(samples / num_features, 0))
    for feat in range(0, num_features):
        # reset the working vector so only this feature deviates
        my_array = my_array_backup.copy()
        feature_val = my_array[feat]
        # permutation range clipped to the scaled feature domain [0, 1]
        min_range = max(feature_val - variance, 0)
        max_range = min(feature_val + variance, 1)
        for _ in range(0, perms_per_feature):
            my_array[feat] = np.abs(np.round(rn.uniform(min_range, max_range), 4))
            permutations.append(my_array.tolist())
    return permutations
def check_input(value):
    """Clamp *value* into the unit interval [0, 1]."""
    return min(max(value, 0), 1)
def permute_single_features_circle(my_array, samples=300, variance=0.25):
    """Generate permutations by sampling around the vector on random directions.

    For every round, each feature is displaced along the cosine and sine of
    a random angle with a random radius in [0, variance], producing two new
    vectors per round; all components are clamped to [0, 1].

    Returns at most *samples* vectors; the first entry is the original.
    """
    # keep a controlled number of decimal places
    my_array = np.round(my_array, 4)
    original = my_array
    num_features = my_array.shape[0]
    # the first entry is the unmodified original vector
    permutations = [original.tolist()]
    rounds = int(round(samples / num_features, 0))
    for _ in range(0, rounds):
        cos_point = []
        sin_point = []
        for feat in range(0, num_features):
            theta = 2 * math.pi * np.random.random()
            base = my_array[feat]
            cos_point.append(check_input(base + np.round(np.random.uniform(0, variance), 4) * math.cos(theta)))
            sin_point.append(check_input(base + np.round(np.random.uniform(0, variance), 4) * math.sin(theta)))
        permutations.append(cos_point)
        permutations.append(sin_point)
    return permutations[0:samples]
# LEARNBN -------------------------------------------
#
def learnBN(file_path, algorithm="Hill Climbing"):
    """Learn a Bayesian network structure from a discretized CSV dataset.

    Parameters
    ----------
    file_path : str
        Path to the CSV file with discretized variables.
    algorithm : str, optional
        Structure-learning algorithm: "Hill Climbing" (default),
        "Local Search", "3off2" or "miic".

    Returns
    -------
    list
        [bn, infoBN, essencGraph]: the learned pyAgrum network, its
        information-network rendering and its essential graph.
    """
    learner = gum.BNLearner(file_path)
    if algorithm == "Hill Climbing":
        print("Selecting Greedy Hill Climbing Algorithm")
        learner.useGreedyHillClimbing()
    if algorithm == "Local Search":
        print("Selecting Local Search Algorithm")
        learner.useLocalSearchWithTabuList()
    if algorithm == "3off2":
        print("Selecting 3Off2 Algorithm")
        learner.use3off2()
    if algorithm == "miic":
        print("Selecting MIIC Algorithm")
        learner.useMIIC()
    # learn once — the original called learnBN() twice, discarding the
    # first (and equally expensive) run
    bn = learner.learnBN()
    essencGraph = gum.EssentialGraph(bn)
    infoBN = gnb.getInformation(bn)
    return [bn, infoBN, essencGraph]
# DISCRETIZE_DATAFRAME -------------------------------------------------------
#
#
def discretize_dataframe(df, class_var, num_bins=4):
    """Discretize every continuous column of *df* into quantile bins.

    Each feature column is replaced by its pandas.qcut interval category;
    the class column is copied verbatim.

    Parameters
    ----------
    df : pd.DataFrame
        Dataframe with continuous feature columns plus a class column.
    class_var : str
        Name of the class column (left untouched).
    num_bins : int, optional
        Number of quantile bins (default 4, i.e. quartiles).

    Returns
    -------
    pd.DataFrame
        Same columns as *df*; feature columns hold interval categories.
    """
    # quantile edges, e.g. num_bins=4 -> [0, .25, .5, .75, 1]
    quantile_edges = np.array(range(num_bins + 1)) / (1.0 * num_bins)
    # quantiles are built using pandas.qcut; the class column is just copied
    columns = []
    for col in df.columns.values:
        if col != class_var:
            # duplicates='drop' guards against near-constant columns
            columns.append(pd.DataFrame(pd.qcut(df[col], quantile_edges, duplicates='drop', precision=2), columns=[col]))
        else:
            columns.append(pd.DataFrame(df[col].values, columns=[col]))
    return pd.concat(columns, join='outer', axis=1)
# SAVE_DISCRETIZED_DATAFRAME ---------------------------------------------------
#
def save_discretized_dataframe(indx, df_model, model_type, perm_type, bins, dataset_name, path, class_var):
    """Discretize a permutation dataframe and persist it to CSV.

    Parameters
    ----------
    indx : int
        Index of the locally-explained instance (used in the file name).
    df_model : pd.DataFrame
        Feature permutations plus the class column.
    model_type : str
        Identifier of the black-box model (used in the file name).
    perm_type : str
        Identifier of the permutation strategy (used in the file name).
    bins : int
        Number of quantile bins for discretization.
    dataset_name : str
        Dataset name; its extension is stripped for the file name.
    path : str
        Root output directory.
    class_var : str
        Name of the class column (left untouched by discretization).
    """
    file_path = path + dataset_name + "/" + str(indx) + "/" + re.sub( r"\.\w+", "", dataset_name ) + "_" + model_type +"_INDX_" + str(indx) + "_" + perm_type +".csv"
    # BUGFIX: discretize_dataframe's signature is (df, class_var, num_bins);
    # the original call passed (df, bins, class_var), swapping the two.
    df_discr = discretize_dataframe( df_model, class_var, num_bins=bins )
    print("Saving discretized dataset into: %s\n" %(file_path))
    df_discr.to_csv( file_path, index=False)
# WRAP_INFORMATION -------------------------------------------
#
def wrap_information(local_data_dict):
    """Split per-instance prediction records into confusion-matrix buckets.

    Parameters
    ----------
    local_data_dict : list of dict
        Records from generate_local_predictions, each carrying a
        'prediction_type' key.

    Returns
    -------
    tuple of list
        (true_positives, true_negatives, false_positives, false_negatives);
        records with any other prediction_type are ignored.
    """
    buckets = {
        'TRUE POSITIVE': [],
        'TRUE NEGATIVE': [],
        'FALSE POSITIVE': [],
        'FALSE NEGATIVE': [],
    }
    for record in local_data_dict:
        kind = record['prediction_type']
        if kind in buckets:
            buckets[kind].append(record)
    return (buckets['TRUE POSITIVE'], buckets['TRUE NEGATIVE'],
            buckets['FALSE POSITIVE'], buckets['FALSE NEGATIVE'])
# GENERATE_PERMUTATIONS -------------------------------------------
#
def generate_permutations(instance, labels_lst, feature_names, class_var, encoder, scaler, model, samples=300, variance=0.25):
    """Build a local neighbourhood dataset around a single instance.

    Permutes the instance's scaled features, labels every permutation with
    the black-box model, and returns the permutations mapped back to the
    original feature space together with the predicted class labels.

    Parameters
    ----------
    instance : dict
        Record with 'scaled_vector' (and related metadata).
    labels_lst : list
        Class labels indexed by integer class id.
    feature_names : list of str
        Names for the feature columns of the returned dataframe.
    class_var : str
        Name of the class column added to the dataframe.
    encoder, scaler, model : object
        Fitted one-hot encoder, feature scaler and black-box model.
    samples : int, optional
        Number of permutations to generate (default 300).
    variance : float, optional
        Per-feature permutation half-width (default 0.25).

    Returns
    -------
    pd.DataFrame
        Permutations in the original feature space plus the class column.
    """
    # neighbourhood in the scaled feature space
    scaled_point = np.array(instance['scaled_vector'])
    neighbourhood = permute_single_features(scaled_point, samples=samples, variance=variance)
    # map the neighbourhood back to the original feature space
    neighbourhood_orig = scaler.inverse_transform(neighbourhood)
    # black-box predictions for every permuted point, mapped to labels
    raw_predictions = encoder.inverse_transform(model.predict(neighbourhood))
    labelled_predictions = [labels_lst[int(raw_predictions[i][0])] for i in range(0, len(raw_predictions))]
    # assemble features + predicted class into a single dataframe
    df_local_permutations = pd.DataFrame(neighbourhood_orig, columns=feature_names)
    df_local_permutations[class_var] = labelled_predictions
    return df_local_permutations
# GENERATE_BN_EXPLANATIONS ------------------------------------------------------------
#
def generate_BN_explanations(instance, label_lst, feature_names, class_var, encoder, scaler, model, path, dataset_name ):
    """Build a Bayesian-network explanation for one black-box prediction.

    Pipeline: permute the instance's features -> label permutations with
    the black-box model -> discretize -> save to CSV -> normalise the CSV
    -> learn a BN -> run inference -> render and persist the networks.

    Parameters
    ----------
    instance : dict
        Record from generate_local_predictions ('index', vectors,
        'prediction_type', ...).
    label_lst : list
        Class labels indexed by class id.
    feature_names : list of str
        Names of the feature columns.
    class_var : str
        Name of the class column.
    encoder, scaler, model : object
        Fitted one-hot encoder, feature scaler and black-box model.
    path : str
        Root output directory.
    dataset_name : str
        Dataset file name; its ".csv" extension is stripped for paths.

    Returns
    -------
    list
        [bn, inference, infoBN] pyAgrum artefacts.
    """
    # necessary for starting Numpy generated random numbers in an initial state
    np.random.seed(515)
    # Necessary for starting core Python generated random numbers in a state
    rn.seed(515)
    indx = instance['index']
    # e.g. "TRUE POSITIVE" -> "true_positives" (used as a subdirectory name)
    prediction_type = instance['prediction_type'].lower()+"s"
    prediction_type = prediction_type.replace(" ", "_")
    # generate permutations
    df = generate_permutations( instance, label_lst, feature_names, class_var, encoder, scaler, model)
    # discretize data
    df_discr = discretize_dataframe( df, class_var, num_bins=4 )
    # save discretised dataframe (for debugging and reproducibility purposes)
    path_to_permutations = path + "feature_permutations/" + dataset_name.replace(".csv","") + "/" + prediction_type + "/" + str(indx) + ".csv"
    df_discr.to_csv( path_to_permutations, index=False)
    # normalise dataframe (writes the sibling *_norm.csv file)
    normalise_dataframe( path_to_permutations )
    # learn BN from the normalised file
    bn, infoBN, essencGraph = learnBN( path_to_permutations.replace(".csv", "_norm.csv") )
    # perform inference over every variable with no evidence set
    inference = gnb.getInference(bn, evs={},targets=df_discr.columns.to_list(), size='12')
    # show networks (notebook side-by-side rendering)
    gnb.sideBySide(*[bn, inference, infoBN ],
                   captions=[ "Bayesian Network", "Inference", "Information Network" ])
    # save to file (.dot rendering plus the .net BN itself)
    path_to_explanation = path + "explanations/" + dataset_name.replace(".csv", "") + "/BN/" + prediction_type + "/"
    gum.lib.bn2graph.dotize( bn , path_to_explanation + str(indx) + "_BN" )
    gum.saveBN(bn,path_to_explanation + str(indx) + "_BN.net" )
    return [bn, inference, infoBN]
# GENERATE_BN_EXPLANATIONSMB ------------------------------------------------------------
#
def generate_BN_explanationsMB(instance, label_lst, feature_names, class_var, encoder, scaler, model, path, dataset_name, variance = 0.1, algorithm = "Hill Climbing" ):
    """Build a Bayesian-network explanation plus the class Markov blanket.

    Same pipeline as generate_BN_explanations, but the permutation variance
    and structure-learning algorithm are configurable and the Markov
    blanket of the class variable is also computed.

    Parameters
    ----------
    instance : dict
        Record from generate_local_predictions.
    label_lst : list
        Class labels indexed by class id.
    feature_names : list of str
        Names of the feature columns.
    class_var : str
        Name of the class column.
    encoder, scaler, model : object
        Fitted one-hot encoder, feature scaler and black-box model.
    path : str
        Root output directory.
    dataset_name : str
        Dataset file name; its ".csv" extension is stripped for paths.
    variance : float, optional
        Permutation half-width passed to generate_permutations (default 0.1).
    algorithm : str, optional
        Structure-learning algorithm passed to learnBN (default
        "Hill Climbing").

    Returns
    -------
    list
        [bn, inference, infoBN, markov_blanket] pyAgrum artefacts.
    """
    # necessary for starting Numpy generated random numbers in an initial state
    np.random.seed(515)
    # Necessary for starting core Python generated random numbers in a state
    rn.seed(515)
    indx = instance['index']
    # e.g. "TRUE POSITIVE" -> "true_positives" (used as a subdirectory name)
    prediction_type = instance['prediction_type'].lower()+"s"
    prediction_type = prediction_type.replace(" ", "_")
    # generate permutations
    df = generate_permutations( instance, label_lst, feature_names, class_var, encoder, scaler, model, variance = variance)
    # discretize data
    df_discr = discretize_dataframe( df, class_var, num_bins=4 )
    # save discretised dataframe (for debugging and reproducibility purposes)
    path_to_permutations = path + "feature_permutations/" + dataset_name.replace(".csv","") + "/" + prediction_type + "/" + str(indx) + ".csv"
    df_discr.to_csv( path_to_permutations, index=False)
    # normalise dataframe (writes the sibling *_norm.csv file)
    normalise_dataframe( path_to_permutations )
    # learn BN from the normalised file
    bn, infoBN, essencGraph = learnBN( path_to_permutations.replace(".csv", "_norm.csv"), algorithm = algorithm)
    # perform inference over every variable with no evidence set
    inference = gnb.getInference(bn, evs={},targets=df_discr.columns.to_list(), size='12')
    # compute Markov Blanket of the class variable
    markov_blanket = gum.MarkovBlanket(bn, class_var)
    # show networks
    # gnb.sideBySide(*[bn, inference, markov_blanket ],
    #              captions=[ "Bayesian Network", "Inference", "Markov Blanket" ])
    # save to file (.dot rendering plus the .net BN itself)
    path_to_explanation = path + "explanations/" + dataset_name.replace(".csv", "") + "/BN/" + prediction_type + "/"
    gum.lib.bn2graph.dotize( bn , path_to_explanation + str(indx) + "_BN" )
    gum.saveBN(bn,path_to_explanation + str(indx) + "_BN.net" )
    return [bn, inference, infoBN, markov_blanket]
# GENERATE_LOCAL_PREDICTIONS -------------------------------------------
#
def generate_local_predictions(X, Y, model, scaler, encoder):
    """Classify every data point of a binary problem into its
    confusion-matrix category (TP / TN / FP / FN) with per-instance metadata.

    Parameters
    ----------
    X : np.ndarray
        Scaled feature matrix (N x M).
    Y : np.ndarray
        One-hot encoded ground-truth labels (N x C).
    model : object
        Trained classifier exposing ``predict``.
    scaler : object
        Fitted scaler exposing ``inverse_transform`` (recovers the
        original feature space).
    encoder : object
        Fitted one-hot encoder exposing ``inverse_transform``.

    Returns
    -------
    list of dict
        One record per instance with keys: index, original_vector,
        scaled_vector, ground_truth, predictions, prediction_type.
    """
    # recover data points in the original (unscaled) feature space
    orig_vec = np.round(scaler.inverse_transform(X), 6)
    # predict all instances at once
    predictions = model.predict(X)
    # map prediction vectors back to class labels
    prediction_class = encoder.inverse_transform(predictions)
    local_data_dict = []
    for indx in range(0, orig_vec.shape[0]):
        ground_truth = np.expand_dims(Y[indx], axis=0)
        ground_truth_class = encoder.inverse_transform(ground_truth)[0][0]
        prediction = prediction_class[indx][0]
        # derive the confusion-matrix category; exhaustive for the binary
        # labels (0/1) this function assumes. (The original's four separate
        # if-statements left pred_type undefined or stale for any other
        # label values.)
        pred = int(prediction)
        truth = int(ground_truth_class)
        if pred == 1:
            pred_type = "TRUE POSITIVE" if truth == 1 else "FALSE POSITIVE"
        else:
            pred_type = "FALSE NEGATIVE" if truth == 1 else "TRUE NEGATIVE"
        local_data_dict.append({'index': indx,
                                'original_vector': orig_vec[indx, :].tolist(),
                                'scaled_vector': X[indx, :].tolist(),
                                'ground_truth': ground_truth_class,
                                'predictions': prediction,
                                'prediction_type': pred_type})
    return local_data_dict
##################################################################################
# TEXT PROCESSING #
# ###############################################################################
# FIND -----------------------------------------------
#
def find(s, ch):
    """Return the indices of every occurrence of character ``ch`` in ``s``."""
    positions = []
    for position, character in enumerate(s):
        if character == ch:
            positions.append(position)
    return positions
# UNTOKENIZE -----------------------------------------------
#
def untokenize( tokens, delim ):
    """Join ``tokens`` back into a single string separated by ``delim``.

    Parameters
    ----------
    tokens : list of str
        Tokens to re-assemble.
    delim : str
        Separator inserted between consecutive tokens.

    Returns
    -------
    str
        The joined string ("" for an empty token list).
    """
    # BUG FIX / idiom: the original indexed tokens[0] (IndexError on an empty
    # list) and built the result with quadratic string concatenation;
    # str.join is linear and handles the empty case.
    return delim.join(tokens)
# NORMALISE_LINE -------------------------------------------
#
def normalise_line( my_str, class_label ):
    """Normalise one CSV line of quantile-interval tokens to 4 decimal places.

    Each token between '","' separators looks like '(lo, hi]' where lo and hi
    are decimal numbers; the digits after each decimal point are truncated or
    zero-padded so both numbers carry exactly four decimals, making equal
    intervals compare equal as strings.

    Parameters
    ----------
    my_str : str
        One data line of the permutations CSV, without trailing newline.
    class_label : str
        Class value at the end of the line; stripped here and re-appended to
        the returned string.

    Returns
    -------
    str
        The re-assembled line with normalised tokens and the class label.

    NOTE(review): assumes every token contains at least two '.' characters and
    one ',' (the '(lo, hi]' shape produced by discretize_dataframe); a token
    without them would raise IndexError here -- confirm against callers.
    """
    # strip the trailing class label and canonicalise qcut's -1e-05 lower edge
    my_str = my_str.replace("\","+class_label, "")
    my_str = my_str.replace("-1e-05", "0.0000")
    tokens = my_str.split("\",\"")
    tokens_norm = []
    for token in tokens:
        token = token.replace("]","")
        # indxs: positions of the two decimal points inside '(lo, hi'
        indxs = find(token, ".")
        # indx_comma: index just past ', ', i.e. where the hi number starts
        indx_comma = find(token, ",")[0]+2
        # Case 1: both numbers already have >= 4 decimals -> truncate both
        if( (len(token[indxs[1]+1 : -1 ]) >= 4) & (len( token[indxs[0]+1 : indx_comma-2 ]) >= 4) ):
            token_temp = token[0:indxs[0]] + "." + token[indxs[0] + 1 : indxs[0]+5] + ", " +token[indx_comma:indxs[1]] + token[indxs[1] : indxs[1]+5 ] + "]"
        # Case 2: hi has < 4 decimals, lo has >= 4 -> truncate lo, pad hi
        if( (len(token[indxs[1]+1 : -1 ]) < 4) & (len( token[indxs[0]+1 : indx_comma-2 ]) >= 4) ):
            extra = "0"*(np.abs(len(token[indxs[1]+1 : -1 ]) - 4))
            token_temp = token[0:indxs[0]] + "." + token[indxs[0] + 1 : indxs[0]+5] + ", " +token[indx_comma:indxs[1]] + token[indxs[1] : -1 ] + extra + "]"
        # Case 3: hi has >= 4 decimals, lo has < 4 -> pad lo, truncate hi
        if( (len(token[indxs[1]+1 : -1 ]) >= 4) & (len( token[indxs[0]+1 : indx_comma-2 ]) < 4) ):
            extra = "0"*(np.abs(len( token[indxs[0]+1 : indx_comma-2 ]) - 4))
            token_temp = token[0:indxs[0]] + "." + extra + ", " +token[indx_comma:indxs[1]] + token[indxs[1] : -1 ] + extra + "]"
        # Case 4: both numbers have < 4 decimals -> pad both
        if( (len(token[indxs[1]+1 : -1 ]) < 4) & (len( token[indxs[0]+1 : indx_comma-2 ]) < 4) ):
            extra2 = "0"*(np.abs(len(token[indxs[1]+1 : -1 ]) - 4))
            extra1 = "0"*(np.abs(len(token[indxs[0]+1 : -1 ]) - 4))
            token_temp = token[0:indxs[0]] + "." + extra1 + ", " +token[indx_comma:indxs[1]] + token[indxs[1] : -1 ] + extra2 + "]"
        tokens_norm.append(token_temp)
    return untokenize( tokens_norm, "\",\"") + "\"," +class_label
# NORMALISE_LINE -------------------------------------------
#
def normalise_dataframe( path_to_permutations ):
    """Normalise a permutations CSV line-by-line into a ``*_norm.csv`` file.

    Each data line is passed through normalise_line so all interval tokens
    carry exactly four decimal places; the header is copied unchanged.

    Parameters
    ----------
    path_to_permutations : str
        Path to the input CSV; the output is written next to it with the
        ``_norm.csv`` suffix.
    """
    # BUG FIX: the original leaked both file handles on any exception and
    # shadowed the builtin name `file`; with-blocks guarantee the close.
    with open(path_to_permutations, "r") as src, \
         open(path_to_permutations.replace(".csv", "_norm.csv"), "w") as dst:
        header = src.readline().replace("\n", "")
        dst.write(header + "\n")
        for line in src:
            # the class label is the last '",'-separated field
            class_label = line.split("\",")[-1].replace("\n", "")
            # normalise the data part of the line
            line_norm = normalise_line(line.replace("\n", ""), class_label)
            dst.write(line_norm + "\n")
| 35,736 | 37.099147 | 177 | py |
LINDA_DSS | LINDA_DSS-master/Experiments/impact_of_Learning_algo/learning.py |
# update for tensorflow
from __future__ import absolute_import, division, print_function, unicode_literals
import pandas as pd
import numpy as np
import seaborn as sns
import random as rn
import re
import warnings
import csv
import tensorflow as tf
# Force TensorFlow to single thread
# Multiple threads are a potential source of non-reprocible research resulsts
session_conf = tf.compat.v1.ConfigProto( intra_op_parallelism_threads=1, inter_op_parallelism_threads=1 )
# tf.set_random_seed() will make random number generation in the TensorFlow backend
# have a well defined initial state
# more details: https://www.tensorflow.org/api_docs/python/tf/set_random_seed
tf.compat.v1.set_random_seed(515)
# keras / deep learning libraries
from tensorflow import keras
from tensorflow.keras.models import Sequential
from tensorflow.keras.models import model_from_json
from tensorflow.keras.layers import Dense
from tensorflow.keras.optimizers import Nadam
from tensorflow.keras.callbacks import TensorBoard
from tensorflow.keras.utils import plot_model
# callbacks
from tensorflow.keras import backend as K
from tensorflow.keras.callbacks import EarlyStopping
from tensorflow.keras.callbacks import ModelCheckpoint
from tensorflow.keras.callbacks import ReduceLROnPlateau
from matplotlib import pyplot as plt
from matplotlib.pyplot import figure
import matplotlib.image as mpimg
import pylab as pl
from pylab import savefig
plt.style.use('seaborn-deep')
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import OneHotEncoder, StandardScaler,MinMaxScaler
# Bayesian networks
from sklearn.preprocessing import KBinsDiscretizer
from pylab import *
import pyAgrum as gum
import pyAgrum.lib.notebook as gnb
# for classification purposes
from pyAgrum.lib.bn2roc import showROC
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
from sklearn.metrics import roc_curve, auc
from matplotlib import pyplot as plt
from matplotlib.pyplot import figure
import matplotlib.image as mpimg
import pylab as pl
from pylab import savefig
plt.style.use('seaborn-deep')
# RECALL -----------------------------------------------------------------------------
#
def recall_m(y_true, y_pred):
    """Compute recall = TP / (TP + FN) with Keras backend tensor ops.

    Parameters
    ----------
    y_true : tensor
        Ground-truth labels.
    y_pred : tensor
        Black-box model predictions.

    Returns
    -------
    tensor
        Recall value; K.epsilon() in the denominator avoids division by zero.
    """
    tp = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    actual_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
    return tp / (actual_positives + K.epsilon())
# PRECISION ---------------------------------------------------------------------------
#
def precision_m(y_true, y_pred):
    """Compute precision = TP / (TP + FP) with Keras backend tensor ops.

    Parameters
    ----------
    y_true : tensor
        Ground-truth labels.
    y_pred : tensor
        Black-box model predictions.

    Returns
    -------
    tensor
        Precision value; K.epsilon() in the denominator avoids division by
        zero.
    """
    tp = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
    return tp / (predicted_positives + K.epsilon())
# F1 ------------------------------------------------------------------------------------
# Computes the F1 measure of an evaluation setting
# y_true: list of groundtruth labels
# y_pred: list of predictions from blackbox
def f1_m(y_true, y_pred):
    """Harmonic mean of precision and recall (F1), via the Keras backend."""
    p = precision_m(y_true, y_pred)
    r = recall_m(y_true, y_pred)
    return 2 * ((p * r) / (p + r + K.epsilon()))
# CREATE_MODEL --------------------------------------------------------------------------
# creates a neural network model with a certain number of hidden layers and a certain
# number of neurons in each layer.
# input_dim: an integer specifying the number of input neurons
# output_dim: an integer specifying the number of output neurons (the number of labels)
# hidden_layers: an integer specifying the number of hidden layers
# loss_func: the loss function of the model. By default, it is applied the 'categorical_crossentropy'
# optim: the optimisation algorithm used in the model. By default it is used the 'nadam' algorithm
# metrics: a list of strings specifying the metrics to be evaluated ('accuracy', 'f1', 'recall','precision')
def create_model(input_dim, output_dim, nodes, hidden_layers=1, loss_func='categorical_crossentropy', optim='nadam', metrics=None, name='model'):
    """Build and compile a dense feed-forward Keras classifier.

    Parameters
    ----------
    input_dim : int
        Number of input neurons (features).
    output_dim : int
        Number of output neurons (classes).
    nodes : int
        Neurons per hidden layer.
    hidden_layers : int, optional
        Number of hidden layers (default 1).
    loss_func : str, optional
        Loss function (default 'categorical_crossentropy').
    optim : str or optimizer, optional
        Optimiser; the string 'nadam' is replaced by a configured Nadam.
    metrics : list of str, optional
        Metrics to evaluate; defaults to ['accuracy'].
    name : str, optional
        Model name.

    Returns
    -------
    keras.Model
        The compiled model.
    """
    # BUG FIX: a mutable list default (metrics=['accuracy']) is shared across
    # calls; use the None sentinel instead.
    if metrics is None:
        metrics = ['accuracy']
    model = Sequential(name=name)
    model.add( Dense(nodes, input_dim=input_dim, activation='relu')) # input layer
    for i in range(hidden_layers): # hidden layers
        model.add(Dense(nodes, activation='relu'))
    model.add(Dense(output_dim, activation='softmax')) # output layer
    if( optim == "nadam" ): # Compile model
        optim = keras.optimizers.Nadam(lr=0.0001, beta_1=0.9, beta_2=0.999)
    model.compile(loss=loss_func, optimizer=optim,
                  metrics=metrics)
    return model
# GRID_SEARCH -----------------------------------------------------------------------------
# Generates a set of models with different configurations, ranging from an
# initial number of neurons to a maximum number of neurons
# start_nodes: an integer specifying the initial number of neurons to generate a model from
# max_nodes: an integer specifying the maximum number of neurons to generate a model from
# max_hlayers: an integer specifying the maximum number of hidden layers to generate a model from
# debug: boolean that acts as a flag. If True, it displays the characteristics of each model
# metrics: a list of strings with the metrics to be evaluated
def grid_search_model_generator(n_features, n_classes, start_nodes = 1, max_nodes = 12, max_hlayers = 5, debug = False, metrics = ['accuracy'] ):
    """Build one candidate model per (neurons, hidden-layers) combination.

    Parameters
    ----------
    n_features : int
        Number of input neurons.
    n_classes : int
        Number of output neurons (labels).
    start_nodes, max_nodes : int, optional
        Inclusive range of neurons per layer to try.
    max_hlayers : int, optional
        Maximum number of hidden layers to try.
    debug : bool, optional
        When True, print a summary of every generated model.
    metrics : list of str, optional
        Metrics forwarded to create_model.

    Returns
    -------
    list
        All generated (uncompiled-data, compiled) Keras models.
    """
    models = [
        create_model(n_features, n_classes, neurons, hidden_layer,
                     name="model_h" + str(hidden_layer) + "_N" + str(neurons),
                     metrics=metrics)
        for neurons in range(start_nodes, max_nodes + 1)
        for hidden_layer in range(1, max_hlayers + 1)
    ]
    if debug:
        for model in models:
            model.summary()
    return models
# PERFORM_GRID_SEARCH -------------------------------------------------------------------
# given a list of models with different configurations, fit the data to the models,
# and evaluate the model. This function returns a list of training histories for each model
# models: list of models
# X_train:
# Y_train:
# X_validation:
# Y_validation:
# X_test:
# Y_test:
# batch_size:
# epochs:
def perform_grid_search( models, path, dataset_name, X_train, Y_train, X_validation, Y_validation, X_test, Y_test, batch_size, epochs ):
    """Fit every candidate model, print its scores, and collect histories.

    Parameters
    ----------
    models : list
        Candidate Keras models (e.g. from grid_search_model_generator).
    path, dataset_name : str
        Where per-epoch checkpoints are written.
    X_train, Y_train, X_validation, Y_validation, X_test, Y_test : np.ndarray
        Data splits used for fitting and evaluation.
    batch_size, epochs : int
        Training hyper-parameters.

    Returns
    -------
    dict
        Maps model name -> [fit history, fitted model].
    """
    histories = {}
    # stop once validation loss has not improved for 10 consecutive epochs
    early_stop = EarlyStopping(monitor='val_loss', patience=10, verbose=1, mode='min')
    # keep only checkpoints that improve validation loss
    model_checkpoint = ModelCheckpoint(path + "training/" + dataset_name + "/model_{epoch:02d}-{val_loss:.2f}.h5", monitor='val_loss', verbose=0, save_best_only=True, mode='min')
    for candidate in models:
        print('MODEL NAME:', candidate.name)
        history_callback = candidate.fit(X_train, Y_train, batch_size = batch_size, epochs = epochs, verbose=0, validation_data=(X_validation, Y_validation), callbacks=[early_stop, model_checkpoint])
        score_test = candidate.evaluate( X_test, Y_test, verbose=0 )
        score_train = candidate.evaluate( X_train, Y_train )
        print('Test loss: ', format(score_test[0], '.4f'), '\tTrain loss: ', format(score_train[0], '.4f') )
        print('Test accuracy: ', format(score_test[1], '.4f'), '\tTrain accu: ', format(score_train[1], '.4f') )
        print('Abs accuracy: ', format( np.abs( score_test[1] - score_train[1] ), '.4f'))
        print('Abs loss: ', format( np.abs( score_test[0] - score_train[0] ), '.4f'))
        print('\n###########################################################\n')
        histories[candidate.name] = [history_callback, candidate]
    return histories
# SAVE_MODEL -----------------------------------------------------------------------------
# saves a trained model into a json and hdf5 file
# model: model to be saved
# model_name: string with model name
# path: string with path to save
def save_model( model, model_name, path ):
    """Persist a trained Keras model: architecture to JSON, weights to HDF5.

    Parameters
    ----------
    model : keras.Model
        The trained model to persist.
    model_name : str
        Base name; '_DUO.json' / '_DUO.h5' suffixes are appended.
    path : str
        Directory (including trailing separator) to write into.
    """
    # architecture -> <path><model_name>_DUO.json
    # (the original also called json_file.close() inside the with-block,
    # which is redundant -- the context manager already closes the file)
    with open(path + model_name + "_DUO.json", "w") as json_file:
        json_file.write(model.to_json())
    # weights -> <path><model_name>_DUO.h5
    model.save_weights( path + model_name + "_DUO.h5")
    print("Saving files:")
    print(path + model_name + "_DUO.json")
    print(path + model_name + "_DUO.h5")
    print("Model saved to disk")
# SAVE_MODEL_HISTORY -------------------------------------------------------------------
# saves a trained model into a csv file
# model_hist: history of the model to be saved
# model_name: string with model name
# path: string with path to save
def save_model_history( model_hist, model_name, path ):
    """Write a Keras History's metric lists to ``<path><model_name>_hist.csv``.

    Each CSV row is ``metric_name, [v1, v2, ...]``.

    Parameters
    ----------
    model_hist : keras History
        Object exposing a ``.history`` dict of metric -> list of floats.
    model_name : str
        Base file name.
    path : str
        Directory (including trailing separator) to write into.
    """
    # BUG FIX: the original leaked the handle on exception and shadowed the
    # builtin name `file`; a with-block guarantees flush/close.
    with open(path + model_name + "_hist.csv", "w") as hist_file:
        writer = csv.writer(hist_file)
        for key, val in model_hist.history.items():
            writer.writerow([key, val])
    print(path + model_name+"_DUO.h5")
    print("Model history saved to disk")
# LOAD_MODEL_HISTORY ------------------------------------------
# loads a saved model history into memory
# model_name: the name of the model
# path: path to model history
def load_model_history( model_name, path):
    """Load a history dict previously written by save_model_history.

    Parameters
    ----------
    model_name : str
        Base file name (``_hist.csv`` is appended).
    path : str
        Directory (including trailing separator) to read from.

    Returns
    -------
    dict
        Maps metric name -> list of float values.
    """
    model_hist_loaded = {}
    # BUG FIX: the original used open(...).read() and never closed the handle
    with open(path + model_name + "_hist.csv", "r") as hist_file:
        content = hist_file.read()
    for line in content.split("\n"):
        if len(line) == 0:
            continue
        metric = line.split(",\"[")[0]  # metric name before the list
        values_str = line.split(",\"[")[1].replace("]\"", "").split(", ")
        values = [float(val_str) for val_str in values_str]
        model_hist_loaded[metric] = values
    return model_hist_loaded
# LOAD_MODEL ------------------------------------------
# loads a saved model into memory
# model_name: the name of the model
# path: path to model history
def load_model( model_name, path ):
    """Load a model saved by save_model (JSON architecture + HDF5 weights).

    Parameters
    ----------
    model_name : str
        Base file name (``_DUO.json`` / ``_DUO.h5`` are appended).
    path : str
        Directory (including trailing separator) to read from.

    Returns
    -------
    keras.Model
        The reconstructed model with its weights loaded.
    """
    # use a with-block so the JSON handle is closed even on parse errors
    with open(path + model_name + "_DUO.json", 'r') as json_file:
        loaded_model_json = json_file.read()
    # rebuild architecture, then load weights into it
    loaded_model = model_from_json(loaded_model_json)
    loaded_model.load_weights(path + model_name + "_DUO.h5")
    print("Loaded model from disk")
    return loaded_model
def plot_model_history( model_history, metric ):
    """Plot the train vs validation curves of one metric over epochs.

    Parameters
    ----------
    model_history : dict
        Maps metric name -> list of values (as loaded by load_model_history).
    metric : str
        Metric to plot; its 'val_' counterpart is plotted alongside.
    """
    key = metric.lower()
    plt.plot(model_history[key], label='train')
    plt.plot(model_history["val_" + key], label='validation')
    plt.ylabel(metric)
    plt.xlabel('Number of Epochs')
    plt.ylim([0, 1])
    plt.legend()
    plt.show()
def plot_ROC_Curve( model, X, Y, n_classes):
    """Plot one ROC curve (with AUC) per class for the given model.

    Parameters
    ----------
    model : keras.Model
        Trained classifier exposing ``predict``.
    X : np.ndarray
        Feature matrix to score.
    Y : np.ndarray
        One-hot encoded ground-truth labels, shape (samples, n_classes).
    n_classes : int
        Number of classes / curves.
    """
    scores = model.predict(X)
    fpr, tpr, roc_auc = dict(), dict(), dict()
    # per-class ROC points and area under the curve
    for cls in range(n_classes):
        fpr[cls], tpr[cls], _ = roc_curve(Y[:, cls], scores[:, cls])
        roc_auc[cls] = auc(fpr[cls], tpr[cls])
    # one figure per class, with the chance diagonal for reference
    for cls in range(n_classes):
        plt.figure()
        plt.plot(fpr[cls], tpr[cls], label='ROC curve (area = %0.2f)' % roc_auc[cls])
        plt.plot([0, 1], [0, 1], 'k--')
        plt.xlim([0.0, 1.0])
        plt.ylim([0.0, 1.05])
        plt.xlabel('False Positive Rate')
        plt.ylabel('True Positive Rate')
        plt.title('Receiver operating characteristic example')
        plt.legend(loc="lower right")
        plt.show()
# ENCODE_DATA --------------------------------------------------------------------------
# Applies one hot encoder to data
# data: a dataframe
# class_var: string with class variable name
def encode_data(data, class_var):
    """One-hot encode the class column and min-max scale the feature columns.

    Parameters
    ----------
    data : pd.DataFrame
        Full dataset including the class column.
    class_var : str
        Name of the class column.

    Returns
    -------
    X_scaled : np.ndarray
        Features scaled column-wise into [0, 1] by MinMaxScaler.
    Y : np.ndarray
        One-hot encoded class matrix.
    enc : OneHotEncoder
        Fitted encoder (needed later to invert predictions).
    scaler : MinMaxScaler
        Fitted scaler (needed later to invert the feature scaling).
    """
    feature_names = data.drop([class_var], axis=1).columns.tolist()
    X = data[ feature_names ].values
    y = data[class_var].values
    # (dead locals n_features / n_classes removed -- they were never used)
    # create numerical encoding for the class attribute
    enc = OneHotEncoder()
    Y = enc.fit_transform(y[:, np.newaxis]).toarray()
    # scale features into [0, 1]; important for neural-network convergence
    scaler = MinMaxScaler()
    X_scaled = scaler.fit_transform(X)
    return X_scaled, Y, enc, scaler
# LOAD_TRAINING_DATA ---------------------------------------------------------------------
# loads into a multiarray format a training set previously saved in a .csv file
# dataset_path: string containing the path where the files will be saved
def load_training_data( dataset_path ):
    """Load the train/test/validation splits saved next to ``dataset_path``.

    Parameters
    ----------
    dataset_path : str
        Base path; '_Xtrain.csv', '_Ytrain.csv', ... suffixes are appended.

    Returns
    -------
    tuple of np.ndarray
        (X_train, Y_train, X_test, Y_test, X_validation, Y_validation).
    """
    base = dataset_path.replace(".csv", "")
    def _load(suffix):
        # each split was written as a headered CSV; return its value matrix
        return pd.read_csv(base + "_" + suffix + ".csv", index_col=False).values
    return (_load("Xtrain"), _load("Ytrain"),
            _load("Xtest"), _load("Ytest"),
            _load("Xvalidation"), _load("Yvalidation"))
# GENERATE_SAVE_TRAINING_DATA ------------------------------------------------------------
#
# dataset_path: string containing the path where the files will be saved
# X: NxM matrix representing the training data
# Y: NxC matrix representing the OneHotEconder of C classes
def generate_save_training_data( dataset_path, X, Y):
    """Split (X, Y) into train/validation/test sets (70/15/15) and save them.

    Each split is written as a CSV next to ``dataset_path`` with the
    '_Xtrain', '_Xtest', '_Xvalidation', '_Ytrain', '_Ytest', '_Yvalidation'
    suffixes, matching what load_training_data expects.

    Parameters
    ----------
    dataset_path : str
        Base path; the '.csv' extension is stripped before adding suffixes.
    X : np.ndarray
        Feature matrix of shape (samples, features).
    Y : np.ndarray
        One-hot encoded label matrix of shape (samples, classes).
    """
    # NOTE: the original docstring described unrelated parameters
    # (copy-paste from permute_single_features); corrected above.
    # 70% train, then split the remaining 30% evenly into validation/test
    X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.3, random_state=515)
    X_validation, X_test, Y_validation, Y_test = train_test_split(X_test, Y_test, test_size=0.5, random_state=515)
    base = dataset_path.replace(".csv", "")
    for suffix, split in (("_Xtrain", X_train), ("_Xtest", X_test),
                          ("_Xvalidation", X_validation), ("_Ytrain", Y_train),
                          ("_Ytest", Y_test), ("_Yvalidation", Y_validation)):
        np.savetxt(base + suffix + ".csv", split, delimiter=",")
# GENERATE_CONFUSION_MATRIX_DATA ---------------------------------
##############################################################################
# BAYESIAN NETWORK EXPLANATIONS #
##############################################################################
def compute_perm_range(feat, variance = 0.25):
    """Return the [min, max] interval a scaled feature may be permuted in.

    Features are scaled to [0, 1], so the raw interval feat +/- variance is
    clamped to that range.

    Parameters
    ----------
    feat : float
        Value of the (scaled) feature to be permuted.
    variance : float, optional
        Half-width of the permutation interval (default 0.25).

    Returns
    -------
    min_range : float
        Minimum value the feature can be permuted to (>= 0).
    max_range : float
        Maximum value the feature can be permuted to (<= 1).
    """
    # NOTE: the original docstring documented nonexistent parameters
    # (copy-paste); corrected above.  min/max replaces the manual clamping.
    min_range = max(feat - variance, 0)
    max_range = min(feat + variance, 1)
    return min_range, max_range
# PERMUTE_SINGLE_FEATURES_____________________________________________________________
#
def permute_single_features( my_array, samples = 300, variance = 0.25 ):
    """Generate feature-wise permutations of a single datapoint.

    For every feature, draw uniform samples within feature_val +/- variance
    (clamped to [0, 1]) while keeping all other features at their original
    values.  The unmodified datapoint is always the first row.

    Parameters
    ----------
    my_array : np.array
        The (scaled) datapoint to be locally explained.
    samples : int, optional
        Total permutation budget, split evenly across features (default 300).
    variance : float, optional
        Half-width of the permutation interval per feature (default 0.25).

    Returns
    -------
    permutations : list of list of float
        The original vector followed by its single-feature permutations.
    """
    permutations = []
    # keep a controlled number of decimal places
    my_array = np.round(my_array, 4)
    # BUG FIX: the original "backup" was a mere alias of the working array, so
    # the last random value of each feature leaked into the permutations of
    # all later features; real copies keep every permutation exactly one
    # feature away from the original datapoint.
    original = my_array.copy()
    num_features = original.shape[0]
    # the unmodified vector is always the first row
    permutations.append(original.tolist())
    perms_per_feature = int(round(samples / num_features, 0))
    for feat in range(num_features):
        # permutation interval [feat - variance, feat + variance], clamped
        min_range, max_range = compute_perm_range(original[feat], variance)
        for _ in range(perms_per_feature):
            candidate = original.copy()
            candidate[feat] = np.abs(np.round(rn.uniform(min_range, max_range), 4))
            permutations.append(candidate.tolist())
    return permutations
# LEARNBN -------------------------------------------
#
def learnBN( file_path, algorithm = "Hill Climbing" ):
    """Learn a Bayesian network from a discretized, normalised CSV dataset.

    Parameters
    ----------
    file_path : str
        Path to the (normalised) CSV file to learn the structure from.
    algorithm : str, optional
        Structure-learning strategy: one of "Hill Climbing" (default),
        "Local Search", "3off2" or "miic".

    Returns
    -------
    [bn, infoBN, essencGraph] : list
        The learned network, its information graph and its essential graph.
    """
    # NOTE: the original docstring described unrelated parameters
    # (copy-paste from permute_single_features); corrected above.
    learner = gum.BNLearner( file_path )
    # select the structure-learning strategy
    if( algorithm == "Hill Climbing"):
        print("Selecting Greedy Hill Climbing Algorithm")
        learner.useGreedyHillClimbing()
    if( algorithm == "Local Search" ):
        print("Selecting Local Search Algorithm")
        # (the original assigned this call's result to bn, only to overwrite
        # it below -- the strategy setter's return value is not the network)
        learner.useLocalSearchWithTabuList()
    if( algorithm == "3off2"):
        print("Selecting 3Off2 Algorithm")
        learner.use3off2()
    if( algorithm == "miic" ):
        print("Selecting MIIC Algorithm")
        learner.useMIIC()
    # BUG FIX: the original called learner.learnBN() twice, running the
    # expensive structure learning two times and discarding the first result
    bn = learner.learnBN()
    essencGraph = gum.EssentialGraph( bn )
    infoBN = gnb.getInformation( bn )
    return [ bn, infoBN, essencGraph ]
# DISCRETIZE_DATAFRAME -------------------------------------------------------
#
#
def discretize_dataframe( df, class_var, num_bins=4 ):
    """Discretize every continuous column of ``df`` into quantile bins.

    The class column is copied unchanged; every other column is replaced by
    its pandas.qcut quantile interval (duplicate bin edges are dropped, so a
    column may end up with fewer than ``num_bins`` distinct intervals).

    Parameters
    ----------
    df : pd.DataFrame
        Dataframe with continuous feature columns.
    class_var : str
        Name of the class column (left untouched).
    num_bins : int, optional
        Number of quantile bins per feature (default 4).

    Returns
    -------
    pd.DataFrame
        Dataframe with the same columns, features discretized into intervals.
    """
    # NOTE: the original docstring described unrelated parameters
    # (copy-paste from permute_single_features); corrected above.
    # quantile boundaries 0, 1/num_bins, ..., 1
    quantiles = np.array(range(num_bins + 1)) / (1.0 * num_bins)
    columns = []
    for col in df.columns.values:
        if col != class_var:
            columns.append(pd.DataFrame(pd.qcut(df[col], quantiles, duplicates='drop', precision=2), columns=[col]))
        else:
            # the class column is just copied
            columns.append(pd.DataFrame(df[col].values, columns=[col]))
    return pd.concat(columns, join='outer', axis=1)
# SAVE_DISCRETIZED_DATAFRAME ---------------------------------------------------
#
def save_discretized_dataframe(indx, df_model, model_type, perm_type, bins, dataset_name, path, class_var):
    """Discretize a permutations dataframe and save it to a per-instance CSV.

    Parameters
    ----------
    indx : int
        Index of the locally-explained datapoint (used in the file name).
    df_model : pd.DataFrame
        Dataframe with the feature permutations to discretize.
    model_type : str
        Tag embedded in the output file name.
    perm_type : str
        Tag embedded in the output file name.
    bins : int
        Number of quantile bins for discretize_dataframe.
    dataset_name : str
        Dataset file name; its extension is stripped for the output name.
    path : str
        Root directory for the output file.
    class_var : str
        Name of the class column (left untouched by discretization).
    """
    # NOTE: the original docstring described unrelated parameters
    # (copy-paste from permute_single_features); corrected above.
    file_path = path + dataset_name + "/" + str(indx) + "/" + re.sub( r"\.\w+", "", dataset_name ) + "_" + model_type +"_INDX_" + str(indx) + "_" + perm_type +".csv"
    # BUG FIX: discretize_dataframe's signature is (df, class_var, num_bins);
    # the original passed (df_model, bins, class_var), swapping the class
    # column name and the bin count.
    df_discr = discretize_dataframe( df_model, class_var, num_bins=bins )
    print("Saving discretized dataset into: %s\n" %(file_path))
    df_discr.to_csv( file_path, index=False)
# GENERATE_PERMUTATIONS -------------------------------------------
#
def generate_permutations( instance, labels_lst, feature_names, class_var, encoder, scaler, model, samples = 300, variance = 0.25):
    """Build a labelled dataframe of feature permutations around one instance.

    Parameters
    ----------
    instance : dict
        Entry produced by generate_local_predictions; its 'scaled_vector'
        is the point permuted here.
    labels_lst : list
        Human-readable label per class index.
    feature_names : list of str
        Column names for the output dataframe.
    class_var : str
        Name of the class column added to the output.
    encoder, scaler, model : objects
        Fitted one-hot encoder, feature scaler and black-box classifier.
    samples : int, optional
        Permutation budget passed to permute_single_features (default 300).
    variance : float, optional
        Per-feature permutation half-width (default 0.25).

    Returns
    -------
    pd.DataFrame
        Permutations in the original feature space plus their predicted label.
    """
    # datapoint in scaled feature space
    local_datapoint = np.array(instance['scaled_vector'])
    # (dead local for the original-space vector removed -- it was never used)
    # permute one feature at a time
    permutations = permute_single_features( local_datapoint, samples = samples, variance = variance )
    # back to the original feature space for a human-readable dataframe
    permutations_orig = scaler.inverse_transform( permutations )
    # label every permuted point with the black-box model's prediction
    predictions = encoder.inverse_transform( model.predict( permutations ) )
    labelled_predictions = [ labels_lst[ int(predictions[indx][0]) ] for indx in range(0, len(predictions))]
    df_local_permutations = pd.DataFrame( permutations_orig, columns = feature_names )
    df_local_permutations[ class_var ] = labelled_predictions
    return df_local_permutations
# GENERATE_BN_EXPLANATIONS ------------------------------------------------------------
#
def generate_BN_explanations(instance, label_lst, feature_names, class_var, encoder, scaler, model, path, dataset_name ):
    """Learn and display a Bayesian-network explanation for one prediction.

    Pipeline: permute the instance's features, label the permutations with
    the black-box model, discretize and normalise them, learn a BN from the
    result, run inference, render network/inference/information side by side,
    and save the network (.dot and .net) under ``path + 'explanations/...'``.

    Returns
    -------
    [bn, inference, infoBN] : list
        The learned network, its inference rendering and information graph.

    NOTE(review): assumes the output directories under ``path`` already
    exist -- confirm against callers.
    """
    # necessary for starting Numpy generated random numbers in an initial state
    np.random.seed(515)
    # Necessary for starting core Python generated random numbers in a state
    rn.seed(515)
    indx = instance['index']
    # e.g. "TRUE POSITIVE" -> "true_positives" (used as a directory name)
    prediction_type = instance['prediction_type'].lower()+"s"
    prediction_type = prediction_type.replace(" ", "_")
    # generate permutations
    df = generate_permutations( instance, label_lst, feature_names, class_var, encoder, scaler, model)
    # discretize data
    df_discr = discretize_dataframe( df, class_var, num_bins=4 )
    # save discretised dataframe (for debugging and reproducibility purposes)
    path_to_permutations = path + "feature_permutations/" + dataset_name.replace(".csv","") + "/" + prediction_type + "/" + str(indx) + ".csv"
    df_discr.to_csv( path_to_permutations, index=False)
    # normalise dataframe (pads interval decimals to 4 places)
    normalise_dataframe( path_to_permutations )
    # learn BN from the normalised permutations file
    bn, infoBN, essencGraph = learnBN( path_to_permutations.replace(".csv", "_norm.csv") )
    # perform inference over all columns with no evidence
    inference = gnb.getInference(bn, evs={},targets=df_discr.columns.to_list(), size='12')
    # show networks
    gnb.sideBySide(*[bn, inference, infoBN ],
                   captions=[ "Bayesian Network", "Inference", "Information Network" ])
    # save network renderings to file
    path_to_explanation = path + "explanations/" + dataset_name.replace(".csv", "") + "/BN/" + prediction_type + "/"
    gum.lib.bn2graph.dotize( bn , path_to_explanation + str(indx) + "_BN" )
    gum.saveBN(bn,path_to_explanation + str(indx) + "_BN.net" )
    return [bn, inference, infoBN]
# GENERATE_BN_EXPLANATIONSMB ------------------------------------------------------------
#
def generate_BN_explanationsMB(instance, label_lst, feature_names, class_var, encoder, scaler, model, path, dataset_name, variance = 0.25, algorithm = "Hill Climbing" ):
    """Variant of generate_BN_explanations that also shows the Markov blanket.

    Same pipeline (permute, label, discretize, normalise, learn BN, infer,
    render, save), but the learning algorithm and permutation variance are
    configurable and the class variable's Markov blanket is computed and
    displayed instead of the information graph.

    Returns
    -------
    [bn, inference, infoBN, markov_blanket] : list
        Learned network, inference rendering, information graph and the
        Markov blanket of ``class_var``.

    NOTE(review): assumes the output directories under ``path`` already
    exist -- confirm against callers.
    """
    # necessary for starting Numpy generated random numbers in an initial state
    np.random.seed(515)
    # Necessary for starting core Python generated random numbers in a state
    rn.seed(515)
    indx = instance['index']
    # e.g. "TRUE POSITIVE" -> "true_positives" (used as a directory name)
    prediction_type = instance['prediction_type'].lower()+"s"
    prediction_type = prediction_type.replace(" ", "_")
    # generate permutations
    df = generate_permutations( instance, label_lst, feature_names, class_var, encoder, scaler, model, variance = variance)
    # discretize data
    df_discr = discretize_dataframe( df, class_var, num_bins=4 )
    # save discretised dataframe (for debugging and reproducibility purposes)
    path_to_permutations = path + "feature_permutations/" + dataset_name.replace(".csv","") + "/" + prediction_type + "/" + str(indx) + ".csv"
    df_discr.to_csv( path_to_permutations, index=False)
    # normalise dataframe (pads interval decimals to 4 places)
    normalise_dataframe( path_to_permutations )
    # learn BN with the selected structure-learning algorithm
    bn, infoBN, essencGraph = learnBN( path_to_permutations.replace(".csv", "_norm.csv"), algorithm = algorithm)
    # perform inference over all columns with no evidence
    inference = gnb.getInference(bn, evs={},targets=df_discr.columns.to_list(), size='12')
    # compute Markov Blanket of the class variable
    markov_blanket = gum.MarkovBlanket(bn, class_var)
    # show networks
    gnb.sideBySide(*[bn, inference, markov_blanket ],
                   captions=[ "Bayesian Network", "Inference", "Markov Blanket" ])
    # save network renderings to file
    path_to_explanation = path + "explanations/" + dataset_name.replace(".csv", "") + "/BN/" + prediction_type + "/"
    gum.lib.bn2graph.dotize( bn , path_to_explanation + str(indx) + "_BN" )
    gum.saveBN(bn,path_to_explanation + str(indx) + "_BN.net" )
    return [bn, inference, infoBN, markov_blanket]
# GENERATE_LOCAL_PREDICTIONS -------------------------------------------
#
def generate_local_predictions( X, Y, model, scaler, encoder ):
    """Classify every datapoint as TP/TN/FP/FN against the black-box model.

    Parameters
    ----------
    X : np.ndarray
        Scaled feature matrix, shape (samples, features).
    Y : np.ndarray
        One-hot encoded ground-truth labels, shape (samples, classes).
    model : object
        Trained classifier exposing ``predict(X)``.
    scaler : object
        Fitted scaler exposing ``inverse_transform`` (back to original space).
    encoder : object
        Fitted one-hot encoder exposing ``inverse_transform``.

    Returns
    -------
    list of dict
        One dict per datapoint with its index, original/scaled vectors,
        ground truth, prediction and prediction type.
    """
    # recover feature vectors in the original (unscaled) space
    orig_vec = np.round(scaler.inverse_transform(X), 6)
    # predict all datapoints in one batch
    predictions = model.predict( X )
    # extract the label of each prediction
    prediction_class = encoder.inverse_transform( predictions )
    local_data_dict = []
    for indx in range(orig_vec.shape[0]):
        ground_truth = np.expand_dims(Y[indx], axis=0)
        ground_truth_class = encoder.inverse_transform( ground_truth )[0][0]
        prediction = prediction_class[indx][0]
        pred, truth = int(prediction), int(ground_truth_class)
        # BUG FIX: the original chained four independent ifs; for labels
        # outside {0, 1} pred_type stayed unbound (NameError) or carried the
        # stale value of the previous iteration. An elif chain with an
        # explicit fallback makes the classification total and deterministic.
        if pred == 1 and truth == 1:
            pred_type = "TRUE POSITIVE"
        elif pred == 0 and truth == 0:
            pred_type = "TRUE NEGATIVE"
        elif pred == 0 and truth == 1:
            pred_type = "FALSE NEGATIVE"
        elif pred == 1 and truth == 0:
            pred_type = "FALSE POSITIVE"
        else:
            pred_type = "UNKNOWN"
        local_data_dict.append( {'index' : indx,
                                 'original_vector' : orig_vec[indx,:].tolist(),
                                 'scaled_vector' : X[indx,:].tolist(),
                                 'ground_truth' : ground_truth_class,
                                 'predictions' : prediction,
                                 'prediction_type' : pred_type})
    return local_data_dict
##################################################################################
# TEXT PROCESSING #
# ###############################################################################
# FIND -----------------------------------------------
#
def find(s, ch):
    """Return the indices of every occurrence of character ``ch`` in ``s``."""
    positions = []
    for position, character in enumerate(s):
        if character == ch:
            positions.append(position)
    return positions
# UNTOKENIZE -----------------------------------------------
#
def untokenize( tokens, delim ):
    """Join ``tokens`` back into a single string separated by ``delim``.

    Parameters
    ----------
    tokens : list of str
        Tokens to re-assemble.
    delim : str
        Separator inserted between consecutive tokens.

    Returns
    -------
    str
        The joined string ("" for an empty token list).
    """
    # BUG FIX / idiom: the original indexed tokens[0] (IndexError on an empty
    # list) and built the result with quadratic string concatenation;
    # str.join is linear and handles the empty case.
    return delim.join(tokens)
# NORMALISE_LINE -------------------------------------------
#
def normalise_line( my_str, class_label ):
    """Normalise one CSV line of quantile-interval tokens to 4 decimal places.

    Each token between '","' separators looks like '(lo, hi]' where lo and hi
    are decimal numbers; the digits after each decimal point are truncated or
    zero-padded so both numbers carry exactly four decimals, making equal
    intervals compare equal as strings.

    Parameters
    ----------
    my_str : str
        One data line of the permutations CSV, without trailing newline.
    class_label : str
        Class value at the end of the line; stripped here and re-appended to
        the returned string.

    Returns
    -------
    str
        The re-assembled line with normalised tokens and the class label.

    NOTE(review): assumes every token contains at least two '.' characters and
    one ',' (the '(lo, hi]' shape produced by discretize_dataframe); a token
    without them would raise IndexError here -- confirm against callers.
    """
    # strip the trailing class label and canonicalise qcut's -1e-05 lower edge
    my_str = my_str.replace("\","+class_label, "")
    my_str = my_str.replace("-1e-05", "0.0000")
    tokens = my_str.split("\",\"")
    tokens_norm = []
    for token in tokens:
        token = token.replace("]","")
        # indxs: positions of the two decimal points inside '(lo, hi'
        indxs = find(token, ".")
        # indx_comma: index just past ', ', i.e. where the hi number starts
        indx_comma = find(token, ",")[0]+2
        # Case 1: both numbers already have >= 4 decimals -> truncate both
        if( (len(token[indxs[1]+1 : -1 ]) >= 4) & (len( token[indxs[0]+1 : indx_comma-2 ]) >= 4) ):
            token_temp = token[0:indxs[0]] + "." + token[indxs[0] + 1 : indxs[0]+5] + ", " +token[indx_comma:indxs[1]] + token[indxs[1] : indxs[1]+5 ] + "]"
        # Case 2: hi has < 4 decimals, lo has >= 4 -> truncate lo, pad hi
        if( (len(token[indxs[1]+1 : -1 ]) < 4) & (len( token[indxs[0]+1 : indx_comma-2 ]) >= 4) ):
            extra = "0"*(np.abs(len(token[indxs[1]+1 : -1 ]) - 4))
            token_temp = token[0:indxs[0]] + "." + token[indxs[0] + 1 : indxs[0]+5] + ", " +token[indx_comma:indxs[1]] + token[indxs[1] : -1 ] + extra + "]"
        # Case 3: hi has >= 4 decimals, lo has < 4 -> pad lo, truncate hi
        if( (len(token[indxs[1]+1 : -1 ]) >= 4) & (len( token[indxs[0]+1 : indx_comma-2 ]) < 4) ):
            extra = "0"*(np.abs(len( token[indxs[0]+1 : indx_comma-2 ]) - 4))
            token_temp = token[0:indxs[0]] + "." + extra + ", " +token[indx_comma:indxs[1]] + token[indxs[1] : -1 ] + extra + "]"
        # Case 4: both numbers have < 4 decimals -> pad both
        if( (len(token[indxs[1]+1 : -1 ]) < 4) & (len( token[indxs[0]+1 : indx_comma-2 ]) < 4) ):
            extra2 = "0"*(np.abs(len(token[indxs[1]+1 : -1 ]) - 4))
            extra1 = "0"*(np.abs(len(token[indxs[0]+1 : -1 ]) - 4))
            token_temp = token[0:indxs[0]] + "." + extra1 + ", " +token[indx_comma:indxs[1]] + token[indxs[1] : -1 ] + extra2 + "]"
        tokens_norm.append(token_temp)
    return untokenize( tokens_norm, "\",\"") + "\"," +class_label
# NORMALISE_LINE -------------------------------------------
#
def normalise_dataframe( path_to_permutations ):
    """Normalise every data row of the CSV at *path_to_permutations*.

    Copies the header line unchanged, passes each subsequent line
    through ``normalise_line`` (with the row's trailing class label),
    and writes the result to a sibling file whose name has ``.csv``
    replaced by ``_norm.csv``.

    Improvements over the original: both files are opened with ``with``
    so the handles are closed even if ``normalise_line`` raises, and
    the builtin name ``file`` is no longer shadowed.
    """
    out_path = path_to_permutations.replace(".csv", "_norm.csv")
    with open(path_to_permutations, "r") as f_read, open(out_path, "w") as f_write:
        header = f_read.readline().replace("\n", "")
        f_write.write(header + "\n")
        for line in f_read:
            # class label is the text after the final '",' on the row
            class_label = line.split("\",")[-1].replace("\n", "")
            # normalise dataframe input
            line_norm = normalise_line(line.replace("\n", ""), class_label)
            # write normalised input to file
            f_write.write(line_norm + "\n")
| 33,430 | 37.783063 | 177 | py |
LM-LSTM-CRF | LM-LSTM-CRF-master/eval_wc.py |
from __future__ import print_function
import datetime
import time
import torch
import torch.autograd as autograd
import torch.nn as nn
import torch.optim as optim
import codecs
from model.crf import *
from model.lm_lstm_crf import *
import model.utils as utils
from model.evaluator import eval_wc
import argparse
import json
import os
import sys
from tqdm import tqdm
import itertools
import functools
# Entry point: evaluate a saved LM-LSTM-CRF checkpoint on a test corpus and
# print F1/precision/recall/accuracy (or accuracy alone, per --eva_matrix).
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Evaluating LM-BLSTM-CRF')
    parser.add_argument('--load_arg', default='./checkpoint/soa/check_wc_p_char_lstm_crf.json', help='path to arg json')
    parser.add_argument('--load_check_point', default='./checkpoint/soa/check_wc_p_char_lstm_crf.model', help='path to model checkpoint file')
    parser.add_argument('--gpu',type=int, default=0, help='gpu id')
    parser.add_argument('--eva_matrix', choices=['a', 'fa'], default='fa', help='use f1 and accuracy or f1 alone')
    parser.add_argument('--test_file', default='', help='path to test file, if set to none, would use test_file path in the checkpoint file')
    args = parser.parse_args()

    # restore the training-time arguments and the vocabulary/label maps that
    # were serialized with the checkpoint
    with open(args.load_arg, 'r') as f:
        jd = json.load(f)
    jd = jd['args']

    # map_location keeps tensors on CPU regardless of where they were saved
    checkpoint_file = torch.load(args.load_check_point, map_location=lambda storage, loc: storage)
    f_map = checkpoint_file['f_map']
    l_map = checkpoint_file['l_map']
    c_map = checkpoint_file['c_map']
    in_doc_words = checkpoint_file['in_doc_words']
    if args.gpu >= 0:
        torch.cuda.set_device(args.gpu)

    # load corpus (fall back to the test file recorded in the checkpoint args)
    if args.test_file:
        with codecs.open(args.test_file, 'r', 'utf-8') as f:
            test_lines = f.readlines()
    else:
        with codecs.open(jd['test_file'], 'r', 'utf-8') as f:
            test_lines = f.readlines()

    # converting format
    test_features, test_labels = utils.read_corpus(test_lines)

    # construct dataset (bucketed; evaluated with fixed batch size 50)
    test_dataset, forw_test, back_test = utils.construct_bucket_mean_vb_wc(test_features, test_labels, l_map, c_map, f_map, jd['caseless'])

    test_dataset_loader = [torch.utils.data.DataLoader(tup, 50, shuffle=False, drop_last=False) for tup in test_dataset]

    # build model with the same hyper-parameters it was trained with
    ner_model = LM_LSTM_CRF(len(l_map), len(c_map), jd['char_dim'], jd['char_hidden'], jd['char_layers'], jd['word_dim'], jd['word_hidden'], jd['word_layers'], len(f_map), jd['drop_out'], large_CRF=jd['small_crf'], if_highway=jd['high_way'], in_doc_words=in_doc_words, highway_layers = jd['highway_layers'])

    ner_model.load_state_dict(checkpoint_file['state_dict'])

    if args.gpu >= 0:
        if_cuda = True
        torch.cuda.set_device(args.gpu)
        ner_model.cuda()
        packer = CRFRepack_WC(len(l_map), True)
    else:
        if_cuda = False
        packer = CRFRepack_WC(len(l_map), False)

    evaluator = eval_wc(packer, l_map, args.eva_matrix)

    print('start')
    if 'f' in args.eva_matrix:
        # calc_score returns a dict of label -> (f1, precision, recall, accuracy, msg)
        result = evaluator.calc_score(ner_model, test_dataset_loader)
        for label, (test_f1, test_pre, test_rec, test_acc, msg) in result.items():
            print(jd['checkpoint'] +' : %s : test_f1: %.4f test_rec: %.4f test_pre: %.4f test_acc: %.4f | %s\n' % (label, test_f1, test_rec, test_pre, test_acc, msg))
    else:
        # accuracy-only mode: calc_score returns a single scalar
        test_acc = evaluator.calc_score(ner_model, test_dataset_loader)
        print(jd['checkpoint'] + ' test_acc: %.4f\n' % (test_acc))
    print('end')
| 3,402 | 36.395604 | 307 | py |
LM-LSTM-CRF | LM-LSTM-CRF-master/train_wc.py | from __future__ import print_function
import datetime
import time
import torch
import torch.autograd as autograd
import torch.nn as nn
import torch.optim as optim
import codecs
from model.crf import *
from model.lm_lstm_crf import *
import model.utils as utils
from model.evaluator import eval_wc
import argparse
import json
import os
import sys
from tqdm import tqdm
import itertools
import functools
def eprint(*args, **kwargs):
    """Print to stderr; accepts the same arguments as the builtin print."""
    print(*args, file=sys.stderr, **kwargs)
# Entry point: train an LM-LSTM-CRF tagger (optionally co-trained with a
# language model), evaluating on dev each epoch and checkpointing whenever the
# dev score improves; stops early after --patience non-improving epochs (but
# not before --least_iters epochs).
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Learning with LM-LSTM-CRF together with Language Model')
    parser.add_argument('--rand_embedding', action='store_true', help='random initialize word embedding')
    parser.add_argument('--emb_file', default='./embedding/glove.6B.100d.txt', help='path to pre-trained embedding')
    parser.add_argument('--train_file', default='./data/ner/eng.train.iobes', help='path to training file')
    parser.add_argument('--dev_file', default='./data/ner/eng.testa.iobes', help='path to development file')
    parser.add_argument('--test_file', default='./data/ner/eng.testb.iobes', help='path to test file')
    parser.add_argument('--gpu', type=int, default=0, help='gpu id')
    parser.add_argument('--batch_size', type=int, default=10, help='batch_size')
    parser.add_argument('--unk', default='unk', help='unknow-token in pre-trained embedding')
    parser.add_argument('--char_hidden', type=int, default=300, help='dimension of char-level layers')
    parser.add_argument('--word_hidden', type=int, default=300, help='dimension of word-level layers')
    parser.add_argument('--drop_out', type=float, default=0.55, help='dropout ratio')
    parser.add_argument('--epoch', type=int, default=200, help='maximum epoch number')
    parser.add_argument('--start_epoch', type=int, default=0, help='start point of epoch')
    parser.add_argument('--checkpoint', default='./checkpoint/', help='checkpoint path')
    parser.add_argument('--caseless', action='store_true', help='caseless or not')
    parser.add_argument('--char_dim', type=int, default=30, help='dimension of char embedding')
    parser.add_argument('--word_dim', type=int, default=100, help='dimension of word embedding')
    parser.add_argument('--char_layers', type=int, default=1, help='number of char level layers')
    parser.add_argument('--word_layers', type=int, default=1, help='number of word level layers')
    parser.add_argument('--lr', type=float, default=0.015, help='initial learning rate')
    parser.add_argument('--lr_decay', type=float, default=0.05, help='decay ratio of learning rate')
    parser.add_argument('--fine_tune', action='store_false', help='fine tune the diction of word embedding or not')
    parser.add_argument('--load_check_point', default='', help='path previous checkpoint that want to be loaded')
    parser.add_argument('--load_opt', action='store_true', help='also load optimizer from the checkpoint')
    parser.add_argument('--update', choices=['sgd', 'adam'], default='sgd', help='optimizer choice')
    parser.add_argument('--momentum', type=float, default=0.9, help='momentum for sgd')
    parser.add_argument('--clip_grad', type=float, default=5.0, help='clip grad at')
    parser.add_argument('--small_crf', action='store_false', help='use small crf instead of large crf, refer model.crf module for more details')
    parser.add_argument('--mini_count', type=float, default=5, help='thresholds to replace rare words with <unk>')
    parser.add_argument('--lambda0', type=float, default=1, help='lambda0')
    parser.add_argument('--co_train', action='store_true', help='cotrain language model')
    parser.add_argument('--patience', type=int, default=15, help='patience for early stop')
    parser.add_argument('--high_way', action='store_true', help='use highway layers')
    parser.add_argument('--highway_layers', type=int, default=1, help='number of highway layers')
    parser.add_argument('--eva_matrix', choices=['a', 'fa'], default='fa', help='use f1 and accuracy or accuracy alone')
    parser.add_argument('--least_iters', type=int, default=50, help='at least train how many epochs before stop')
    parser.add_argument('--shrink_embedding', action='store_true', help='shrink the embedding dictionary to corpus (open this if pre-trained embedding dictionary is too large, but disable this may yield better results on external corpus)')
    args = parser.parse_args()

    if args.gpu >= 0:
        torch.cuda.set_device(args.gpu)

    print('setting:')
    print(args)

    # load corpus
    print('loading corpus')
    with codecs.open(args.train_file, 'r', 'utf-8') as f:
        lines = f.readlines()
    with codecs.open(args.dev_file, 'r', 'utf-8') as f:
        dev_lines = f.readlines()
    with codecs.open(args.test_file, 'r', 'utf-8') as f:
        test_lines = f.readlines()

    dev_features, dev_labels = utils.read_corpus(dev_lines)
    test_features, test_labels = utils.read_corpus(test_lines)

    # either resume maps/epoch from a checkpoint, or build the word / char /
    # label coding tables from the training corpus
    if args.load_check_point:
        if os.path.isfile(args.load_check_point):
            print("loading checkpoint: '{}'".format(args.load_check_point))
            checkpoint_file = torch.load(args.load_check_point)
            args.start_epoch = checkpoint_file['epoch']
            f_map = checkpoint_file['f_map']
            l_map = checkpoint_file['l_map']
            c_map = checkpoint_file['c_map']
            in_doc_words = checkpoint_file['in_doc_words']
            train_features, train_labels = utils.read_corpus(lines)
        else:
            print("no checkpoint found at: '{}'".format(args.load_check_point))
    else:
        print('constructing coding table')

        # converting format
        train_features, train_labels, f_map, l_map, c_map = utils.generate_corpus_char(lines, if_shrink_c_feature=True, c_thresholds=args.mini_count, if_shrink_w_feature=False)

        f_set = {v for v in f_map}
        f_map = utils.shrink_features(f_map, train_features, args.mini_count)

        if args.rand_embedding:
            print("embedding size: '{}'".format(len(f_map)))
            in_doc_words = len(f_map)
        else:
            # extend the feature set with dev/test words so embeddings cover them
            dt_f_set = functools.reduce(lambda x, y: x | y, map(lambda t: set(t), dev_features), f_set)
            dt_f_set = functools.reduce(lambda x, y: x | y, map(lambda t: set(t), test_features), dt_f_set)
            print("feature size: '{}'".format(len(f_map)))
            print('loading embedding')
            if args.fine_tune: # which means does not do fine-tune
                f_map = {'<eof>': 0}
            f_map, embedding_tensor, in_doc_words = utils.load_embedding_wlm(args.emb_file, ' ', f_map, dt_f_set, args.caseless, args.unk, args.word_dim, shrink_to_corpus=args.shrink_embedding)
            print("embedding size: '{}'".format(len(f_map)))

        # make sure every dev/test label has an id
        l_set = functools.reduce(lambda x, y: x | y, map(lambda t: set(t), dev_labels))
        l_set = functools.reduce(lambda x, y: x | y, map(lambda t: set(t), test_labels), l_set)
        for label in l_set:
            if label not in l_map:
                l_map[label] = len(l_map)

    print('constructing dataset')
    # construct dataset
    dataset, forw_corp, back_corp = utils.construct_bucket_mean_vb_wc(train_features, train_labels, l_map, c_map, f_map, args.caseless)
    dev_dataset, forw_dev, back_dev = utils.construct_bucket_mean_vb_wc(dev_features, dev_labels, l_map, c_map, f_map, args.caseless)
    test_dataset, forw_test, back_test = utils.construct_bucket_mean_vb_wc(test_features, test_labels, l_map, c_map, f_map, args.caseless)

    dataset_loader = [torch.utils.data.DataLoader(tup, args.batch_size, shuffle=True, drop_last=False) for tup in dataset]
    dev_dataset_loader = [torch.utils.data.DataLoader(tup, 50, shuffle=False, drop_last=False) for tup in dev_dataset]
    test_dataset_loader = [torch.utils.data.DataLoader(tup, 50, shuffle=False, drop_last=False) for tup in test_dataset]

    # build model
    print('building model')
    ner_model = LM_LSTM_CRF(len(l_map), len(c_map), args.char_dim, args.char_hidden, args.char_layers, args.word_dim, args.word_hidden, args.word_layers, len(f_map), args.drop_out, large_CRF=args.small_crf, if_highway=args.high_way, in_doc_words=in_doc_words, highway_layers = args.highway_layers)

    if args.load_check_point:
        ner_model.load_state_dict(checkpoint_file['state_dict'])
    else:
        if not args.rand_embedding:
            ner_model.load_pretrained_word_embedding(embedding_tensor)
        ner_model.rand_init(init_word_embedding=args.rand_embedding)

    if args.update == 'sgd':
        optimizer = optim.SGD(ner_model.parameters(), lr=args.lr, momentum=args.momentum)
    elif args.update == 'adam':
        optimizer = optim.Adam(ner_model.parameters(), lr=args.lr)

    if args.load_check_point and args.load_opt:
        optimizer.load_state_dict(checkpoint_file['optimizer'])

    # crit_lm: language-model co-training loss; crit_ner: CRF sequence loss
    crit_lm = nn.CrossEntropyLoss()
    crit_ner = CRFLoss_vb(len(l_map), l_map['<start>'], l_map['<pad>'])

    if args.gpu >= 0:
        if_cuda = True
        print('device: ' + str(args.gpu))
        torch.cuda.set_device(args.gpu)
        crit_ner.cuda()
        crit_lm.cuda()
        ner_model.cuda()
        packer = CRFRepack_WC(len(l_map), True)
    else:
        if_cuda = False
        packer = CRFRepack_WC(len(l_map), False)

    tot_length = sum(map(lambda t: len(t), dataset_loader))

    best_f1 = float('-inf')
    best_acc = float('-inf')
    track_list = list()
    start_time = time.time()
    epoch_list = range(args.start_epoch, args.start_epoch + args.epoch)
    patience_count = 0

    evaluator = eval_wc(packer, l_map, args.eva_matrix)

    # NOTE(review): args.start_epoch is reused as the loop variable, so it
    # always holds the current epoch number (used in logs and checkpoints).
    for epoch_idx, args.start_epoch in enumerate(epoch_list):

        epoch_loss = 0
        ner_model.train()

        for f_f, f_p, b_f, b_p, w_f, tg_v, mask_v, len_v in tqdm(
                itertools.chain.from_iterable(dataset_loader), mininterval=2,
                desc=' - Tot it %d (epoch %d)' % (tot_length, args.start_epoch), leave=False, file=sys.stdout):
            f_f, f_p, b_f, b_p, w_f, tg_v, mask_v = packer.repack_vb(f_f, f_p, b_f, b_p, w_f, tg_v, mask_v, len_v)
            ner_model.zero_grad()
            scores = ner_model(f_f, f_p, b_f, b_p, w_f)
            loss = crit_ner(scores, tg_v, mask_v)
            epoch_loss += utils.to_scalar(loss)
            if args.co_train:
                # shift word sequences by one step for forward/backward LM targets
                cf_p = f_p[0:-1, :].contiguous()
                cb_p = b_p[1:, :].contiguous()
                cf_y = w_f[1:, :].contiguous()
                cb_y = w_f[0:-1, :].contiguous()
                cfs, _ = ner_model.word_pre_train_forward(f_f, cf_p)
                loss = loss + args.lambda0 * crit_lm(cfs, cf_y.view(-1))
                cbs, _ = ner_model.word_pre_train_backward(b_f, cb_p)
                loss = loss + args.lambda0 * crit_lm(cbs, cb_y.view(-1))
            loss.backward()
            nn.utils.clip_grad_norm_(ner_model.parameters(), args.clip_grad)
            optimizer.step()
        epoch_loss /= tot_length

        # update lr
        if args.update == 'sgd':
            utils.adjust_learning_rate(optimizer, args.lr / (1 + (args.start_epoch + 1) * args.lr_decay))

        # eval & save check_point
        if 'f' in args.eva_matrix:
            dev_result = evaluator.calc_score(ner_model, dev_dataset_loader)
            for label, (dev_f1, dev_pre, dev_rec, dev_acc, msg) in dev_result.items():
                print('DEV : %s : dev_f1: %.4f dev_rec: %.4f dev_pre: %.4f dev_acc: %.4f | %s\n' % (label, dev_f1, dev_rec, dev_pre, dev_acc, msg))
            (dev_f1, dev_pre, dev_rec, dev_acc, msg) = dev_result['total']

            # checkpoint only when the overall dev F1 improves
            if dev_f1 > best_f1:
                patience_count = 0
                best_f1 = dev_f1
                test_result = evaluator.calc_score(ner_model, test_dataset_loader)
                for label, (test_f1, test_pre, test_rec, test_acc, msg) in test_result.items():
                    print('TEST : %s : test_f1: %.4f test_rec: %.4f test_pre: %.4f test_acc: %.4f | %s\n' % (label, test_f1, test_rec, test_pre, test_acc, msg))
                (test_f1, test_pre, test_rec, test_acc, msg) = test_result['total']
                track_list.append(
                    {'loss': epoch_loss, 'dev_f1': dev_f1, 'dev_acc': dev_acc, 'test_f1': test_f1,
                     'test_acc': test_acc})
                print(
                    '(loss: %.4f, epoch: %d, dev F1 = %.4f, dev acc = %.4f, F1 on test = %.4f, acc on test= %.4f), saving...' %
                    (epoch_loss,
                     args.start_epoch,
                     dev_f1,
                     dev_acc,
                     test_f1,
                     test_acc))

                try:
                    utils.save_checkpoint({
                        'epoch': args.start_epoch,
                        'state_dict': ner_model.state_dict(),
                        'optimizer': optimizer.state_dict(),
                        'f_map': f_map,
                        'l_map': l_map,
                        'c_map': c_map,
                        'in_doc_words': in_doc_words
                    }, {'track_list': track_list,
                        'args': vars(args)
                        }, args.checkpoint + 'cwlm_lstm_crf')
                except Exception as inst:
                    print(inst)

            else:
                patience_count += 1
                print('(loss: %.4f, epoch: %d, dev F1 = %.4f, dev acc = %.4f)' %
                      (epoch_loss,
                       args.start_epoch,
                       dev_f1,
                       dev_acc))
                track_list.append({'loss': epoch_loss, 'dev_f1': dev_f1, 'dev_acc': dev_acc})

        else:

            # accuracy-only evaluation mode
            dev_acc = evaluator.calc_score(ner_model, dev_dataset_loader)

            if dev_acc > best_acc:
                patience_count = 0
                best_acc = dev_acc

                test_acc = evaluator.calc_score(ner_model, test_dataset_loader)

                track_list.append(
                    {'loss': epoch_loss, 'dev_acc': dev_acc, 'test_acc': test_acc})

                print(
                    '(loss: %.4f, epoch: %d, dev acc = %.4f, acc on test= %.4f), saving...' %
                    (epoch_loss,
                     args.start_epoch,
                     dev_acc,
                     test_acc))

                try:
                    utils.save_checkpoint({
                        'epoch': args.start_epoch,
                        'state_dict': ner_model.state_dict(),
                        'optimizer': optimizer.state_dict(),
                        'f_map': f_map,
                        'l_map': l_map,
                        'c_map': c_map,
                        'in_doc_words': in_doc_words
                    }, {'track_list': track_list,
                        'args': vars(args)
                        }, args.checkpoint + 'cwlm_lstm_crf')
                except Exception as inst:
                    print(inst)

            else:
                patience_count += 1
                print('(loss: %.4f, epoch: %d, dev acc = %.4f)' %
                      (epoch_loss,
                       args.start_epoch,
                       dev_acc))
                track_list.append({'loss': epoch_loss, 'dev_acc': dev_acc})

        print('epoch: ' + str(args.start_epoch) + '\t in ' + str(args.epoch) + ' take: ' + str(
            time.time() - start_time) + ' s')

        # early stop once patience is exhausted (after the minimum epoch count)
        if patience_count >= args.patience and args.start_epoch >= args.least_iters:
            break

    #print best
    if 'f' in args.eva_matrix:
        eprint(args.checkpoint + ' dev_f1: %.4f dev_rec: %.4f dev_pre: %.4f dev_acc: %.4f test_f1: %.4f test_rec: %.4f test_pre: %.4f test_acc: %.4f\n' % (dev_f1, dev_rec, dev_pre, dev_acc, test_f1, test_rec, test_pre, test_acc))
    else:
        eprint(args.checkpoint + ' dev_acc: %.4f test_acc: %.4f\n' % (dev_acc, test_acc))

    # printing summary
    print('setting:')
    print(args)
| 15,992 | 48.82243 | 297 | py |
LM-LSTM-CRF | LM-LSTM-CRF-master/seq_w.py | from __future__ import print_function
import datetime
import time
import torch
import torch.autograd as autograd
import torch.nn as nn
import torch.optim as optim
import codecs
from model.crf import *
from model.lstm_crf import *
import model.utils as utils
from model.predictor import predict_w
import argparse
import json
import os
import sys
from tqdm import tqdm
import itertools
import functools
# Entry point: load a trained word-level LSTM-CRF checkpoint and annotate an
# un-labelled corpus, writing predictions to --output_file.
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Evaluating LM-BLSTM-CRF')
    parser.add_argument('--load_arg', default='./checkpoint/ner/ner_4_cwlm_lstm_crf.json', help='path to arg json')
    parser.add_argument('--load_check_point', default='./checkpoint/ner/ner_4_cwlm_lstm_crf.model', help='path to model checkpoint file')
    parser.add_argument('--gpu',type=int, default=0, help='gpu id')
    parser.add_argument('--decode_type', choices=['label', 'string'], default='string', help='type of decode function, set `label` to couple label with text, or set `string` to insert label into test')
    parser.add_argument('--batch_size', type=int, default=50, help='size of batch')
    parser.add_argument('--input_file', default='data/ner2003/test.txt', help='path to input un-annotated corpus')
    parser.add_argument('--output_file', default='output.txt', help='path to output file')
    args = parser.parse_args()

    # restore the training-time arguments and vocab/label maps saved with the model
    print('loading dictionary')
    with open(args.load_arg, 'r') as f:
        jd = json.load(f)
    jd = jd['args']

    # map_location keeps tensors on CPU regardless of where they were saved
    checkpoint_file = torch.load(args.load_check_point, map_location=lambda storage, loc: storage)
    f_map = checkpoint_file['f_map']
    l_map = checkpoint_file['l_map']

    if args.gpu >= 0:
        torch.cuda.set_device(args.gpu)

    # loading corpus
    print('loading corpus')
    with codecs.open(args.input_file, 'r', 'utf-8') as f:
        lines = f.readlines()

    # converting format
    features = utils.read_features(lines)

    # build model with the same hyper-parameters it was trained with
    print('loading model')
    ner_model = LSTM_CRF(len(f_map), len(l_map), jd['embedding_dim'], jd['hidden'], jd['layers'], jd['drop_out'], large_CRF=jd['small_crf'])

    ner_model.load_state_dict(checkpoint_file['state_dict'])

    if args.gpu >= 0:
        if_cuda = True
        torch.cuda.set_device(args.gpu)
        ner_model.cuda()
    else:
        if_cuda = False

    # 'label' couples each token with its tag; 'string' inserts tags into text
    decode_label = (args.decode_type == 'label')

    predictor = predict_w(if_cuda, f_map, l_map, f_map['<eof>'], l_map['<pad>'], l_map['<start>'], decode_label, args.batch_size, jd['caseless'])

    print('annotating')

    with open(args.output_file, 'w') as fout:
        predictor.output_batch(ner_model, features, fout) | 2,630 | 36.056338 | 201 | py
LM-LSTM-CRF | LM-LSTM-CRF-master/train_w.py | from __future__ import print_function
import datetime
import time
import torch
import torch.autograd as autograd
import torch.nn as nn
import torch.optim as optim
import codecs
from model.crf import *
from model.lstm_crf import *
import model.utils as utils
from model.evaluator import eval_w
import argparse
import json
import os
import sys
from tqdm import tqdm
import itertools
import functools
def eprint(*args, **kwargs):
    """Print to stderr; accepts the same arguments as the builtin print."""
    print(*args, file=sys.stderr, **kwargs)
# Entry point: train a word-level BLSTM-CRF tagger. This first section parses
# arguments, loads the corpora, builds the coding tables / embeddings /
# datasets, and constructs the model, optimizer, loss, and CRF packer.
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Learning with BLSTM-CRF')
    parser.add_argument('--rand_embedding', action='store_true', help='random initialize word embedding')
    parser.add_argument('--emb_file', default='./embedding/glove.6B.100d.txt', help='path to pre-trained embedding')
    parser.add_argument('--train_file', default='./data/ner2003/eng.train.iobes', help='path to training file')
    parser.add_argument('--dev_file', default='./data/ner2003/eng.testa.iobes', help='path to development file')
    parser.add_argument('--test_file', default='./data/ner2003/eng.testb.iobes', help='path to test file')
    parser.add_argument('--gpu', type=int, default=0, help='gpu id, set to -1 if use cpu mode')
    parser.add_argument('--batch_size', type=int, default=10, help='batch size (10)')
    parser.add_argument('--unk', default='unk', help='unknow-token in pre-trained embedding')
    parser.add_argument('--checkpoint', default='./checkpoint/', help='path to checkpoint prefix')
    parser.add_argument('--hidden', type=int, default=100, help='hidden dimension')
    parser.add_argument('--drop_out', type=float, default=0.55, help='dropout ratio')
    parser.add_argument('--epoch', type=int, default=200, help='maximum epoch number')
    parser.add_argument('--start_epoch', type=int, default=0, help='start epoch idx')
    parser.add_argument('--caseless', action='store_true', help='caseless or not')
    parser.add_argument('--embedding_dim', type=int, default=100, help='dimension for word embedding')
    parser.add_argument('--layers', type=int, default=1, help='number of lstm layers')
    parser.add_argument('--lr', type=float, default=0.015, help='initial learning rate')
    parser.add_argument('--lr_decay', type=float, default=0.05, help='decay ratio of learning rate')
    parser.add_argument('--fine_tune', action='store_false', help='fine tune pre-trained embedding dictionary')
    parser.add_argument('--load_check_point', default='', help='path of checkpoint')
    parser.add_argument('--load_opt', action='store_true', help='load optimizer from ')
    parser.add_argument('--update', choices=['sgd', 'adam'], default='sgd', help='optimizer method')
    parser.add_argument('--momentum', type=float, default=0.9, help='momentum for sgd')
    parser.add_argument('--clip_grad', type=float, default=5.0, help='grad clip at')
    parser.add_argument('--small_crf', action='store_false', help='use small crf instead of large crf, refer model.crf module for more details')
    parser.add_argument('--mini_count', type=float, default=5, help='thresholds to replace rare words with <unk>')
    parser.add_argument('--eva_matrix', choices=['a', 'fa'], default='fa', help='use f1 and accuracy or accuracy alone')
    parser.add_argument('--patience', type=int, default=15, help='patience for early stop')
    parser.add_argument('--least_iters', type=int, default=50, help='at least train how many epochs before stop')
    parser.add_argument('--shrink_embedding', action='store_true', help='shrink the embedding dictionary to corpus (open this if pre-trained embedding dictionary is too large, but disable this may yield better results on external corpus)')
    args = parser.parse_args()

    if args.gpu >= 0:
        torch.cuda.set_device(args.gpu)

    print('setting:')
    print(args)

    # load corpus
    print('loading corpus')
    with codecs.open(args.train_file, 'r', 'utf-8') as f:
        lines = f.readlines()
    with codecs.open(args.dev_file, 'r', 'utf-8') as f:
        dev_lines = f.readlines()
    with codecs.open(args.test_file, 'r', 'utf-8') as f:
        test_lines = f.readlines()

    # converting format
    dev_features, dev_labels = utils.read_corpus(dev_lines)
    test_features, test_labels = utils.read_corpus(test_lines)

    # either resume maps/epoch from a checkpoint, or build the coding tables
    # from the training corpus.
    # NOTE(review): the checkpoint branch does not define f_set, which the
    # dt_f_set lines below rely on — presumably resuming assumes
    # --rand_embedding or has never been exercised; TODO confirm.
    if args.load_check_point:
        if os.path.isfile(args.load_check_point):
            print("loading checkpoint: '{}'".format(args.load_check_point))
            checkpoint_file = torch.load(args.load_check_point)
            args.start_epoch = checkpoint_file['epoch']
            f_map = checkpoint_file['f_map']
            l_map = checkpoint_file['l_map']
            train_features, train_labels = utils.read_corpus(lines)
        else:
            print("no checkpoint found at: '{}'".format(args.load_check_point))
    else:
        print('constructing coding table')

        # converting format
        train_features, train_labels, f_map, l_map = utils.generate_corpus(lines, if_shrink_feature=True, thresholds=0)
        f_set = {v for v in f_map}
        f_map = utils.shrink_features(f_map, train_features, args.mini_count)

        # extend the feature set with dev/test words so embeddings cover them
        dt_f_set = functools.reduce(lambda x, y: x | y, map(lambda t: set(t), dev_features), f_set)
        dt_f_set = functools.reduce(lambda x, y: x | y, map(lambda t: set(t), test_features), dt_f_set)

    if not args.rand_embedding:
        print("feature size: '{}'".format(len(f_map)))
        print('loading embedding')
        if args.fine_tune: # which means does not do fine-tune
            f_map = {'<eof>': 0}
        f_map, embedding_tensor, in_doc_words = utils.load_embedding_wlm(args.emb_file, ' ', f_map, dt_f_set,args.caseless,args.unk, args.embedding_dim, shrink_to_corpus=args.shrink_embedding)
        print("embedding size: '{}'".format(len(f_map)))

    # make sure every dev/test label has an id
    l_set = functools.reduce(lambda x, y: x | y, map(lambda t: set(t), dev_labels))
    l_set = functools.reduce(lambda x, y: x | y, map(lambda t: set(t), test_labels), l_set)
    for label in l_set:
        if label not in l_map:
            l_map[label] = len(l_map)

    # construct dataset
    dataset = utils.construct_bucket_mean_vb(train_features, train_labels, f_map, l_map, args.caseless)
    dev_dataset = utils.construct_bucket_mean_vb(dev_features, dev_labels, f_map, l_map, args.caseless)
    test_dataset = utils.construct_bucket_mean_vb(test_features, test_labels, f_map, l_map, args.caseless)

    dataset_loader = [torch.utils.data.DataLoader(tup, args.batch_size, shuffle=True, drop_last=False) for tup in dataset]
    dev_dataset_loader = [torch.utils.data.DataLoader(tup, 50, shuffle=False, drop_last=False) for tup in dev_dataset]
    test_dataset_loader = [torch.utils.data.DataLoader(tup, 50, shuffle=False, drop_last=False) for tup in test_dataset]

    # build model
    print('building model')
    ner_model = LSTM_CRF(len(f_map), len(l_map), args.embedding_dim, args.hidden, args.layers, args.drop_out, large_CRF=args.small_crf)

    if args.load_check_point:
        ner_model.load_state_dict(checkpoint_file['state_dict'])
    else:
        if not args.rand_embedding:
            ner_model.load_pretrained_embedding(embedding_tensor)
        print('random initialization')
        ner_model.rand_init(init_embedding=args.rand_embedding)

    if args.update == 'sgd':
        optimizer = optim.SGD(ner_model.parameters(), lr=args.lr, momentum=args.momentum)
    elif args.update == 'adam':
        optimizer = optim.Adam(ner_model.parameters(), lr=args.lr)

    if args.load_check_point and args.load_opt:
        optimizer.load_state_dict(checkpoint_file['optimizer'])

    # CRF sequence loss over the label transition lattice
    crit = CRFLoss_vb(len(l_map), l_map['<start>'], l_map['<pad>'])

    if args.gpu >= 0:
        if_cuda = True
        print('device: ' + str(args.gpu))
        torch.cuda.set_device(args.gpu)
        crit.cuda()
        ner_model.cuda()
        packer = CRFRepack(len(l_map), True)
    else:
        if_cuda = False
        packer = CRFRepack(len(l_map), False)
if args.load_check_point:
dev_f1, dev_acc = eval_batch(ner_model, dev_dataset_loader, pack, l_map)
test_f1, test_acc = eval_batch(ner_model, test_dataset_loader, pack, l_map)
print('(checkpoint: dev F1 = %.4f, dev acc = %.4f, F1 on test = %.4f, acc on test= %.4f)' %
(dev_f1,
dev_acc,
test_f1,
test_acc))
    tot_length = sum(map(lambda t: len(t), dataset_loader))
    best_f1 = float('-inf')
    best_acc = float('-inf')
    track_list = list()
    start_time = time.time()
    epoch_list = range(args.start_epoch, args.start_epoch + args.epoch)
    patience_count = 0

    evaluator = eval_w(packer, l_map, args.eva_matrix)

    # NOTE(review): args.start_epoch is reused as the loop variable, so it
    # always holds the current epoch number (used in logs and checkpoints).
    for epoch_idx, args.start_epoch in enumerate(epoch_list):
        epoch_loss = 0
        ner_model.train()

        for feature, tg, mask in tqdm(
                itertools.chain.from_iterable(dataset_loader), mininterval=2,
                desc=' - Tot it %d (epoch %d)' % (tot_length, args.start_epoch), leave=False, file=sys.stdout):
            fea_v, tg_v, mask_v = packer.repack_vb(feature, tg, mask)
            ner_model.zero_grad()
            scores, hidden = ner_model.forward(fea_v)
            loss = crit.forward(scores, tg_v, mask_v)
            loss.backward()
            nn.utils.clip_grad_norm_(ner_model.parameters(), args.clip_grad)
            optimizer.step()
            epoch_loss += utils.to_scalar(loss)

        # update lr
        utils.adjust_learning_rate(optimizer, args.lr / (1 + (args.start_epoch + 1) * args.lr_decay))

        # average
        epoch_loss /= tot_length

        # eval & save check_point
if 'f' in args.eva_matrix:
dev_result = evaluator.calc_score(ner_model, dev_dataset_loader)
for label, (dev_f1, dev_pre, dev_rec, dev_acc, msg) in dev_result.items():
print('DEV : %s : dev_f1: %.4f dev_rec: %.4f dev_pre: %.4f dev_acc: %.4f | %s\n' % (label, dev_f1, dev_pre, dev_rec, dev_acc, msg))
(dev_f1, dev_pre, dev_rec, dev_acc, msg) = dev_result['total']
if dev_f1 > best_f1:
patience_count = 0
best_f1 = dev_f1
test_result = evaluator.calc_score(ner_model, test_dataset_loader)
for label, (test_f1, test_pre, test_rec, test_acc, msg) in test_result.items():
print('TEST : %s : test_f1: %.4f test_rec: %.4f test_pre: %.4f test_acc: %.4f | %s\n' % (label, test_f1, test_rec, test_pre, test_acc, msg))
(test_f1, test_rec, test_pre, test_acc, msg) = test_result['total']
track_list.append(
{'loss': epoch_loss, 'dev_f1': dev_f1, 'dev_acc': dev_acc, 'test_f1': test_f1,
'test_acc': test_acc})
print(
'(loss: %.4f, epoch: %d, dev F1 = %.4f, dev acc = %.4f, F1 on test = %.4f, acc on test= %.4f), saving...' %
(epoch_loss,
args.start_epoch,
dev_f1,
dev_acc,
test_f1,
test_acc))
try:
utils.save_checkpoint({
'epoch': args.start_epoch,
'state_dict': ner_model.state_dict(),
'optimizer': optimizer.state_dict(),
'f_map': f_map,
'l_map': l_map,
}, {'track_list': track_list,
'args': vars(args)
}, args.checkpoint + 'lstm_crf')
except Exception as inst:
print(inst)
else:
patience_count += 1
print('(loss: %.4f, epoch: %d, dev F1 = %.4f, dev acc = %.4f)' %
(epoch_loss,
args.start_epoch,
dev_f1,
dev_acc))
track_list.append({'loss': epoch_loss, 'dev_f1': dev_f1, 'dev_acc': dev_acc})
        else:

            # accuracy-only evaluation mode: calc_score returns a scalar
            dev_acc = evaluator.calc_score(ner_model, dev_dataset_loader)

            if dev_acc > best_acc:
                patience_count = 0
                best_acc = dev_acc

                test_acc = evaluator.calc_score(ner_model, test_dataset_loader)

                track_list.append(
                    {'loss': epoch_loss, 'dev_acc': dev_acc, 'test_acc': test_acc})

                print(
                    '(loss: %.4f, epoch: %d, dev acc = %.4f, acc on test= %.4f), saving...' %
                    (epoch_loss,
                     args.start_epoch,
                     dev_acc,
                     test_acc))

                try:
                    utils.save_checkpoint({
                        'epoch': args.start_epoch,
                        'state_dict': ner_model.state_dict(),
                        'optimizer': optimizer.state_dict(),
                        'f_map': f_map,
                        'l_map': l_map,
                    }, {'track_list': track_list,
                        'args': vars(args)
                        }, args.checkpoint + 'lstm_crf')
                except Exception as inst:
                    print(inst)

            else:
                patience_count += 1
                print('(loss: %.4f, epoch: %d, dev acc = %.4f)' %
                      (epoch_loss,
                       args.start_epoch,
                       dev_acc))
                track_list.append({'loss': epoch_loss, 'dev_acc': dev_acc})

        print('epoch: ' + str(args.start_epoch) + '\t in ' + str(args.epoch) + ' take: ' + str(
            time.time() - start_time) + ' s')

        # early stop once patience is exhausted (after the minimum epoch count)
        if patience_count >= args.patience and args.start_epoch >= args.least_iters:
            break

    #print best
    if 'f' in args.eva_matrix:
        eprint(args.checkpoint + ' dev_f1: %.4f dev_rec: %.4f dev_pre: %.4f dev_acc: %.4f test_f1: %.4f test_rec: %.4f test_pre: %.4f test_acc: %.4f\n' % (dev_f1, dev_rec, dev_pre, dev_acc, test_f1, test_rec, test_pre, test_acc))
    else:
        eprint(args.checkpoint + ' dev_acc: %.4f test_acc: %.4f\n' % (dev_acc, test_acc))

    # printing summary
    print('setting:')
    print(args)
| 14,299 | 45.278317 | 239 | py |
LM-LSTM-CRF | LM-LSTM-CRF-master/seq_wc.py | from __future__ import print_function
import datetime
import time
import torch
import torch.autograd as autograd
import torch.nn as nn
import torch.optim as optim
import codecs
from model.crf import *
from model.lm_lstm_crf import *
import model.utils as utils
from model.predictor import predict_wc
import argparse
import json
import os
import sys
from tqdm import tqdm
import itertools
import functools
if __name__ == "__main__":
    # Command-line front-end: annotate a raw (un-annotated) corpus with a
    # trained LM-LSTM-CRF checkpoint and write the decoded output to a file.
    parser = argparse.ArgumentParser(description='Evaluating LM-BLSTM-CRF')
    parser.add_argument('--load_arg', default='./checkpoint/ner/ner_4_cwlm_lstm_crf.json', help='path to arg json')
    parser.add_argument('--load_check_point', default='./checkpoint/ner/ner_4_cwlm_lstm_crf.model', help='path to model checkpoint file')
    parser.add_argument('--gpu',type=int, default=0, help='gpu id')
    parser.add_argument('--decode_type', choices=['label', 'string'], default='string', help='type of decode function, set `label` to couple label with text, or set `string` to insert label into test')
    parser.add_argument('--batch_size', type=int, default=50, help='size of batch')
    parser.add_argument('--input_file', default='data/ner2003/test.txt', help='path to input un-annotated corpus')
    parser.add_argument('--output_file', default='output.txt', help='path to output file')
    args = parser.parse_args()
    print('loading dictionary')
    # jd holds the training-time hyper-parameters saved next to the checkpoint
    with open(args.load_arg, 'r') as f:
        jd = json.load(f)
    jd = jd['args']
    # map_location forces all tensors onto CPU so a GPU-trained checkpoint can
    # be loaded on any machine; the vocab maps travel inside the checkpoint
    checkpoint_file = torch.load(args.load_check_point, map_location=lambda storage, loc: storage)
    f_map = checkpoint_file['f_map']
    l_map = checkpoint_file['l_map']
    c_map = checkpoint_file['c_map']
    in_doc_words = checkpoint_file['in_doc_words']
    if args.gpu >= 0:
        torch.cuda.set_device(args.gpu)
    # loading corpus
    print('loading corpus')
    with codecs.open(args.input_file, 'r', 'utf-8') as f:
        lines = f.readlines()
    # converting format
    features = utils.read_features(lines)
    # rebuild the network with the exact hyper-parameters it was trained with
    print('loading model')
    ner_model = LM_LSTM_CRF(len(l_map), len(c_map), jd['char_dim'], jd['char_hidden'], jd['char_layers'], jd['word_dim'], jd['word_hidden'], jd['word_layers'], len(f_map), jd['drop_out'], large_CRF=jd['small_crf'], if_highway=jd['high_way'], in_doc_words=in_doc_words, highway_layers = jd['highway_layers'])
    ner_model.load_state_dict(checkpoint_file['state_dict'])
    if args.gpu >= 0:
        if_cuda = True
        torch.cuda.set_device(args.gpu)
        ner_model.cuda()
    else:
        if_cuda = False
    decode_label = (args.decode_type == 'label')
    # predict_wc wraps CRF decoding plus the char/word tensor batching
    predictor = predict_wc(if_cuda, f_map, c_map, l_map, f_map['<eof>'], c_map['\n'], l_map['<pad>'], l_map['<start>'], decode_label, args.batch_size, jd['caseless'])
    print('annotating')
    with open(args.output_file, 'w') as fout:
        predictor.output_batch(ner_model, features, fout)
LM-LSTM-CRF | LM-LSTM-CRF-master/eval_w.py |
from __future__ import print_function
import datetime
import time
import torch
import torch.autograd as autograd
import torch.nn as nn
import torch.optim as optim
import codecs
from model.crf import *
from model.lstm_crf import *
import model.utils as utils
from model.evaluator import eval_w
import argparse
import json
import os
import sys
from tqdm import tqdm
import itertools
import functools
if __name__ == "__main__":
    # Command-line front-end: score a trained word-level BLSTM-CRF checkpoint
    # on an annotated test corpus and print F1 / accuracy figures.
    parser = argparse.ArgumentParser(description='Evaluating BLSTM-CRF')
    parser.add_argument('--load_arg', default='./checkpoint/soa/check_wc_p_char_lstm_crf.json', help='arg json file path')
    parser.add_argument('--load_check_point', default='./checkpoint/soa/check_wc_p_char_lstm_crf.model', help='checkpoint path')
    parser.add_argument('--gpu',type=int, default=0, help='gpu id')
    parser.add_argument('--eva_matrix', choices=['a', 'fa'], default='fa', help='use f1 and accuracy or accuracy alone')
    parser.add_argument('--test_file', default='', help='path to test file, if set to none, would use test_file path in the checkpoint file')
    args = parser.parse_args()
    # jd holds the hyper-parameters the checkpoint was trained with
    with open(args.load_arg, 'r') as f:
        jd = json.load(f)
    jd = jd['args']
    # map_location keeps all tensors on CPU so the checkpoint loads anywhere
    checkpoint_file = torch.load(args.load_check_point, map_location=lambda storage, loc: storage)
    f_map = checkpoint_file['f_map']
    l_map = checkpoint_file['l_map']
    if args.gpu >= 0:
        torch.cuda.set_device(args.gpu)
    # load corpus (fall back to the test file recorded in the checkpoint args)
    if args.test_file:
        with codecs.open(args.test_file, 'r', 'utf-8') as f:
            test_lines = f.readlines()
    else:
        with codecs.open(jd['test_file'], 'r', 'utf-8') as f:
            test_lines = f.readlines()
    # converting format
    test_features, test_labels = utils.read_corpus(test_lines)
    # construct dataset (bucketed by sentence length, batch size fixed at 50)
    test_dataset = utils.construct_bucket_mean_vb(test_features, test_labels, f_map, l_map, jd['caseless'])
    test_dataset_loader = [torch.utils.data.DataLoader(tup, 50, shuffle=False, drop_last=False) for tup in test_dataset]
    # rebuild the model with the saved hyper-parameters and restore weights
    ner_model = LSTM_CRF(len(f_map), len(l_map), jd['embedding_dim'], jd['hidden'], jd['layers'], jd['drop_out'], large_CRF=jd['small_crf'])
    ner_model.load_state_dict(checkpoint_file['state_dict'])
    if args.gpu >= 0:
        if_cuda = True
        torch.cuda.set_device(args.gpu)
        ner_model.cuda()
        packer = CRFRepack(len(l_map), True)
    else:
        if_cuda = False
        packer = CRFRepack(len(l_map), False)
    evaluator = eval_w(packer, l_map, args.eva_matrix)
    # 'fa' reports f1/precision/recall/accuracy; 'a' reports accuracy only
    if 'f' in args.eva_matrix:
        test_f1, test_pre, test_rec, test_acc = evaluator.calc_score(ner_model, test_dataset_loader)
        print(jd['checkpoint'] + ' test_f1: %.4f test_rec: %.4f test_pre: %.4f test_acc: %.4f\n' % (test_f1, test_rec, test_pre, test_acc))
    else:
        test_acc = evaluator.calc_score(ner_model, test_dataset_loader)
        print(jd['checkpoint'] + ' test_acc: %.4f\n' % (test_acc))
| 2,982 | 32.897727 | 141 | py |
LM-LSTM-CRF | LM-LSTM-CRF-master/docs/source/conf.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# LM-LSTM-CRF documentation build configuration file, created by
# sphinx-quickstart on Thu Sep 14 03:49:01 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
import os
import sys
# conf.py lives in docs/source/, so two levels up is the repository root;
# making it importable lets sphinx.ext.autodoc find the project modules.
sys.path.insert(0, os.path.abspath('../..'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.autosummary',
    'sphinx.ext.doctest',
    'sphinx.ext.intersphinx',
    'sphinx.ext.todo',
    'sphinx.ext.coverage',
    'sphinx.ext.mathjax',
    'sphinx.ext.napoleon',
    'sphinx.ext.viewcode',
]
# Render `ivar` docstring fields as instance-variable entries (napoleon).
napoleon_use_ivar = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'LM-LSTM-CRF'
copyright = '2017, Liyuan Liu, Frank Xu, Jingbo Shang'
author = 'Liyuan Liu, Frank Xu, Jingbo Shang'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = ''
# The full version, including alpha/beta/rc tags.
release = ''
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further.  For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
html_theme_options = {
    'collapse_navigation': False,
    'display_version': True,
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# This is required for the alabaster theme
# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars
html_sidebars = {
    '**': [
        'about.html',
        'navigation.html',
        'relations.html',  # needs 'show_related': True theme option to display
        'searchbox.html',
        'donate.html',
    ]
}
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'LM-LSTM-CRFdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #
    # 'papersize': 'letterpaper',
    # The font size ('10pt', '11pt' or '12pt').
    #
    # 'pointsize': '10pt',
    # Additional stuff for the LaTeX preamble.
    #
    # 'preamble': '',
    # Latex figure (float) alignment
    #
    # 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, 'LM-LSTM-CRF.tex', 'LM-LSTM-CRF Documentation',
     'Liyuan Liu, Frank Xu, Jingbo Shang', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'lm-lstm-crf', 'LM-LSTM-CRF Documentation',
     [author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (master_doc, 'LM-LSTM-CRF', 'LM-LSTM-CRF Documentation',
     author, 'LM-LSTM-CRF', 'One line description of project.',
     'Miscellaneous'),
]
| 5,569 | 29.773481 | 79 | py |
LM-LSTM-CRF | LM-LSTM-CRF-master/model/highway.py | """
.. module:: highway
:synopsis: highway network
.. moduleauthor:: Liyuan Liu
"""
import torch
import torch.nn as nn
import model.utils as utils
class hw(nn.Module):
    """Highway layers (gated residual transform).

    Each layer computes ``g * relu(W_t x) + (1 - g) * x`` where the gate
    ``g = sigmoid(W_g x)`` decides, per dimension, how much of the transformed
    representation to let through versus the untouched input.

    args:
        size: input and output dimension
        num_layers: number of stacked highway layers
        dropout_ratio: dropout ratio applied between successive layers
    """

    def __init__(self, size, num_layers = 1, dropout_ratio = 0.5):
        super(hw, self).__init__()
        self.size = size
        self.num_layers = num_layers
        self.trans = nn.ModuleList()
        self.gate = nn.ModuleList()
        self.dropout = nn.Dropout(p=dropout_ratio)

        # one transform + one gate projection per layer
        for i in range(num_layers):
            tmptrans = nn.Linear(size, size)
            tmpgate = nn.Linear(size, size)
            self.trans.append(tmptrans)
            self.gate.append(tmpgate)

    def rand_init(self):
        """
        random initialization of all transform and gate projections
        """
        for i in range(self.num_layers):
            utils.init_linear(self.trans[i])
            utils.init_linear(self.gate[i])

    def forward(self, x):
        """
        forward pass through the stacked highway layers

        args:
            x (ins_num, hidden_dim): input tensor
        return:
            output tensor (ins_num, hidden_dim)
        """
        # NOTE: nn.functional.sigmoid is deprecated; torch.sigmoid /
        # torch.relu are the supported, numerically identical spellings.
        g = torch.sigmoid(self.gate[0](x))
        h = torch.relu(self.trans[0](x))
        x = g * h + (1 - g) * x

        # dropout only between layers, never on the raw input
        for i in range(1, self.num_layers):
            x = self.dropout(x)
            g = torch.sigmoid(self.gate[i](x))
            h = torch.relu(self.trans[i](x))
            x = g * h + (1 - g) * x
        return x
LM-LSTM-CRF | LM-LSTM-CRF-master/model/lm_lstm_crf.py | """
.. module:: lm_lstm_crf
:synopsis: lm_lstm_crf
.. moduleauthor:: Liyuan Liu
"""
import torch
import torch.autograd as autograd
import torch.nn as nn
import torch.optim as optim
import numpy as np
import model.crf as crf
import model.utils as utils
import model.highway as highway
class LM_LSTM_CRF(nn.Module):
    """LM_LSTM_CRF model
    args:
        tagset_size: size of label set
        char_size: size of char dictionary
        char_dim: size of char embedding
        char_hidden_dim: size of char-level lstm hidden dim
        char_rnn_layers: number of char-level lstm layers
        embedding_dim: size of word embedding
        word_hidden_dim: size of word-level blstm hidden dim
        word_rnn_layers: number of word-level lstm layers
        vocab_size: size of word dictionary
        dropout_ratio: dropout ratio
        large_CRF: use CRF_L or not, refer model.crf.CRF_L and model.crf.CRF_S for more details
        if_highway: use highway layers or not
        in_doc_words: number of words that occurred in the corpus (used for language model prediction)
        highway_layers: number of highway layers
    """
    def __init__(self, tagset_size, char_size, char_dim, char_hidden_dim, char_rnn_layers, embedding_dim, word_hidden_dim, word_rnn_layers, vocab_size, dropout_ratio, large_CRF=True, if_highway = False, in_doc_words = 2, highway_layers = 1):
        super(LM_LSTM_CRF, self).__init__()
        self.char_dim = char_dim
        self.char_hidden_dim = char_hidden_dim
        self.char_size = char_size
        self.word_dim = embedding_dim
        self.word_hidden_dim = word_hidden_dim
        self.word_size = vocab_size
        self.if_highway = if_highway
        # one shared char embedding feeds both directional char-level LSTMs
        self.char_embeds = nn.Embedding(char_size, char_dim)
        self.forw_char_lstm = nn.LSTM(char_dim, char_hidden_dim, num_layers=char_rnn_layers, bidirectional=False, dropout=dropout_ratio)
        self.back_char_lstm = nn.LSTM(char_dim, char_hidden_dim, num_layers=char_rnn_layers, bidirectional=False, dropout=dropout_ratio)
        self.char_rnn_layers = char_rnn_layers
        self.word_embeds = nn.Embedding(vocab_size, embedding_dim)
        # word-level BLSTM consumes [word emb ; forward char state ; backward char state];
        # hidden size is halved so the bidirectional output is word_hidden_dim
        self.word_lstm = nn.LSTM(embedding_dim + char_hidden_dim * 2, word_hidden_dim // 2, num_layers=word_rnn_layers, bidirectional=True, dropout=dropout_ratio)
        self.word_rnn_layers = word_rnn_layers
        self.dropout = nn.Dropout(p=dropout_ratio)
        self.tagset_size = tagset_size
        if large_CRF:
            self.crf = crf.CRF_L(word_hidden_dim, tagset_size)
        else:
            self.crf = crf.CRF_S(word_hidden_dim, tagset_size)
        if if_highway:
            # highway transforms sit between the char LSTM outputs and their
            # consumers (LM pre-training heads and the word-level BLSTM)
            self.forw2char = highway.hw(char_hidden_dim, num_layers=highway_layers, dropout_ratio=dropout_ratio)
            self.back2char = highway.hw(char_hidden_dim, num_layers=highway_layers, dropout_ratio=dropout_ratio)
            self.forw2word = highway.hw(char_hidden_dim, num_layers=highway_layers, dropout_ratio=dropout_ratio)
            self.back2word = highway.hw(char_hidden_dim, num_layers=highway_layers, dropout_ratio=dropout_ratio)
            self.fb2char = highway.hw(2 * char_hidden_dim, num_layers=highway_layers, dropout_ratio=dropout_ratio)
        # output heads for the auxiliary language-model (pre-training) tasks
        self.char_pre_train_out = nn.Linear(char_hidden_dim, char_size)
        self.word_pre_train_out = nn.Linear(char_hidden_dim, in_doc_words)
        self.batch_size = 1
        self.word_seq_length = 1
    def set_batch_size(self, bsize):
        """
        set batch size
        """
        self.batch_size = bsize
    def set_batch_seq_size(self, sentence):
        """
        set batch size and sequence length from a time-major
        (word_seq_len, batch_size) tensor
        """
        tmp = sentence.size()
        self.word_seq_length = tmp[0]
        self.batch_size = tmp[1]
    def rand_init_embedding(self):
        """
        random initialize char-level embedding
        """
        utils.init_embedding(self.char_embeds.weight)
    def load_pretrained_word_embedding(self, pre_word_embeddings):
        """
        load pre-trained word embedding
        args:
            pre_word_embeddings (self.word_size, self.word_dim) : pre-trained embedding
        """
        assert (pre_word_embeddings.size()[1] == self.word_dim)
        self.word_embeds.weight = nn.Parameter(pre_word_embeddings)
    def rand_init(self, init_char_embedding=True, init_word_embedding=False):
        """
        random initialization
        args:
            init_char_embedding: random initialize char embedding or not
            init_word_embedding: random initialize word embedding or not
                (keep False when pre-trained word embeddings were loaded)
        """
        if init_char_embedding:
            utils.init_embedding(self.char_embeds.weight)
        if init_word_embedding:
            utils.init_embedding(self.word_embeds.weight)
        if self.if_highway:
            self.forw2char.rand_init()
            self.back2char.rand_init()
            self.forw2word.rand_init()
            self.back2word.rand_init()
            self.fb2char.rand_init()
        utils.init_lstm(self.forw_char_lstm)
        utils.init_lstm(self.back_char_lstm)
        utils.init_lstm(self.word_lstm)
        utils.init_linear(self.char_pre_train_out)
        utils.init_linear(self.word_pre_train_out)
        self.crf.rand_init()
    def word_pre_train_forward(self, sentence, position, hidden=None):
        """
        output of forward language model
        args:
            sentence (char_seq_len, batch_size): char-level representation of sentence
            position (word_seq_len, batch_size): position of blank space in char-level representation of sentence
            hidden: initial hidden state
        return:
            language model output (word_seq_len, in_doc_word), hidden
        """
        embeds = self.char_embeds(sentence)
        d_embeds = self.dropout(embeds)
        lstm_out, hidden = self.forw_char_lstm(d_embeds)
        # keep only the LSTM outputs at the word-boundary (blank) positions
        tmpsize = position.size()
        position = position.unsqueeze(2).expand(tmpsize[0], tmpsize[1], self.char_hidden_dim)
        select_lstm_out = torch.gather(lstm_out, 0, position)
        d_lstm_out = self.dropout(select_lstm_out).view(-1, self.char_hidden_dim)
        if self.if_highway:
            char_out = self.forw2word(d_lstm_out)
            d_char_out = self.dropout(char_out)
        else:
            d_char_out = d_lstm_out
        pre_score = self.word_pre_train_out(d_char_out)
        return pre_score, hidden
    def word_pre_train_backward(self, sentence, position, hidden=None):
        """
        output of backward language model
        args:
            sentence (char_seq_len, batch_size): char-level representation of sentence (inverse order)
            position (word_seq_len, batch_size): position of blank space in inversed char-level representation of sentence
            hidden: initial hidden state
        return:
            language model output (word_seq_len, in_doc_word), hidden
        """
        embeds = self.char_embeds(sentence)
        d_embeds = self.dropout(embeds)
        lstm_out, hidden = self.back_char_lstm(d_embeds)
        # keep only the LSTM outputs at the word-boundary (blank) positions
        tmpsize = position.size()
        position = position.unsqueeze(2).expand(tmpsize[0], tmpsize[1], self.char_hidden_dim)
        select_lstm_out = torch.gather(lstm_out, 0, position)
        d_lstm_out = self.dropout(select_lstm_out).view(-1, self.char_hidden_dim)
        if self.if_highway:
            char_out = self.back2word(d_lstm_out)
            d_char_out = self.dropout(char_out)
        else:
            d_char_out = d_lstm_out
        pre_score = self.word_pre_train_out(d_char_out)
        return pre_score, hidden
    def forward(self, forw_sentence, forw_position, back_sentence, back_position, word_seq, hidden=None):
        '''
        args:
            forw_sentence (char_seq_len, batch_size) : char-level representation of sentence
            forw_position (word_seq_len, batch_size) : position of blank space in char-level representation of sentence
            back_sentence (char_seq_len, batch_size) : char-level representation of sentence (inverse order)
            back_position (word_seq_len, batch_size) : position of blank space in inversed char-level representation of sentence
            word_seq (word_seq_len, batch_size) : word-level representation of sentence
            hidden: initial hidden state
        return:
            crf output (word_seq_len, batch_size, tag_size, tag_size), hidden
        '''
        self.set_batch_seq_size(forw_position)
        #embedding layer
        forw_emb = self.char_embeds(forw_sentence)
        back_emb = self.char_embeds(back_sentence)
        #dropout
        d_f_emb = self.dropout(forw_emb)
        d_b_emb = self.dropout(back_emb)
        #forward the whole sequence through both char-level LSTMs
        forw_lstm_out, _ = self.forw_char_lstm(d_f_emb)#seq_len_char * batch * char_hidden_dim
        back_lstm_out, _ = self.back_char_lstm(d_b_emb)#seq_len_char * batch * char_hidden_dim
        #select the char states at word boundaries (one per word)
        forw_position = forw_position.unsqueeze(2).expand(self.word_seq_length, self.batch_size, self.char_hidden_dim)
        select_forw_lstm_out = torch.gather(forw_lstm_out, 0, forw_position)
        back_position = back_position.unsqueeze(2).expand(self.word_seq_length, self.batch_size, self.char_hidden_dim)
        select_back_lstm_out = torch.gather(back_lstm_out, 0, back_position)
        fb_lstm_out = self.dropout(torch.cat((select_forw_lstm_out, select_back_lstm_out), dim=2))
        if self.if_highway:
            char_out = self.fb2char(fb_lstm_out)
            d_char_out = self.dropout(char_out)
        else:
            d_char_out = fb_lstm_out
        #word embedding
        word_emb = self.word_embeds(word_seq)
        d_word_emb = self.dropout(word_emb)
        #combine word embedding with the char-derived features
        word_input = torch.cat((d_word_emb, d_char_out), dim = 2)
        #word level lstm
        lstm_out, _ = self.word_lstm(word_input)
        d_lstm_out = self.dropout(lstm_out)
        #convert to per-position tag-transition scores for the CRF
        crf_out = self.crf(d_lstm_out)
        crf_out = crf_out.view(self.word_seq_length, self.batch_size, self.tagset_size, self.tagset_size)
        return crf_out
LM-LSTM-CRF | LM-LSTM-CRF-master/model/predictor.py | """
.. module:: predictor
:synopsis: prediction method (for un-annotated text)
.. moduleauthor:: Liyuan Liu
"""
import torch
import torch.autograd as autograd
import numpy as np
import itertools
import sys
from tqdm import tqdm
from model.crf import CRFDecode_vb
from model.utils import *
class predict:
    """Base class for prediction, provide method to calculate f1 score and accuracy
    args:
        if_cuda: if use cuda to speed up
        l_map: dictionary for labels
        label_seq: type of decode function, set `True` to couple label with text, or set 'False' to insert label into test
        batch_size: size of batch in decoding
    """
    def __init__(self, if_cuda, l_map, label_seq = True, batch_size = 50):
        self.if_cuda = if_cuda
        self.l_map = l_map
        # reverse map: label index -> label string, used for decoding
        self.r_l_map = revlut(l_map)
        self.batch_size = batch_size
        # pick the output format once up front
        if label_seq:
            self.decode_str = self.decode_l
        else:
            self.decode_str = self.decode_s
    def decode_l(self, feature, label):
        """
        decode a sentence coupled with label
        args:
            feature (list): words list
            label (list): label list
        """
        return '\n'.join(map(lambda t: t[0] + ' '+ self.r_l_map[t[1].item()], zip(feature, label)))
    def decode_s(self, feature, label):
        """
        decode a sentence in the format of <>
        args:
            feature (list): words list
            label (list): label list
        """
        chunks = ""
        current = None
        # NOTE: `label` is rebound inside the loop; the zip iterator was built
        # from the original argument beforehand, so iteration is unaffected.
        for f, y in zip(feature, label):
            label = self.r_l_map[y.item()]
            if label.startswith('B-'):
                # begin of a chunk: close any chunk still open, then open one
                if current is not None:
                    chunks += "</"+current+"> "
                current = label[2:]
                chunks += "<"+current+"> " + f + " "
            elif label.startswith('S-'):
                # single-token chunk: open and close around this word
                if current is not None:
                    chunks += " </"+current+"> "
                current = label[2:]
                chunks += "<"+current+"> " + f + " </"+current+"> "
                current = None
            elif label.startswith('I-'):
                # inside of a chunk: continue if the type matches, otherwise
                # close the open chunk and start one of the new type
                if current is not None:
                    base = label[2:]
                    if base == current:
                        chunks += f+" "
                    else:
                        chunks += "</"+current+"> <"+base+"> " + f + " "
                        current = base
                else:
                    current = label[2:]
                    chunks += "<"+current+"> " + f + " "
            elif label.startswith('E-'):
                # end of a chunk: close it here (handling type mismatches)
                if current is not None:
                    base = label[2:]
                    if base == current:
                        chunks += f + " </"+base+"> "
                        current = None
                    else:
                        chunks += "</"+current+"> <"+base+"> " + f + " </"+base+"> "
                        current = None
                else:
                    current = label[2:]
                    chunks += "<"+current+"> " + f + " </"+current+"> "
                    current = None
            else:
                # O (outside) label: close any open chunk, emit the bare word
                if current is not None:
                    chunks += "</"+current+"> "
                chunks += f+" "
                current = None
        # close a chunk left open at the end of the sentence
        if current is not None:
            chunks += "</"+current+"> "
        return chunks
    def output_batch(self, ner_model, documents, fout):
        """
        decode the whole corpus in the specific format by calling apply_model to fit specific models
        args:
            ner_model: sequence labeling model
            documents (list): list of documents, each a list of words lists
            fout: output file
        """
        ner_model.eval()
        d_len = len(documents)
        for d_ind in tqdm( range(0, d_len), mininterval=1,
                desc=' - Process', leave=False, file=sys.stdout):
            fout.write('-DOCSTART- -DOCSTART- -DOCSTART-\n\n')
            features = documents[d_ind]
            f_len = len(features)
            # decode the document in mini-batches of self.batch_size sentences
            for ind in range(0, f_len, self.batch_size):
                eind = min(f_len, ind + self.batch_size)
                labels = self.apply_model(ner_model, features[ind: eind])
                # (seq_len, batch) -> tuple of per-sentence label columns
                labels = torch.unbind(labels, 1)
                for ind2 in range(ind, eind):
                    f = features[ind2]
                    # strip the padding positions before decoding
                    l = labels[ind2 - ind][0: len(f) ]
                    fout.write(self.decode_str(features[ind2], l) + '\n\n')
    def apply_model(self, ner_model, features):
        """
        template function for apply_model; subclasses return decoded labels
        args:
            ner_model: sequence labeling model
            feature (list): list of words list
        """
        return None
class predict_w(predict):
    """prediction class for word level model (LSTM-CRF)

    args:
        if_cuda: if use cuda to speed up
        f_map: dictionary for words
        l_map: dictionary for labels
        pad_word: word padding
        pad_label: label padding
        start_label: start label
        label_seq: type of decode function, set `True` to couple label with text, or set 'False' to insert label into test
        batch_size: size of batch in decoding
        caseless: caseless or not
    """

    def __init__(self, if_cuda, f_map, l_map, pad_word, pad_label, start_label, label_seq = True, batch_size = 50, caseless=True):
        predict.__init__(self, if_cuda, l_map, label_seq, batch_size)
        self.caseless = caseless
        self.f_map = f_map
        self.l_map = l_map
        self.pad_word = pad_word
        # Viterbi decoder over the CRF transition scores
        self.decoder = CRFDecode_vb(len(l_map), start_label, pad_label)

    def apply_model(self, ner_model, features):
        """
        apply_model function for LSTM-CRF: pack a batch of sentences into
        padded word tensors, run the model, and Viterbi-decode the scores

        args:
            ner_model: sequence labeling model
            features (list): list of words list
        """
        if self.caseless:
            features = [[word.lower() for word in sent] for sent in features]
        features = encode_safe(features, self.f_map, self.f_map['<unk>'])

        # every sentence is padded to the longest sentence plus one slot
        pad_to = max(len(sent) + 1 for sent in features)
        masks = torch.ByteTensor(
            [[1] * (len(sent) + 1) + [0] * (pad_to - len(sent) - 1) for sent in features])
        word_features = torch.LongTensor(
            [sent + [self.pad_word] * (pad_to - len(sent)) for sent in features])

        # model and decoder expect time-major (seq_len, batch) layout
        if self.if_cuda:
            fea_v = autograd.Variable(word_features.transpose(0, 1)).cuda()
            mask_v = masks.transpose(0, 1).cuda()
        else:
            fea_v = autograd.Variable(word_features.transpose(0, 1))
            mask_v = masks.transpose(0, 1).contiguous()

        scores, _ = ner_model(fea_v)
        return self.decoder.decode(scores.data, mask_v)
class predict_wc(predict):
    """prediction class for LM-LSTM-CRF
    args:
        if_cuda: if use cuda to speed up
        f_map: dictionary for words
        c_map: dictionary for chars
        l_map: dictionary for labels
        pad_word: word padding
        pad_char: char padding
        pad_label: label padding
        start_label: start label
        label_seq: type of decode function, set `True` to couple label with text, or set 'False' to insert label into test
        batch_size: size of batch in decoding
        caseless: caseless or not
    """
    def __init__(self, if_cuda, f_map, c_map, l_map, pad_word, pad_char, pad_label, start_label, label_seq = True, batch_size = 50, caseless=True):
        predict.__init__(self, if_cuda, l_map, label_seq, batch_size)
        self.decoder = CRFDecode_vb(len(l_map), start_label, pad_label)
        self.pad_word = pad_word
        self.pad_char = pad_char
        self.f_map = f_map
        self.c_map = c_map
        self.l_map = l_map
        self.caseless = caseless
    def apply_model(self, ner_model, features):
        """
        apply_model function for LM-LSTM-CRF: build forward/backward char
        streams with word-boundary positions plus padded word tensors, run
        the model, and Viterbi-decode the scores
        args:
            ner_model: sequence labeling model
            feature (list): list of words list
        """
        char_features = encode2char_safe(features, self.c_map)
        if self.caseless:
            word_features = encode_safe(list(map(lambda t: list(map(lambda x: x.lower(), t)), features)), self.f_map, self.f_map['<unk>'])
        else:
            word_features = encode_safe(features, self.f_map, self.f_map['<unk>'])
        # per-word char lengths (+1 accounts for the separating blank char)
        fea_len = [list( map( lambda t: len(t) + 1, f) ) for f in char_features]
        forw_features = concatChar(char_features, self.c_map)
        # common padded lengths for this batch, in words and in chars
        word_len = max(map(lambda t: len(t) + 1, word_features))
        char_len = max(map(lambda t: len(t[0]) + word_len - len(t[1]), zip(forw_features, word_features)))
        forw_t = list( map( lambda t: t + [self.pad_char] * ( char_len - len(t) ), forw_features ) )
        # the backward char LSTM consumes the same char sequence reversed
        back_t = torch.LongTensor( list( map( lambda t: t[::-1], forw_t ) ) )
        forw_t = torch.LongTensor( forw_t )
        # cumulative char offsets mark each word boundary in the flattened
        # char stream; back_p mirrors those offsets for the reversed stream
        forw_p = torch.LongTensor( list( map( lambda t: list(itertools.accumulate( t + [1] * (word_len - len(t) ) ) ), fea_len) ) )
        back_p = torch.LongTensor( list( map( lambda t: [char_len - 1] + [ char_len - 1 - tup for tup in t[:-1] ], forw_p) ) )
        masks = torch.ByteTensor(list(map(lambda t: [1] * (len(t) + 1) + [0] * (word_len - len(t) - 1), word_features)))
        word_t = torch.LongTensor(list(map(lambda t: t + [self.pad_word] * (word_len - len(t)), word_features)))
        # model and decoder expect time-major (seq_len, batch) layout
        if self.if_cuda:
            f_f = autograd.Variable(forw_t.transpose(0, 1)).cuda()
            f_p = autograd.Variable(forw_p.transpose(0, 1)).cuda()
            b_f = autograd.Variable(back_t.transpose(0, 1)).cuda()
            b_p = autograd.Variable(back_p.transpose(0, 1)).cuda()
            w_f = autograd.Variable(word_t.transpose(0, 1)).cuda()
            mask_v = masks.transpose(0, 1).cuda()
        else:
            f_f = autograd.Variable(forw_t.transpose(0, 1))
            f_p = autograd.Variable(forw_p.transpose(0, 1))
            b_f = autograd.Variable(back_t.transpose(0, 1))
            b_p = autograd.Variable(back_p.transpose(0, 1))
            w_f = autograd.Variable(word_t.transpose(0, 1))
            mask_v = masks.transpose(0, 1)
        scores = ner_model(f_f, f_p, b_f, b_p, w_f)
        decoded = self.decoder.decode(scores.data, mask_v)
        return decoded
| 10,324 | 35.875 | 147 | py |
LM-LSTM-CRF | LM-LSTM-CRF-master/model/ner_dataset.py | """
.. module:: datasets
:synopsis: datasets
.. moduleauthor:: Liyuan Liu
"""
from torch.utils.data import Dataset
class CRFDataset(Dataset):
    """Dataset Class for word-level model

    args:
        data_tensor (ins_num, seq_length): words
        label_tensor (ins_num, seq_length): labels
        mask_tensor (ins_num, seq_length): padding masks
    """

    def __init__(self, data_tensor, label_tensor, mask_tensor):
        # all three tensors must describe the same number of instances
        n_ins = data_tensor.size(0)
        assert label_tensor.size(0) == n_ins
        assert mask_tensor.size(0) == n_ins
        self.data_tensor = data_tensor
        self.label_tensor = label_tensor
        self.mask_tensor = mask_tensor

    def __getitem__(self, index):
        # one instance = (words, labels, mask) row triple
        item = (self.data_tensor[index],
                self.label_tensor[index],
                self.mask_tensor[index])
        return item

    def __len__(self):
        return self.data_tensor.size(0)
class CRFDataset_WC(Dataset):
    """Dataset Class for char-aware model

    args:
        forw_tensor (ins_num, seq_length): forward chars
        forw_index (ins_num, seq_length): index of forward chars
        back_tensor (ins_num, seq_length): backward chars
        back_index (ins_num, seq_length): index of backward chars
        word_tensor (ins_num, seq_length): words
        label_tensor (ins_num, seq_length): labels
        mask_tensor (ins_num, seq_length): padding masks
        len_tensor (ins_num, 2): length of chars (dim0) and words (dim1)
    """

    def __init__(self, forw_tensor, forw_index, back_tensor, back_index, word_tensor, label_tensor, mask_tensor, len_tensor):
        # every companion tensor must cover the same number of instances
        n_ins = forw_tensor.size(0)
        for companion in (forw_index, back_tensor, back_index, word_tensor,
                          label_tensor, mask_tensor, len_tensor):
            assert companion.size(0) == n_ins
        self.forw_tensor = forw_tensor
        self.forw_index = forw_index
        self.back_tensor = back_tensor
        self.back_index = back_index
        self.word_tensor = word_tensor
        self.label_tensor = label_tensor
        self.mask_tensor = mask_tensor
        self.len_tensor = len_tensor

    def __getitem__(self, index):
        # one instance = the matching row from each of the eight tensors
        return (self.forw_tensor[index], self.forw_index[index],
                self.back_tensor[index], self.back_index[index],
                self.word_tensor[index], self.label_tensor[index],
                self.mask_tensor[index], self.len_tensor[index])

    def __len__(self):
        return self.forw_tensor.size(0)
| 2,581 | 37.537313 | 211 | py |
LM-LSTM-CRF | LM-LSTM-CRF-master/model/utils.py | """
.. module:: utils
:synopsis: utility tools
.. moduleauthor:: Liyuan Liu, Frank Xu
"""
import codecs
import csv
import itertools
from functools import reduce
import numpy as np
import shutil
import torch
import json
import torch.nn as nn
import torch.nn.init
from model.ner_dataset import *
zip = getattr(itertools, 'izip', zip)
def to_scalar(var):
    """Return the first element of a tensor as a python scalar."""
    flat = var.view(-1)
    return flat.data.tolist()[0]
def argmax(vec):
    """helper function to calculate argmax of input vector at dimension 1
    """
    _, idx = torch.max(vec, 1)
    # flatten and pull out the first entry as a python int
    return idx.view(-1).data.tolist()[0]
def log_sum_exp(vec, m_size):
    """
    calculate log of exp sum
    args:
        vec (batch_size, vanishing_dim, hidden_dim) : input tensor
        m_size : hidden_dim
    return:
        batch_size, hidden_dim
    """
    # classic log-sum-exp trick: shift by the column-wise maximum so the
    # exponentials cannot overflow, then add the maximum back after the log
    max_score, _ = torch.max(vec, 1)  # B * M
    shifted = vec - max_score.view(-1, 1, m_size).expand_as(vec)
    return max_score.view(-1, m_size) + torch.log(torch.sum(torch.exp(shifted), 1)).view(-1, m_size)  # B * M
def switch(vec1, vec2, mask):
    """
    switch function for pytorch
    args:
        vec1 (any size) : input tensor corresponding to 0
        vec2 (same to vec1) : input tensor corresponding to 1
        mask (same to vec1) : input tensor, each element equals to 0/1
    return:
        vec (*)
    """
    # lay the two candidates side by side as columns, then let the mask
    # pick column 0 (vec1) or column 1 (vec2) element-wise via gather
    paired = torch.cat((vec1.view(-1, 1), vec2.view(-1, 1)), dim=1)
    picked = torch.gather(paired, 1, mask.long().view(-1, 1))
    return picked.view(-1)
def encode2char_safe(input_lines, char_dict):
    """
    Map every word of every sentence to a list of character ids; characters
    missing from ``char_dict`` fall back to the ``<u>`` (unknown) id.

    args:
        input_lines (list of list of str) : tokenized corpus
        char_dict (dict) : char-level dictionary
    return:
        list (sentences) of lists (words) of char-id lists
    """
    unk = char_dict['<u>']
    return [[[char_dict.get(ch, unk) for ch in word] for word in line] for line in input_lines]
def concatChar(input_lines, char_dict):
    """
    concat char into string
    args:
        input_lines (list of list of char) : input corpus
        char_dict (dictionary) : char-level dictionary
    return:
        forw_lines
    """
    # Flatten each sentence of char-id words into one stream:
    #   <space> w1 <space> w2 ... wn <newline>
    # the leading space acts as a start marker and '\n' terminates the sentence.
    # NOTE(review): reduce() raises TypeError on an empty sentence — callers
    # appear to pass non-empty sentences only; confirm before relying on this.
    features = [[char_dict[' ']] + list(reduce(lambda x, y: x + [char_dict[' ']] + y, sentence)) + [char_dict['\n']] for sentence in input_lines]
    return features
def encode_safe(input_lines, word_dict, unk):
    """
    Encode token sequences into word ids, mapping out-of-vocabulary tokens
    to ``unk``.
    """
    return [[word_dict.get(token, unk) for token in line] for line in input_lines]
def encode(input_lines, word_dict):
    """
    Encode token sequences into word ids; every token must already be in
    ``word_dict`` (raises KeyError otherwise).
    """
    return [[word_dict[token] for token in line] for line in input_lines]
def encode2Tensor(input_lines, word_dict, unk):
    """
    Encode token sequences into ``torch.LongTensor``s of word ids, mapping
    out-of-vocabulary tokens to ``unk``.
    """
    return [torch.LongTensor([word_dict.get(token, unk) for token in line]) for line in input_lines]
def generate_corpus_char(lines, if_shrink_c_feature=False, c_thresholds=1, if_shrink_w_feature=False, w_thresholds=1):
    """
    generate label, feature, word dictionary, char dictionary and label dictionary
    args:
        lines : corpus
        if_shrink_c_feature: whether shrink char-dictionary
        c_thresholds: threshold for shrinking char-dictionary (a char must
            occur at least this many times to be kept)
        if_shrink_w_feature: whether shrink word-dictionary
        w_thresholds: threshold for shrinking word-dictionary
    return:
        features, labels, feature_map, label_map, char_map
    """
    features, labels, feature_map, label_map = generate_corpus(lines, if_shrink_feature=if_shrink_w_feature, thresholds=w_thresholds)
    char_count = dict()
    # Count every character occurrence.  (The previous version initialized a
    # newly seen char to 0 instead of 1, under-counting each char by one and
    # effectively raising the shrink threshold by one.)
    for feature in features:
        for word in feature:
            for tup in word:
                char_count[tup] = char_count.get(tup, 0) + 1
    if if_shrink_c_feature:
        # keep only chars frequent enough to survive the threshold
        shrink_char_count = [k for (k, v) in iter(char_count.items()) if v >= c_thresholds]
        char_map = {shrink_char_count[ind]: ind for ind in range(0, len(shrink_char_count))}
    else:
        char_map = {k: v for (v, k) in enumerate(char_count.keys())}
    char_map['<u>'] = len(char_map)  # unk for char
    char_map[' '] = len(char_map)  # concat for char
    char_map['\n'] = len(char_map)  # eof for char
    return features, labels, feature_map, label_map, char_map
def shrink_features(feature_map, features, thresholds):
    """
    Drop words occurring fewer than ``thresholds`` times and rebuild the word
    dictionary; ``<unk>`` is fixed at index 0 and ``<eof>`` appended last.
    """
    counts = {word: 0 for word in feature_map}
    for sentence in features:
        for word in sentence:
            counts[word] += 1
    kept = [word for word, freq in counts.items() if freq >= thresholds]
    # index 0 stays reserved for <unk>, so surviving words start at 1
    new_map = {word: position + 1 for position, word in enumerate(kept)}
    new_map['<unk>'] = 0
    new_map['<eof>'] = len(new_map)
    return new_map
def generate_corpus(lines, if_shrink_feature=False, thresholds=1):
    """
    generate label, feature, word dictionary and label dictionary

    Sentences are separated by blank lines; '-DOCSTART-' lines are treated
    as separators too.  Word and label ids are assigned in first-seen order.

    args:
        lines : corpus (CoNLL-style, token first column, label last column)
        if_shrink_feature: whether shrink word-dictionary
        thresholds: threshold for shrinking word-dictionary
    return:
        features, labels, feature_map, label_map
    """
    features = list()
    labels = list()
    tmp_fl = list()
    tmp_ll = list()
    feature_map = dict()
    label_map = dict()
    for line in lines:
        if not (line.isspace() or (len(line) > 10 and line[0:10] == '-DOCSTART-')):
            line = line.rstrip('\n').split()
            tmp_fl.append(line[0])
            if line[0] not in feature_map:
                feature_map[line[0]] = len(feature_map) + 1 #0 is for unk
            tmp_ll.append(line[-1])
            if line[-1] not in label_map:
                label_map[line[-1]] = len(label_map)
        elif len(tmp_fl) > 0:
            # separator line: flush the accumulated sentence
            features.append(tmp_fl)
            labels.append(tmp_ll)
            tmp_fl = list()
            tmp_ll = list()
    # flush a trailing sentence that has no terminating blank line
    if len(tmp_fl) > 0:
        features.append(tmp_fl)
        labels.append(tmp_ll)
    label_map['<start>'] = len(label_map)
    label_map['<pad>'] = len(label_map)
    if if_shrink_feature:
        feature_map = shrink_features(feature_map, features, thresholds)
    else:
        #inserting unk to be 0 encoded
        feature_map['<unk>'] = 0
        #inserting eof
        feature_map['<eof>'] = len(feature_map)
    return features, labels, feature_map, label_map
def read_corpus(lines):
    """
    Parse a CoNLL-style corpus into parallel token and label sequences.

    Sentences are separated by blank lines; '-DOCSTART-' lines act as
    separators as well.  Tokens come from the first column and labels from
    the last column of each line.
    """
    features, labels = [], []
    sent_tokens, sent_labels = [], []
    for raw in lines:
        is_separator = raw.isspace() or (len(raw) > 10 and raw.startswith('-DOCSTART-'))
        if not is_separator:
            columns = raw.rstrip('\n').split()
            sent_tokens.append(columns[0])
            sent_labels.append(columns[-1])
        elif sent_tokens:
            features.append(sent_tokens)
            labels.append(sent_labels)
            sent_tokens, sent_labels = [], []
    if sent_tokens:  # trailing sentence without a closing blank line
        features.append(sent_tokens)
        labels.append(sent_labels)
    return features, labels
def read_features(lines, multi_docs = True):
    """
    convert un-annotated corpus into features

    With ``multi_docs=True`` the result is a list of documents (each a list
    of sentences, split on '-DOCSTART-' lines); otherwise a flat list of
    sentences.  Only the first whitespace-separated column of each line is
    kept.
    """
    if multi_docs:
        documents = list()
        features = list()
        tmp_fl = list()
        for line in lines:
            if_doc_end = (len(line) > 10 and line[0:10] == '-DOCSTART-')
            if not (line.isspace() or if_doc_end):
                line = line.split()[0]
                tmp_fl.append(line)
            else:
                # blank line or doc marker: flush the current sentence
                if len(tmp_fl) > 0:
                    features.append(tmp_fl)
                    tmp_fl = list()
                # doc marker additionally flushes the current document
                if if_doc_end and len(features) > 0:
                    documents.append(features)
                    features = list()
        # flush any trailing sentence/document without closing separators
        if len(tmp_fl) > 0:
            features.append(tmp_fl)
        if len(features) >0:
            documents.append(features)
        return documents
    else:
        features = list()
        tmp_fl = list()
        for line in lines:
            if not (line.isspace() or (len(line) > 10 and line[0:10] == '-DOCSTART-')):
                line = line.split()[0]
                tmp_fl.append(line)
            elif len(tmp_fl) > 0:
                features.append(tmp_fl)
                tmp_fl = list()
        if len(tmp_fl) > 0:
            features.append(tmp_fl)
        return features
def shrink_embedding(feature_map, word_dict, word_embedding, caseless):
    """
    Restrict a pre-trained embedding table to in-corpus words only.

    args:
        feature_map: corpus vocabulary (keys are the words to keep)
        word_dict: word -> row index into ``word_embedding``
        word_embedding: full embedding matrix
        caseless: lower-case the corpus vocabulary before matching
    return:
        (reduced word dict, reduced embedding matrix)
    """
    if caseless:
        feature_map = set(word.lower() for word in feature_map.keys())
    kept_words = [word for word in word_dict.keys() if word in feature_map]
    shrunk_dict = {word: idx for idx, word in enumerate(kept_words)}
    row_selector = torch.LongTensor([word_dict[word] for word in kept_words])
    return shrunk_dict, word_embedding[row_selector]
def encode_corpus(lines, f_map, l_map, if_lower = False):
    """
    encode corpus into features and labels

    Parses CoNLL-style ``lines`` (blank lines / '-DOCSTART-' separate
    sentences), optionally lower-cases tokens, then encodes tokens with
    ``f_map`` (unknowns -> ``f_map['<unk>']``) and labels with ``l_map``.
    """
    tmp_fl = []
    tmp_ll = []
    features = []
    labels = []
    for line in lines:
        if not (line.isspace() or (len(line) > 10 and line[0:10] == '-DOCSTART-')):
            line = line.rstrip('\n').split()
            tmp_fl.append(line[0])
            tmp_ll.append(line[-1])
        elif len(tmp_fl) > 0:
            # separator: flush the accumulated sentence
            features.append(tmp_fl)
            labels.append(tmp_ll)
            tmp_fl = list()
            tmp_ll = list()
    if len(tmp_fl) > 0:
        features.append(tmp_fl)
        labels.append(tmp_ll)
    if if_lower:
        features = list(map(lambda t: list(map(lambda x: x.lower(), t)), features))
    feature_e = encode_safe(features, f_map, f_map['<unk>'])
    label_e = encode(labels, l_map)
    return feature_e, label_e
def encode_corpus_c(lines, f_map, l_map, c_map):
    """
    encode corpus into features (both word-level and char-level) and labels

    Same parsing as ``encode_corpus`` but additionally returns a char-level
    encoding (via ``c_map``) of every word.  No lower-casing is applied.
    """
    tmp_fl = []
    tmp_ll = []
    features = []
    labels = []
    for line in lines:
        if not (line.isspace() or (len(line) > 10 and line[0:10] == '-DOCSTART-')):
            line = line.rstrip('\n').split()
            tmp_fl.append(line[0])
            tmp_ll.append(line[-1])
        elif len(tmp_fl) > 0:
            # separator: flush the accumulated sentence
            features.append(tmp_fl)
            labels.append(tmp_ll)
            tmp_fl = list()
            tmp_ll = list()
    if len(tmp_fl) > 0:
        features.append(tmp_fl)
        labels.append(tmp_ll)
    feature_c = encode2char_safe(features, c_map)
    feature_e = encode_safe(features, f_map, f_map['<unk>'])
    label_e = encode(labels, l_map)
    return feature_c, feature_e, label_e
def load_embedding(emb_file, delimiter, feature_map, caseless, unk, shrink_to_train=False):
    """
    load embedding

    Reads ``emb_file`` line by line (word followed by vector components,
    separated by ``delimiter``), builds a word -> row dictionary, and appends
    randomly initialized rows for in-corpus words missing from the file.

    args:
        emb_file: path to the embedding text file
        delimiter: field delimiter within each line
        feature_map: corpus vocabulary
        caseless: lower-case corpus words before matching
        unk: the token in the file that represents the unknown word
        shrink_to_train: skip file entries absent from the corpus vocabulary
    return:
        (word_dict, embedding_tensor)
    """
    if caseless:
        feature_set = set([key.lower() for key in feature_map])
    else:
        feature_set = set([key for key in feature_map])
    word_dict = dict()
    embedding_array = list()
    for line in open(emb_file, 'r'):
        line = line.split(delimiter)
        # keep only non-empty numeric fields (guards against trailing blanks)
        vector = list(map(lambda t: float(t), filter(lambda n: n and not n.isspace(), line[1:])))
        if shrink_to_train and line[0] not in feature_set:
            continue
        if line[0] == unk:
            word_dict['<unk>'] = len(word_dict)
        else:
            word_dict[line[0]] = len(word_dict)
        embedding_array.append(vector)
    embedding_tensor_1 = torch.FloatTensor(np.asarray(embedding_array))
    emb_len = embedding_tensor_1.size(1)
    rand_embedding_count = 0
    # corpus words not covered by the file get random rows appended at the end
    for key in feature_map:
        if caseless:
            key = key.lower()
        if key not in word_dict:
            word_dict[key] = len(word_dict)
            rand_embedding_count += 1
    rand_embedding_tensor = torch.FloatTensor(rand_embedding_count, emb_len)
    init_embedding(rand_embedding_tensor)
    embedding_tensor = torch.cat((embedding_tensor_1, rand_embedding_tensor), 0)
    return word_dict, embedding_tensor
def load_embedding_wlm(emb_file, delimiter, feature_map, full_feature_set, caseless, unk, emb_len, shrink_to_train=False, shrink_to_corpus=False):
    """
    load embedding, indoc words would be listed before outdoc words
    args:
        emb_file: path to embedding file
        delimiter: delimiter of lines
        feature_map: word dictionary (training-set vocabulary)
        full_feature_set: all words in the corpus (train + dev + test)
        caseless: convert into casesless style
        unk: string for unknown token
        emb_len: dimension of embedding vectors
        shrink_to_train: whether to shrink out-of-training set or not
        shrink_to_corpus: whether to shrink out-of-corpus or not
    return:
        (word_dict, embedding_tensor, in_doc_num) — rows are ordered:
        random rows for training words, then file rows for corpus words,
        then (unless shrink_to_corpus) file rows for out-of-corpus words.
    """
    if caseless:
        feature_set = set([key.lower() for key in feature_map])
        full_feature_set = set([key.lower() for key in full_feature_set])
    else:
        feature_set = set([key for key in feature_map])
        full_feature_set = set([key for key in full_feature_set])
    #ensure <unk> is 0
    word_dict = {v:(k+1) for (k,v) in enumerate(feature_set - set(['<unk>']))}
    word_dict['<unk>'] = 0
    in_doc_freq_num = len(word_dict)
    # training-set words start from random rows; file vectors overwrite below
    rand_embedding_tensor = torch.FloatTensor(in_doc_freq_num, emb_len)
    init_embedding(rand_embedding_tensor)
    indoc_embedding_array = list()
    indoc_word_array = list()
    outdoc_embedding_array = list()
    outdoc_word_array = list()
    for line in open(emb_file, 'r'):
        line = line.split(delimiter)
        vector = list(map(lambda t: float(t), filter(lambda n: n and not n.isspace(), line[1:])))
        if shrink_to_train and line[0] not in feature_set:
            continue
        if line[0] == unk:
            rand_embedding_tensor[0] = torch.FloatTensor(vector) #unk is 0
        elif line[0] in word_dict:
            rand_embedding_tensor[word_dict[line[0]]] = torch.FloatTensor(vector)
        elif line[0] in full_feature_set:
            indoc_embedding_array.append(vector)
            indoc_word_array.append(line[0])
        elif not shrink_to_corpus:
            outdoc_word_array.append(line[0])
            outdoc_embedding_array.append(vector)
    embedding_tensor_0 = torch.FloatTensor(np.asarray(indoc_embedding_array))
    if not shrink_to_corpus:
        embedding_tensor_1 = torch.FloatTensor(np.asarray(outdoc_embedding_array))
    word_emb_len = embedding_tensor_0.size(1)
    assert(word_emb_len == emb_len)
    if shrink_to_corpus:
        embedding_tensor = torch.cat([rand_embedding_tensor, embedding_tensor_0], 0)
    else:
        embedding_tensor = torch.cat([rand_embedding_tensor, embedding_tensor_0, embedding_tensor_1], 0)
    # dictionary indices must follow the concatenation order above
    for word in indoc_word_array:
        word_dict[word] = len(word_dict)
    in_doc_num = len(word_dict)
    if not shrink_to_corpus:
        for word in outdoc_word_array:
            word_dict[word] = len(word_dict)
    return word_dict, embedding_tensor, in_doc_num
def calc_threshold_mean(features):
    """
    Compute bucket thresholds from sentence lengths (each length is
    ``len(sentence) + 1`` to account for the appended end token).

    return:
        [mean of below-average lengths, mean, mean of at/above-average
        lengths, max length]

    When no length is strictly below the integer mean (e.g. all sentences
    equally long) the first threshold falls back to the mean — the previous
    version crashed with ZeroDivisionError in that case.
    """
    lines_len = list(map(lambda t: len(t) + 1, features))
    average = int(sum(lines_len) / len(lines_len))
    lower_line = list(filter(lambda t: t < average, lines_len))
    upper_line = list(filter(lambda t: t >= average, lines_len))
    # lower_line can be empty (all lengths >= the integer mean); upper_line
    # always contains at least the maximum, so only the lower side needs a guard
    lower_average = int(sum(lower_line) / len(lower_line)) if lower_line else average
    upper_average = int(sum(upper_line) / len(upper_line))
    max_len = max(lines_len)
    return [lower_average, average, upper_average, max_len]
def construct_bucket_mean_gd(input_features, input_label, word_dict, label_dict):
    """
    Construct bucket by mean for greedy decode, word-level only

    Encodes tokens/labels, prepends the '<start>' label to every label
    sequence, derives length thresholds from the corpus, and delegates the
    actual padding/bucketing to ``construct_bucket_gd``.
    """
    # encode and padding
    features = encode_safe(input_features, word_dict, word_dict['<unk>'])
    labels = encode(input_label, label_dict)
    labels = list(map(lambda t: [label_dict['<start>']] + list(t), labels))
    thresholds = calc_threshold_mean(features)
    return construct_bucket_gd(features, labels, thresholds, word_dict['<eof>'], label_dict['<pad>'])
def construct_bucket_mean_vb(input_features, input_label, word_dict, label_dict, caseless):
    """
    Construct bucket by mean for viterbi decode, word-level only

    Optionally lower-cases tokens, encodes tokens/labels, prepends the
    '<start>' label, and delegates padding/bucketing to
    ``construct_bucket_vb``.
    """
    # encode and padding
    if caseless:
        input_features = list(map(lambda t: list(map(lambda x: x.lower(), t)), input_features))
    features = encode_safe(input_features, word_dict, word_dict['<unk>'])
    labels = encode(input_label, label_dict)
    labels = list(map(lambda t: [label_dict['<start>']] + list(t), labels))
    thresholds = calc_threshold_mean(features)
    return construct_bucket_vb(features, labels, thresholds, word_dict['<eof>'], label_dict['<pad>'], len(label_dict))
def construct_bucket_mean_vb_wc(word_features, input_label, label_dict, char_dict, word_dict, caseless):
    """
    Construct bucket by mean for viterbi decode, word-level and char-level

    Builds char-level encodings (per-word lengths include the separator,
    hence the ``+ 1``), the concatenated forward char stream, encoded
    labels with a '<start>' prefix, and word-level encodings (chars keep
    original case even when ``caseless`` lower-cases words).
    """
    # encode and padding
    char_features = encode2char_safe(word_features, char_dict)
    fea_len = [list(map(lambda t: len(t) + 1, f)) for f in char_features]
    forw_features = concatChar(char_features, char_dict)
    labels = encode(input_label, label_dict)
    labels = list(map(lambda t: [label_dict['<start>']] + list(t), labels))
    thresholds = calc_threshold_mean(fea_len)
    if caseless:
        word_features = list(map(lambda t: list(map(lambda x: x.lower(), t)), word_features))
    word_features = encode_safe(word_features, word_dict, word_dict['<unk>'])
    return construct_bucket_vb_wc(word_features, forw_features, fea_len, labels, thresholds, word_dict['<eof>'], char_dict['\n'], label_dict['<pad>'], len(label_dict))
def construct_bucket_vb_wc(word_features, forw_features, fea_len, input_labels, thresholds, pad_word_feature, pad_char_feature, pad_label, label_size):
    """
    Construct bucket by thresholds for viterbi decode, word-level and char-level

    Returns (bucket_dataset, forw_corpus, back_corpus) where the corpora are
    the concatenated forward char stream (and its reverse) used for language
    model pre-training, and each bucket is a CRFDataset_WC of padded tensors.
    """
    # construct corpus for language model pre-training
    forw_corpus = [pad_char_feature]
    for forw_feature in forw_features:
        forw_corpus.extend(forw_feature + [pad_char_feature])
    back_corpus = forw_corpus[::-1]
    # two way construct, first build the bucket, then calculate padding length, then do the padding
    buckets = [[[], [], [], [], [], [], [], []] for ind in range(len(thresholds))]
    # forw, forw_ind, back, back_in, label, mask
    buckets_len = [0 for ind in range(len(thresholds))]
    # thresholds is the padded length for fea
    # buckets_len is the padded length for char
    # first pass: find, per bucket, the longest padded char stream needed
    for f_f, f_l in zip(forw_features, fea_len):
        cur_len_1 = len(f_l) + 1
        idx = 0
        while thresholds[idx] < cur_len_1:
            idx += 1
        tmp_concat_len = len(f_f) + thresholds[idx] - len(f_l)
        if buckets_len[idx] < tmp_concat_len:
            buckets_len[idx] = tmp_concat_len
    # calc padding
    for f_f, f_l, w_f, i_l in zip(forw_features, fea_len, word_features, input_labels):
        cur_len = len(f_l)
        idx = 0
        cur_len_1 = cur_len + 1
        while thresholds[idx] < cur_len_1:
            idx += 1
        padded_feature = f_f + [pad_char_feature] * (buckets_len[idx] - len(f_f))  # pad feature with <'\n'>, at least one
        padded_feature_len = f_l + [1] * (thresholds[idx] - len(f_l))  # pad feature length with <'\n'>, at least one
        padded_feature_len_cum = list(itertools.accumulate(padded_feature_len))  # start from 0, but the first is ' ', so the position need not to be -1
        buckets[idx][0].append(padded_feature)  # char
        buckets[idx][1].append(padded_feature_len_cum)
        buckets[idx][2].append(padded_feature[::-1])
        # gather positions for the reversed stream: mirror of the cumulative ends
        buckets[idx][3].append([buckets_len[idx] - 1] + [buckets_len[idx] - 1 - tup for tup in padded_feature_len_cum[:-1]])
        buckets[idx][4].append(w_f + [pad_word_feature] * (thresholds[idx] - cur_len)) #word
        # labels are fused pairwise (prev * label_size + cur) for the CRF
        buckets[idx][5].append([i_l[ind] * label_size + i_l[ind + 1] for ind in range(0, cur_len)] + [i_l[cur_len] * label_size + pad_label] + [pad_label * label_size + pad_label] * (thresholds[idx] - cur_len_1))  # has additional start, label
        buckets[idx][6].append([1] * cur_len_1 + [0] * (thresholds[idx] - cur_len_1))  # has additional start, mask
        buckets[idx][7].append([len(f_f) + thresholds[idx] - len(f_l), cur_len_1])
    bucket_dataset = [CRFDataset_WC(torch.LongTensor(bucket[0]), torch.LongTensor(bucket[1]),
                                    torch.LongTensor(bucket[2]), torch.LongTensor(bucket[3]),
                                    torch.LongTensor(bucket[4]), torch.LongTensor(bucket[5]),
                                    torch.ByteTensor(bucket[6]), torch.LongTensor(bucket[7])) for bucket in buckets]
    return bucket_dataset, forw_corpus, back_corpus
def construct_bucket_vb(input_features, input_labels, thresholds, pad_feature, pad_label, label_size):
    """
    Construct bucket by thresholds for viterbi decode, word-level only

    Each sentence goes into the smallest bucket whose threshold fits
    ``len + 1`` (labels carry an extra '<start>'); labels are fused pairwise
    as ``prev * label_size + cur`` for the CRF transition scores.
    """
    buckets = [[[], [], []] for _ in range(len(thresholds))]
    for feature, label in zip(input_features, input_labels):
        cur_len = len(feature)
        idx = 0
        cur_len_1 = cur_len + 1
        while thresholds[idx] < cur_len_1:
            idx += 1
        buckets[idx][0].append(feature + [pad_feature] * (thresholds[idx] - cur_len))
        buckets[idx][1].append([label[ind] * label_size + label[ind + 1] for ind in range(0, cur_len)] + [
            label[cur_len] * label_size + pad_label] + [pad_label * label_size + pad_label] * (
            thresholds[idx] - cur_len_1))
        buckets[idx][2].append([1] * cur_len_1 + [0] * (thresholds[idx] - cur_len_1))
    bucket_dataset = [CRFDataset(torch.LongTensor(bucket[0]), torch.LongTensor(bucket[1]), torch.ByteTensor(bucket[2]))
                      for bucket in buckets]
    return bucket_dataset
def construct_bucket_gd(input_features, input_labels, thresholds, pad_feature, pad_label):
    """
    Construct bucket by thresholds for greedy decode, word-level only

    Stores, per bucket: padded features, next-step targets (labels shifted
    left by one, dropping the '<start>'), and the full padded label sequence.
    """
    buckets = [[[], [], []] for ind in range(len(thresholds))]
    for feature, label in zip(input_features, input_labels):
        cur_len = len(feature)
        cur_len_1 = cur_len + 1
        idx = 0
        while thresholds[idx] < cur_len_1:
            idx += 1
        buckets[idx][0].append(feature + [pad_feature] * (thresholds[idx] - cur_len))
        buckets[idx][1].append(label[1:] + [pad_label] * (thresholds[idx] - cur_len))
        buckets[idx][2].append(label + [pad_label] * (thresholds[idx] - cur_len_1))
    bucket_dataset = [CRFDataset(torch.LongTensor(bucket[0]), torch.LongTensor(bucket[1]), torch.LongTensor(bucket[2])) for bucket in buckets]
    return bucket_dataset
def find_length_from_feats(feats, feat_to_ix):
    """
    Return the unpadded length of a feature sequence: the index of the first
    '<eof>' pad token plus one, or the full length when no pad is present.
    """
    end_position = len(feats) - 1
    for position, feat in enumerate(feats):
        # ``feat.data[0]`` raises on 0-dim tensors in PyTorch >= 0.5 (as
        # produced by iterating a 1-D tensor); ``.item()`` handles both
        # 0-dim and single-element tensors.
        if feat.item() == feat_to_ix['<eof>']:
            end_position = position
            break
    return end_position + 1
def find_length_from_labels(labels, label_to_ix):
    """
    Return the unpadded length of a label sequence: the index of the first
    '<pad>' label, or ``len(labels) - 1`` when no padding is present.
    """
    pad = label_to_ix['<pad>']
    for position, label in enumerate(labels):
        if label == pad:
            return position
    # no padding found — preserve the historical len-1 fallback
    return len(labels) - 1
def revlut(lut):
    """Invert a dictionary, mapping each value back to its key."""
    return dict(zip(lut.values(), lut.keys()))
# Turn a sequence of IOB chunks into single tokens
def iob_to_spans(sequence, lut, strict_iob2=False):
    """
    convert to iob to span

    Collapses an IOB tag sequence (label ids, decoded via ``lut``) into a set
    of 'TYPE@i@j@...' chunk strings listing the member positions.  With
    ``strict_iob2`` a warning is printed on malformed transitions.
    """
    iobtype = 2 if strict_iob2 else 1
    chunks = []
    current = None
    for i, y in enumerate(sequence):
        label = lut[y]
        if label.startswith('B-'):
            # explicit chunk start: close any open chunk first
            if current is not None:
                chunks.append('@'.join(current))
            current = [label.replace('B-', ''), '%d' % i]
        elif label.startswith('I-'):
            if current is not None:
                base = label.replace('I-', '')
                if base == current[0]:
                    current.append('%d' % i)
                else:
                    # type changed without a B- tag: close and restart
                    chunks.append('@'.join(current))
                    if iobtype == 2:
                        print('Warning, type=IOB2, unexpected format ([%s] follows other tag type [%s] @ %d)' % (
                            label, current[0], i))
                    current = [base, '%d' % i]
            else:
                current = [label.replace('I-', ''), '%d' % i]
                if iobtype == 2:
                    print('Warning, unexpected format (I before B @ %d) %s' % (i, label))
        else:
            # O tag: close any open chunk
            if current is not None:
                chunks.append('@'.join(current))
            current = None
    if current is not None:
        chunks.append('@'.join(current))
    return set(chunks)
# Turn a sequence of IOBES chunks into single tokens
def iobes_to_spans(sequence, lut, strict_iob2=False):
    """
    convert to iobes to span

    Collapses an IOBES tag sequence (label ids, decoded via ``lut``) into a
    set of 'TYPE@i@j@...' chunk strings.  S- emits a single-token chunk, E-
    closes the current chunk; with ``strict_iob2`` a warning is printed on
    malformed transitions.
    """
    iobtype = 2 if strict_iob2 else 1
    chunks = []
    current = None
    for i, y in enumerate(sequence):
        label = lut[y]
        if label.startswith('B-'):
            if current is not None:
                chunks.append('@'.join(current))
            current = [label.replace('B-', ''), '%d' % i]
        elif label.startswith('S-'):
            # single-token chunk: close any open chunk, emit immediately
            if current is not None:
                chunks.append('@'.join(current))
                current = None
            base = label.replace('S-', '')
            chunks.append('@'.join([base, '%d' % i]))
        elif label.startswith('I-'):
            if current is not None:
                base = label.replace('I-', '')
                if base == current[0]:
                    current.append('%d' % i)
                else:
                    chunks.append('@'.join(current))
                    if iobtype == 2:
                        print('Warning')
                    current = [base, '%d' % i]
            else:
                current = [label.replace('I-', ''), '%d' % i]
                if iobtype == 2:
                    print('Warning')
        elif label.startswith('E-'):
            if current is not None:
                base = label.replace('E-', '')
                if base == current[0]:
                    current.append('%d' % i)
                    chunks.append('@'.join(current))
                    current = None
                else:
                    # NOTE(review): on a type-mismatched E- the old chunk is
                    # closed and a one-token chunk of the new type is emitted;
                    # looks intentional (recover as much as possible) — confirm.
                    chunks.append('@'.join(current))
                    if iobtype == 2:
                        print('Warning')
                    current = [base, '%d' % i]
                    chunks.append('@'.join(current))
                    current = None
            else:
                current = [label.replace('E-', ''), '%d' % i]
                if iobtype == 2:
                    print('Warning')
                chunks.append('@'.join(current))
                current = None
        else:
            # O tag: close any open chunk
            if current is not None:
                chunks.append('@'.join(current))
            current = None
    if current is not None:
        chunks.append('@'.join(current))
    return set(chunks)
def fill_y(nc, yidx):
    """
    Expand a batch of label-index sequences into a dense one-hot array.

    args:
        nc : number of classes
        yidx (batch, seq) : integer label indices; index 0 is treated as
            padding and its row is left all-zero
    return:
        (batch, seq, nc) integer array
    """
    batchsz = yidx.shape[0]
    siglen = yidx.shape[1]
    # np.int was removed in NumPy 1.24; the builtin int is the documented
    # drop-in replacement and yields the same default integer dtype.
    dense = np.zeros((batchsz, siglen, nc), dtype=int)
    for i in range(batchsz):
        for j in range(siglen):
            idx = int(yidx[i, j])
            if idx > 0:
                dense[i, j, idx] = 1
    return dense
def save_checkpoint(state, track_list, filename):
    """
    Persist a training checkpoint: ``track_list`` is written to
    ``<filename>.json`` and ``state`` to ``<filename>.model`` via
    ``torch.save``.
    """
    with open('{}.json'.format(filename), 'w') as fout:
        json.dump(track_list, fout)
    torch.save(state, '{}.model'.format(filename))
def adjust_learning_rate(optimizer, lr):
    """Set the learning rate of every parameter group of ``optimizer``."""
    for group in optimizer.param_groups:
        group['lr'] = lr
def init_embedding(input_embedding):
    """
    Initialize an embedding weight uniformly in [-b, b] with
    b = sqrt(3 / embedding_dim).
    """
    bound = (3.0 / input_embedding.size(1)) ** 0.5
    nn.init.uniform_(input_embedding, -bound, bound)
def init_linear(input_linear):
    """
    Initialize a linear layer uniformly in [-b, b] with
    b = sqrt(6 / (fan_out + fan_in)) (Glorot-style) and zero the bias when
    one is present.
    """
    fan_sum = input_linear.weight.size(0) + input_linear.weight.size(1)
    bound = (6.0 / fan_sum) ** 0.5
    nn.init.uniform_(input_linear.weight, -bound, bound)
    if input_linear.bias is not None:
        input_linear.bias.data.zero_()
def init_lstm(input_lstm):
    """
    Initialize an LSTM: per-layer uniform(-b, b) weights with
    b = sqrt(6 / (hidden_size + fan_in)) (weight rows hold all 4 gates, hence
    the division by 4), zero biases except the forget-gate slice, which is
    set to 1 to ease gradient flow early in training.
    """
    for ind in range(0, input_lstm.num_layers):
        # getattr instead of eval(): same dynamic attribute lookup without
        # arbitrary-code evaluation
        weight = getattr(input_lstm, 'weight_ih_l' + str(ind))
        bias = np.sqrt(6.0 / (weight.size(0) / 4 + weight.size(1)))
        nn.init.uniform_(weight, -bias, bias)
        weight = getattr(input_lstm, 'weight_hh_l' + str(ind))
        bias = np.sqrt(6.0 / (weight.size(0) / 4 + weight.size(1)))
        nn.init.uniform_(weight, -bias, bias)
    if input_lstm.bias:
        for ind in range(0, input_lstm.num_layers):
            # PyTorch bias layout is [input, forget, cell, output] gates;
            # the [hidden_size : 2*hidden_size] slice is the forget gate
            weight = getattr(input_lstm, 'bias_ih_l' + str(ind))
            weight.data.zero_()
            weight.data[input_lstm.hidden_size: 2 * input_lstm.hidden_size] = 1
            weight = getattr(input_lstm, 'bias_hh_l' + str(ind))
            weight.data.zero_()
            weight.data[input_lstm.hidden_size: 2 * input_lstm.hidden_size] = 1
| 29,437 | 34.424789 | 243 | py |
LM-LSTM-CRF | LM-LSTM-CRF-master/model/lstm_crf.py | """
.. module:: lstm_crf
:synopsis: lstm_crf
.. moduleauthor:: Liyuan Liu
"""
import torch
import torch.autograd as autograd
import torch.nn as nn
import model.crf as crf
import model.utils as utils
class LSTM_CRF(nn.Module):
    """LSTM_CRF model: word embeddings -> bidirectional LSTM -> CRF layer.
    args:
        vocab_size: size of word dictionary
        tagset_size: size of label set
        embedding_dim: size of word embedding
        hidden_dim: size of word-level blstm hidden dim
        rnn_layers: number of word-level lstm layers
        dropout_ratio: dropout ratio
        large_CRF: use CRF_L or not, refer model.crf.CRF_L and model.crf.CRF_S for more details
    """
    def __init__(self, vocab_size, tagset_size, embedding_dim, hidden_dim, rnn_layers, dropout_ratio, large_CRF=True):
        super(LSTM_CRF, self).__init__()
        self.embedding_dim = embedding_dim
        self.hidden_dim = hidden_dim
        self.vocab_size = vocab_size
        self.word_embeds = nn.Embedding(vocab_size, embedding_dim)
        # hidden_dim is split evenly between the two directions
        self.lstm = nn.LSTM(embedding_dim, hidden_dim // 2,
                            num_layers=rnn_layers, bidirectional=True, dropout=dropout_ratio)
        self.rnn_layers = rnn_layers
        self.dropout1 = nn.Dropout(p=dropout_ratio)
        self.dropout2 = nn.Dropout(p=dropout_ratio)
        self.tagset_size = tagset_size
        if large_CRF:
            self.crf = crf.CRF_L(hidden_dim, tagset_size)
        else:
            self.crf = crf.CRF_S(hidden_dim, tagset_size)
        self.batch_size = 1
        self.seq_length = 1
    def rand_init_hidden(self):
        """
        random initialize hidden variable
        """
        # 2 * rnn_layers: one state per direction per layer
        return autograd.Variable(
            torch.randn(2 * self.rnn_layers, self.batch_size, self.hidden_dim // 2)), autograd.Variable(
            torch.randn(2 * self.rnn_layers, self.batch_size, self.hidden_dim // 2))
    def set_batch_size(self, bsize):
        """
        set batch size
        """
        self.batch_size = bsize
    def set_batch_seq_size(self, sentence):
        """
        set batch size and sequence length from a (seq_len, batch) input
        """
        tmp = sentence.size()
        self.seq_length = tmp[0]
        self.batch_size = tmp[1]
    def load_pretrained_embedding(self, pre_embeddings):
        """
        load pre-trained word embedding
        args:
            pre_word_embeddings (self.word_size, self.word_dim) : pre-trained embedding
        """
        assert (pre_embeddings.size()[1] == self.embedding_dim)
        self.word_embeds.weight = nn.Parameter(pre_embeddings)
    def rand_init_embedding(self):
        # random re-initialization of the embedding table
        utils.init_embedding(self.word_embeds.weight)
    def rand_init(self, init_embedding=False):
        """
        random initialization
        args:
            init_embedding: random initialize embedding or not
        """
        if init_embedding:
            utils.init_embedding(self.word_embeds.weight)
        utils.init_lstm(self.lstm)
        self.crf.rand_init()
    def forward(self, sentence, hidden=None):
        '''
        args:
            sentence (word_seq_len, batch_size) : word-level representation of sentence
            hidden: initial hidden state
        return:
            crf output (word_seq_len, batch_size, tag_size, tag_size), hidden
        '''
        self.set_batch_seq_size(sentence)
        embeds = self.word_embeds(sentence)
        d_embeds = self.dropout1(embeds)
        lstm_out, hidden = self.lstm(d_embeds, hidden)
        # flatten (seq, batch, hidden) to rows for the CRF projection
        lstm_out = lstm_out.view(-1, self.hidden_dim)
        d_lstm_out = self.dropout2(lstm_out)
        crf_out = self.crf(d_lstm_out)
        crf_out = crf_out.view(self.seq_length, self.batch_size, self.tagset_size, self.tagset_size)
        return crf_out, hidden
LM-LSTM-CRF | LM-LSTM-CRF-master/model/evaluator.py | """
.. module:: evaluator
:synopsis: evaluation method (f1 score and accuracy)
.. moduleauthor:: Liyuan Liu, Frank Xu
"""
import torch
import numpy as np
import itertools
import model.utils as utils
from torch.autograd import Variable
from model.crf import CRFDecode_vb
class eval_batch:
    """Base class for evaluation, provide method to calculate f1 score and accuracy
    args:
        packer: provide method to convert target into original space [TODO: need to improve]
        l_map: dictionary for labels
    """
    def __init__(self, packer, l_map):
        self.packer = packer
        self.l_map = l_map
        self.r_l_map = utils.revlut(l_map)  # label id -> label string
        # per-label statistics, filled by eval_instance()
        self.totalp_counts={}
        self.truep_counts={}
        self.fn_counts={}
        self.fp_counts={}
        self.f1={}
    def reset(self):
        """
        re-set all states
        """
        self.correct_labels = 0
        self.total_labels = 0
        self.gold_count = 0
        self.guess_count = 0
        self.overlap_count = 0
        self.totalp_counts={}
        self.truep_counts={}
        self.fn_counts={}
        self.fp_counts={}
        self.f1={}
    def calc_f1_batch(self, decoded_data, target_data):
        """
        update statics for f1 score
        args:
            decoded_data (batch_size, seq_len): prediction sequence
            target_data (batch_size, seq_len): ground-truth
        """
        # note: predictions are unbound along dim 1, targets along dim 0
        batch_decoded = torch.unbind(decoded_data, 1)
        batch_targets = torch.unbind(target_data, 0)
        for decoded, target in zip(batch_decoded, batch_targets):
            gold = self.packer.convert_for_eval(target)
            # remove padding
            length = utils.find_length_from_labels(gold, self.l_map)
            gold = gold[:length]
            best_path = decoded[:length]
            correct_labels_i, total_labels_i, gold_count_i, guess_count_i, overlap_count_i = self.eval_instance(best_path.numpy(), gold.numpy())
            self.correct_labels += correct_labels_i
            self.total_labels += total_labels_i
            self.gold_count += gold_count_i
            self.guess_count += guess_count_i
            self.overlap_count += overlap_count_i
    def calc_acc_batch(self, decoded_data, target_data):
        """
        update statics for accuracy
        args:
            decoded_data (batch_size, seq_len): prediction sequence
            target_data (batch_size, seq_len): ground-truth
        """
        batch_decoded = torch.unbind(decoded_data, 1)
        batch_targets = torch.unbind(target_data, 0)
        for decoded, target in zip(batch_decoded, batch_targets):
            gold = self.packer.convert_for_eval(target)
            # remove padding
            length = utils.find_length_from_labels(gold, self.l_map)
            gold = gold[:length].numpy()
            best_path = decoded[:length].numpy()
            self.total_labels += length
            self.correct_labels += np.sum(np.equal(best_path, gold))
    def f1_score(self):
        """
        calculate f1 score based on statics

        Returns a dict mapping 'total' (and each seen label) to a tuple
        (f1, precision, recall, accuracy, message).
        """
        if self.guess_count == 0:
            return {'total': (0.0, 0.0, 0.0, 0.0, '')}
        precision = self.overlap_count / float(self.guess_count)
        recall = self.overlap_count / float(self.gold_count)
        if precision == 0.0 or recall == 0.0:
            return {'total': (0.0, 0.0, 0.0, 0.0, '')}
        f = 2 * (precision * recall) / (precision + recall)
        accuracy = float(self.correct_labels) / self.total_labels
        message=""
        self.f1['total'] = (f, precision, recall, accuracy, message)
        for label in self.totalp_counts:
            # NOTE(review): the default of 1 (not 0) credits one true positive
            # to labels that were never predicted correctly — looks
            # unintended; confirm before changing.
            tp = self.truep_counts.get(label,1)
            fn = sum(self.fn_counts.get(label,{}).values())
            fp = sum(self.fp_counts.get(label,{}).values())
            # print(label, str(tp), str(fp), str(fn), str(self.totalp_counts.get(label,0)))
            precision = tp / float(tp+fp+1e-9)
            recall = tp / float(tp+fn+1e-9)
            f = 2 * (precision * recall) / (precision + recall+1e-9)
            message = str(self.fn_counts.get(label, {}))
            self.f1[label] = (f, precision, recall, 0, message)
        return self.f1
    def acc_score(self):
        """
        calculate accuracy score based on statics
        """
        if 0 == self.total_labels:
            return 0.0
        accuracy = float(self.correct_labels) / self.total_labels
        return accuracy
    def eval_instance(self, best_path, gold):
        """
        update statics for one instance
        args:
            best_path (seq_len): predicted
            gold (seq_len): ground-truth
        return:
            (correct_labels, total_labels, gold_count, guess_count,
            overlap_count) for this instance; per-label tp/fp/fn dicts are
            updated in place on self.
        """
        total_labels = len(best_path)
        correct_labels = np.sum(np.equal(best_path, gold))
        for i in range(total_labels):
            gold_label = self.r_l_map[gold[i]]
            guessed_label = self.r_l_map[best_path[i]]
            self.totalp_counts[gold_label] = 1 + self.totalp_counts.get(gold_label,0)
            if gold_label == guessed_label:
                self.truep_counts[gold_label] = 1 + self.truep_counts.get(gold_label,0)
            else:
                # fn_counts[gold][guess] / fp_counts[guess][gold] record the
                # confusion pairs for the error message in f1_score()
                val = self.fn_counts.get(gold_label,{})
                val[guessed_label] = 1+ val.get(guessed_label,0)
                self.fn_counts[gold_label]=val
                val2 = self.fp_counts.get(guessed_label,{})
                val2[gold_label] = 1+ val2.get(gold_label,0)
                self.fp_counts[guessed_label] = val2
        gold_chunks = utils.iobes_to_spans(gold, self.r_l_map)
        gold_count = len(gold_chunks)
        guess_chunks = utils.iobes_to_spans(best_path, self.r_l_map)
        guess_count = len(guess_chunks)
        overlap_chunks = gold_chunks & guess_chunks
        overlap_count = len(overlap_chunks)
        return correct_labels, total_labels, gold_count, guess_count, overlap_count
class eval_w(eval_batch):
    """evaluation class for word level model (LSTM-CRF)
    args:
        packer: provide method to convert target into original space [TODO: need to improve]
        l_map: dictionary for labels
        score_type: use f1score with using 'f'
    """
    def __init__(self, packer, l_map, score_type):
        eval_batch.__init__(self, packer, l_map)
        self.decoder = CRFDecode_vb(len(l_map), l_map['<start>'], l_map['<pad>'])
        # pick the batch-update / final-score pair based on the metric
        if 'f' in score_type:
            self.eval_b = self.calc_f1_batch
            self.calc_s = self.f1_score
        else:
            self.eval_b = self.calc_acc_batch
            self.calc_s = self.acc_score
    def calc_score(self, ner_model, dataset_loader):
        """
        calculate score for pre-selected metrics
        args:
            ner_model: LSTM-CRF model
            dataset_loader: loader class for test set
        """
        ner_model.eval()
        self.reset()
        for feature, tg, mask in itertools.chain.from_iterable(dataset_loader):
            fea_v, _, mask_v = self.packer.repack_vb(feature, tg, mask)
            scores, _ = ner_model(fea_v)
            decoded = self.decoder.decode(scores.data, mask_v.data)
            self.eval_b(decoded, tg)
        return self.calc_s()
class eval_wc(eval_batch):
    """evaluation class for LM-LSTM-CRF
    args:
        packer: provide method to convert target into original space [TODO: need to improve]
        l_map: dictionary for labels
        score_type: use f1score with using 'f'
    """
    def __init__(self, packer, l_map, score_type):
        eval_batch.__init__(self, packer, l_map)
        self.decoder = CRFDecode_vb(len(l_map), l_map['<start>'], l_map['<pad>'])
        # pick the batch-update / final-score pair based on the metric
        if 'f' in score_type:
            self.eval_b = self.calc_f1_batch
            self.calc_s = self.f1_score
        else:
            self.eval_b = self.calc_acc_batch
            self.calc_s = self.acc_score
    def calc_score(self, ner_model, dataset_loader):
        """
        calculate score for pre-selected metrics
        args:
            ner_model: LM-LSTM-CRF model
            dataset_loader: loader class for test set
        """
        ner_model.eval()
        self.reset()
        for f_f, f_p, b_f, b_p, w_f, tg, mask_v, len_v in itertools.chain.from_iterable(dataset_loader):
            f_f, f_p, b_f, b_p, w_f, _, mask_v = self.packer.repack_vb(f_f, f_p, b_f, b_p, w_f, tg, mask_v, len_v)
            scores = ner_model(f_f, f_p, b_f, b_p, w_f)
            decoded = self.decoder.decode(scores.data, mask_v.data)
            self.eval_b(decoded, tg)
        return self.calc_s()
| 8,538 | 33.01992 | 144 | py |
LM-LSTM-CRF | LM-LSTM-CRF-master/model/crf.py | """
.. module:: crf
:synopsis: conditional random field
.. moduleauthor:: Liyuan Liu
"""
import torch
import torch.autograd as autograd
import torch.nn as nn
import torch.optim as optim
import torch.sparse as sparse
import model.utils as utils
class CRF_L(nn.Module):
    """CRF layer in the style of Ma et al. 2016; emits per-position transition
    scores directly and therefore has more parameters than CRF_S.

    args:
        hidden_dim: input feature dimension
        tagset_size: number of target tags
        if_bias: whether the linear projection uses a bias term
    """
    def __init__(self, hidden_dim, tagset_size, if_bias=True):
        super(CRF_L, self).__init__()
        self.tagset_size = tagset_size
        # one score per (from_tag, to_tag) pair at every position
        self.hidden2tag = nn.Linear(hidden_dim, self.tagset_size * self.tagset_size, bias=if_bias)

    def rand_init(self):
        """Randomly (re)initialize the projection layer."""
        utils.init_linear(self.hidden2tag)

    def forward(self, feats):
        """Project features to CRF transition scores.

        args:
            feats (batch_size, seq_len, hidden_dim): input features
        return:
            tensor of shape ((batch_size * seq_len), tag_size, tag_size)
        """
        projected = self.hidden2tag(feats)
        return projected.view(-1, self.tagset_size, self.tagset_size)
class CRF_S(nn.Module):
    """CRF layer in the style of Lample et al. 2016; a single shared transition
    matrix plus per-position emission scores (fewer parameters than CRF_L).

    args:
        hidden_dim: input feature dimension
        tagset_size: number of target tags
        if_bias: whether the linear projection uses a bias term
    """
    def __init__(self, hidden_dim, tagset_size, if_bias=True):
        super(CRF_S, self).__init__()
        self.tagset_size = tagset_size
        self.hidden2tag = nn.Linear(hidden_dim, self.tagset_size, bias=if_bias)
        # shared (from_tag, to_tag) transition scores
        self.transitions = nn.Parameter(torch.Tensor(self.tagset_size, self.tagset_size))

    def rand_init(self):
        """Randomly initialize the projection and zero the transitions."""
        utils.init_linear(self.hidden2tag)
        self.transitions.data.zero_()

    def forward(self, feats):
        """Combine emissions and shared transitions into CRF scores.

        args:
            feats (batch_size, seq_len, hidden_dim): input features
        return:
            tensor of shape ((batch_size * seq_len), tag_size, tag_size)
        """
        emit = self.hidden2tag(feats).view(-1, self.tagset_size, 1)
        n = emit.size(0)
        trans = self.transitions.view(1, self.tagset_size, self.tagset_size)
        # broadcast emissions down rows and add the shared transition matrix
        return emit.expand(n, self.tagset_size, self.tagset_size) + trans.expand(n, self.tagset_size, self.tagset_size)
class CRFRepack:
    """Batch repacker for the word-level model: transposes batch-first tensors
    to sequence-first and optionally moves them to the GPU.

    args:
        tagset_size: number of target tags
        if_cuda: move tensors to GPU when True
    """
    def __init__(self, tagset_size, if_cuda):
        self.tagset_size = tagset_size
        self.if_cuda = if_cuda

    def repack_vb(self, feature, target, mask):
        """Repack a batch for the viterbi loss.

        args:
            feature (Seq_len, Batch_size): input feature
            target (Seq_len, Batch_size): output target
            mask (Seq_len, Batch_size): padding mask
        return:
            feature, target (with trailing singleton dim), mask — all transposed
        """
        feature_t = feature.transpose(0, 1)
        target_t = target.transpose(0, 1)
        mask_t = mask.transpose(0, 1)
        if self.if_cuda:
            return feature_t.cuda(), target_t.unsqueeze(2).cuda(), mask_t.cuda()
        return feature_t, target_t.contiguous().unsqueeze(2), mask_t.contiguous()

    def repack_gd(self, feature, target, current):
        """Repack a batch for the greedy loss.

        args:
            feature (Seq_len, Batch_size): input feature
            target (Seq_len, Batch_size): output target
            current (Seq_len, Batch_size): current state
        return:
            feature (transposed), target flattened, current flattened to (-1, 1, 1)
        """
        feature_t = feature.transpose(0, 1)
        target_t = target.transpose(0, 1)
        current_t = current.transpose(0, 1)
        if self.if_cuda:
            return feature_t.cuda(), target_t.cuda().view(-1), current_t.cuda().view(-1, 1, 1)
        return feature_t, target_t.contiguous().view(-1), current_t.contiguous().view(-1, 1, 1)

    def convert_for_eval(self, target):
        """Map training labels back to the original decoding space.

        args:
            target: labels used in training
        return:
            labels used at test time (modulo the tagset size)
        """
        return target % self.tagset_size
class CRFRepack_WC:
    """Packer for model with char-level and word-level
    args:
        tagset_size: target_set_size
        if_cuda: whether use GPU
    """

    def __init__(self, tagset_size, if_cuda):
        self.tagset_size = tagset_size
        self.if_cuda = if_cuda

    def repack_vb(self, fc_feature, fc_position, bc_feature, bc_position, word_feature, target, mask, batch_len):
        """packer for viterbi loss

        Trims every tensor to the longest char/word length present in the batch,
        transposes batch-first tensors to sequence-first, and optionally moves
        them to the GPU.

        args:
            fc_feature (Char_Seq_len, Batch_size) : forward_char input feature
            fc_position (Word_Seq_len, Batch_size) : forward_char input position
            bc_feature (Char_Seq_len, Batch_size) : backward_char input feature
            bc_position (Word_Seq_len, Batch_size) : backward_char input position
            word_feature (Word_Seq_len, Batch_size) : input word feature
            target (Seq_len, Batch_size) : output target
            mask (Word_Seq_len, Batch_size) : padding mask
            batch_len (Batch_size, 2) : length of instances in one batch
        return:
            f_f (Char_Reduced_Seq_len, Batch_size), f_p (Word_Reduced_Seq_len, Batch_size), b_f (Char_Reduced_Seq_len, Batch_size), b_p (Word_Reduced_Seq_len, Batch_size), w_f (size Word_Seq_Len, Batch_size), target (Reduced_Seq_len, Batch_size), mask (Word_Reduced_Seq_len, Batch_size)
        """
        # mlen[0] = max char length, mlen[1] = max word length in this batch
        mlen, _ = batch_len.max(0)
        mlen = mlen.squeeze()
        # ocl: original (padded) char-sequence length of the backward features
        ocl = bc_feature.size(1)
        if self.if_cuda:
            fc_feature = fc_feature[:, 0:mlen[0]].transpose(0, 1).cuda()
            fc_position = fc_position[:, 0:mlen[1]].transpose(0, 1).cuda()
            # backward features are right-aligned, so keep the LAST mlen[0] chars
            bc_feature = bc_feature[:, -mlen[0]:].transpose(0, 1).cuda()
            # shift backward positions to account for the chars trimmed off the left
            bc_position = (bc_position[:, 0:mlen[1]] - ocl + mlen[0]).transpose(0, 1).cuda()
            word_feature = word_feature[:, 0:mlen[1]].transpose(0, 1).cuda()
            tg_v = target[:, 0:mlen[1]].transpose(0, 1).unsqueeze(2).cuda()
            mask_v = mask[:, 0:mlen[1]].transpose(0, 1).cuda()
        else:
            fc_feature = fc_feature[:, 0:mlen[0]].transpose(0, 1)
            fc_position = fc_position[:, 0:mlen[1]].transpose(0, 1)
            bc_feature = bc_feature[:, -mlen[0]:].transpose(0, 1)
            bc_position = (bc_position[:, 0:mlen[1]] - ocl + mlen[0]).transpose(0, 1)
            word_feature = word_feature[:, 0:mlen[1]].transpose(0, 1)
            tg_v = target[:, 0:mlen[1]].transpose(0, 1).unsqueeze(2)
            mask_v = mask[:, 0:mlen[1]].transpose(0, 1).contiguous()
        return fc_feature, fc_position, bc_feature, bc_position, word_feature, tg_v, mask_v

    def convert_for_eval(self, target):
        """convert for eval
        args:
            target: input labels used in training
        return:
            output labels used in test (modulo the tagset size)
        """
        return target % self.tagset_size
class CRFLoss_gd(nn.Module):
    """loss for greedy decode loss, i.e., although its for CRF Layer, we calculate the loss as
    .. math::
        \sum_{j=1}^n \log (p(\hat{y}_{j+1}|z_{j+1}, \hat{y}_{j}))
    instead of
    .. math::
        \sum_{j=1}^n \log (\phi(\hat{y}_{j-1}, \hat{y}_j, \mathbf{z}_j)) - \log (\sum_{\mathbf{y}' \in \mathbf{Y}(\mathbf{Z})} \prod_{j=1}^n \phi(y'_{j-1}, y'_j, \mathbf{z}_j) )
    args:
        tagset_size: target_set_size
        start_tag: ind for <start> (unused here; kept for interface parity)
        end_tag: ind for <pad> (unused here; kept for interface parity)
        average_batch: whether average the loss among batch
    """

    def __init__(self, tagset_size, start_tag, end_tag, average_batch=True):
        super(CRFLoss_gd, self).__init__()
        self.tagset_size = tagset_size
        self.average_batch = average_batch
        # `size_average=` is deprecated (and removed in recent torch);
        # `reduction=` expresses the identical choice: mean over the batch
        # when averaging, plain sum otherwise.
        self.crit = nn.CrossEntropyLoss(reduction='mean' if self.average_batch else 'sum')

    def forward(self, scores, target, current):
        """
        args:
            scores (Word_Seq_len, Batch_size, target_size_from, target_size_to): crf scores
            target (Word_Seq_len, Batch_size): golden list
            current (Word_Seq_len, Batch_size): current state
        return:
            crf greedy loss
        """
        ins_num = current.size(0)
        # select the score row corresponding to the current (from) tag
        current = current.expand(ins_num, 1, self.tagset_size)
        scores = scores.view(ins_num, self.tagset_size, self.tagset_size)
        current_score = torch.gather(scores, 1, current).squeeze()
        # cross-entropy over the to-tag distribution
        return self.crit(current_score, target)
class CRFLoss_vb(nn.Module):
    """loss for viterbi decode
    .. math::
        \sum_{j=1}^n \log (\phi(\hat{y}_{j-1}, \hat{y}_j, \mathbf{z}_j)) - \log (\sum_{\mathbf{y}' \in \mathbf{Y}(\mathbf{Z})} \prod_{j=1}^n \phi(y'_{j-1}, y'_j, \mathbf{z}_j) )
    args:
        tagset_size: target_set_size
        start_tag: ind for <start>
        end_tag: ind for <pad>
        average_batch: whether average the loss among batch
    """

    def __init__(self, tagset_size, start_tag, end_tag, average_batch=True):
        super(CRFLoss_vb, self).__init__()
        self.tagset_size = tagset_size
        self.start_tag = start_tag
        self.end_tag = end_tag
        self.average_batch = average_batch

    def forward(self, scores, target, mask):
        """Negative log-likelihood: forward-algorithm partition minus gold score.

        args:
            scores (seq_len, bat_size, target_size_from, target_size_to) : crf scores
            target (seq_len, bat_size, 1) : golden state
            mask (size seq_len, bat_size) : mask for padding
        return:
            loss
        """
        # calculate batch size and seq len
        seq_len = scores.size(0)
        bat_size = scores.size(1)
        # calculate sentence score: sum the scores along the gold path,
        # ignoring padded positions via the mask
        tg_energy = torch.gather(scores.view(seq_len, bat_size, -1), 2, target).view(seq_len, bat_size)  # seq_len * bat_size
        tg_energy = tg_energy.masked_select(mask).sum()
        # calculate forward partition score
        # build iter
        seq_iter = enumerate(scores)
        # the first score should start with <start>
        _, inivalues = seq_iter.__next__()  # bat_size * from_target_size * to_target_size
        # only need start from start_tag
        partition = inivalues[:, self.start_tag, :].clone()  # bat_size * to_target_size
        # iter over last scores
        for idx, cur_values in seq_iter:
            # previous to_target is current from_target
            # partition: previous results log(exp(from_target)), #(batch_size * from_target)
            # cur_values: bat_size * from_target * to_target
            cur_values = cur_values + partition.contiguous().view(bat_size, self.tagset_size, 1).expand(bat_size, self.tagset_size, self.tagset_size)
            cur_partition = utils.log_sum_exp(cur_values, self.tagset_size)
            # (bat_size * from_target * to_target) -> (bat_size * to_target)
            # partition = utils.switch(partition, cur_partition, mask[idx].view(bat_size, 1).expand(bat_size, self.tagset_size)).view(bat_size, -1)
            # only update the partition where the mask is on; padded positions
            # keep their previous partition value
            mask_idx = mask[idx, :].view(bat_size, 1).expand(bat_size, self.tagset_size)
            partition.masked_scatter_(mask_idx, cur_partition.masked_select(mask_idx))  # 0 for partition, 1 for cur_partition
        # only need end at end_tag
        partition = partition[:, self.end_tag].sum()
        # average = mask.sum()
        # average_batch
        if self.average_batch:
            loss = (partition - tg_energy) / bat_size
        else:
            loss = (partition - tg_energy)
        return loss
class CRFDecode_vb():
    """Batch-mode viterbi decode
    args:
        tagset_size: target_set_size
        start_tag: ind for <start>
        end_tag: ind for <pad>
        average_batch: whether average the loss among batch
    """

    def __init__(self, tagset_size, start_tag, end_tag, average_batch=True):
        self.tagset_size = tagset_size
        self.start_tag = start_tag
        self.end_tag = end_tag
        self.average_batch = average_batch

    def decode(self, scores, mask):
        """Find the optimal path with viterbe decode

        args:
            scores (size seq_len, bat_size, target_size_from, target_size_to) : crf scores
            mask (seq_len, bat_size) : mask for padding
        return:
            decoded sequence (size seq_len, bat_size)
        """
        # calculate batch size and seq len
        seq_len = scores.size(0)
        bat_size = scores.size(1)
        # invert mask: 1 now marks PADDED positions
        # NOTE(review): assumes `mask` is an integer/byte tensor — `1 - mask`
        # on a bool tensor is rejected by recent torch versions; confirm dtype
        mask = 1 - mask
        decode_idx = torch.LongTensor(seq_len-1, bat_size)
        # calculate forward score and checkpoint
        # build iter
        seq_iter = enumerate(scores)
        # the first score should start with <start>
        _, inivalues = seq_iter.__next__()  # bat_size * from_target_size * to_target_size
        # only need start from start_tag
        forscores = inivalues[:, self.start_tag, :]  # bat_size * to_target_size
        back_points = list()
        # iter over last scores
        for idx, cur_values in seq_iter:
            # previous to_target is current from_target
            # partition: previous results log(exp(from_target)), #(batch_size * from_target)
            # cur_values: bat_size * from_target * to_target
            cur_values = cur_values + forscores.contiguous().view(bat_size, self.tagset_size, 1).expand(bat_size, self.tagset_size, self.tagset_size)
            # best previous tag for every current tag, and its score
            forscores, cur_bp = torch.max(cur_values, 1)
            # padded positions point at end_tag so backtracking stays consistent
            cur_bp.masked_fill_(mask[idx].view(bat_size, 1).expand(bat_size, self.tagset_size), self.end_tag)
            back_points.append(cur_bp)
        # backtrack from end_tag through the stored back-pointers
        pointer = back_points[-1][:, self.end_tag]
        decode_idx[-1] = pointer
        for idx in range(len(back_points)-2, -1, -1):
            back_point = back_points[idx]
            index = pointer.contiguous().view(-1,1)
            pointer = torch.gather(back_point, 1, index).view(-1)
            decode_idx[idx] = pointer
        return decode_idx
| 14,450 | 36.73107 | 287 | py |
MICRO | MICRO-main/codes/main.py | from datetime import datetime
import math
import os
import random
import sys
from time import time
from tqdm import tqdm
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch.sparse as sparse
from utility.parser import parse_args
from Models import MICRO
from utility.batch_test import *
from utility.logging import Logger
args = parse_args()  # command-line configuration shared across this module
class Trainer(object):
    """Training/evaluation driver for MICRO.

    Builds the model from ``data_config`` and the global ``args``, then runs the
    BPR + contrastive training loop with early stopping on validation recall.
    Relies on module-level globals: ``args``, ``data_generator``, ``test_torch``.
    """
    def __init__(self, data_config):
        # argument settings
        self.n_users = data_config['n_users']
        self.n_items = data_config['n_items']
        self.task_name = "%s_%s_%s" % (datetime.now().strftime('%Y-%m-%d %H:%M:%S'), args.dataset, args.cf_model,)
        self.logger = Logger(filename=self.task_name, is_debug=args.debug)
        self.logger.logging("PID: %d" % os.getpid())
        self.logger.logging(str(args))
        # NOTE(review): eval() on CLI strings — assumes trusted command line
        self.mess_dropout = eval(args.mess_dropout)
        self.lr = args.lr
        self.emb_dim = args.embed_size
        self.batch_size = args.batch_size
        self.weight_size = eval(args.weight_size)
        self.n_layers = len(self.weight_size)
        self.regs = eval(args.regs)
        self.decay = self.regs[0]
        # normalized user-item adjacency, moved to GPU once up front
        self.norm_adj = data_config['norm_adj']
        self.norm_adj = self.sparse_mx_to_torch_sparse_tensor(self.norm_adj).float().cuda()
        image_feats = np.load('../data/{}/image_feat.npy'.format(args.dataset))
        text_feats = np.load('../data/{}/text_feat.npy'.format(args.dataset))
        self.model = MICRO(self.n_users, self.n_items, self.emb_dim, self.weight_size, self.mess_dropout, image_feats, text_feats)
        self.model = self.model.cuda()
        self.optimizer = optim.Adam(self.model.parameters(), lr=self.lr)
        self.lr_scheduler = self.set_lr_scheduler()

    def set_lr_scheduler(self):
        """Exponential decay: lr multiplied by 0.96 every 50 epochs (smoothly)."""
        fac = lambda epoch: 0.96 ** (epoch / 50)
        scheduler = optim.lr_scheduler.LambdaLR(self.optimizer, lr_lambda=fac)
        return scheduler

    def test(self, users_to_test, is_val):
        """Score ``users_to_test`` on the validation or test split (no grads)."""
        self.model.eval()
        with torch.no_grad():
            ua_embeddings, ia_embeddings, *rest = self.model(self.norm_adj, build_item_graph=True)
            result = test_torch(ua_embeddings, ia_embeddings, users_to_test, is_val)
        return result

    def train(self):
        """Main optimization loop with periodic evaluation and early stopping."""
        training_time_list = []
        loss_loger, pre_loger, rec_loger, ndcg_loger, hit_loger = [], [], [], [], []
        stopping_step = 0
        should_stop = False
        cur_best_pre_0 = 0.
        n_batch = data_generator.n_train // args.batch_size + 1
        best_recall = 0
        for epoch in (range(args.epoch)):
            t1 = time()
            loss, mf_loss, emb_loss, reg_loss = 0., 0., 0., 0.
            contrastive_loss = 0.
            n_batch = data_generator.n_train // args.batch_size + 1
            f_time, b_time, loss_time, opt_time, clip_time, emb_time = 0., 0., 0., 0., 0., 0.
            sample_time = 0.
            # rebuild the modality item graphs only on the first batch per epoch
            build_item_graph = True
            for idx in (range(n_batch)):
                self.model.train()
                self.optimizer.zero_grad()
                sample_t1 = time()
                users, pos_items, neg_items = data_generator.sample()
                sample_time += time() - sample_t1
                ua_embeddings, ia_embeddings, image_item_embeds, text_item_embeds, fusion_embed = self.model(self.norm_adj, build_item_graph=build_item_graph)
                build_item_graph = False
                u_g_embeddings = ua_embeddings[users]
                pos_i_g_embeddings = ia_embeddings[pos_items]
                neg_i_g_embeddings = ia_embeddings[neg_items]
                batch_mf_loss, batch_emb_loss, batch_reg_loss = self.bpr_loss(u_g_embeddings, pos_i_g_embeddings,
                                                                              neg_i_g_embeddings)
                # modality-to-fusion contrastive alignment terms
                batch_contrastive_loss = 0
                batch_contrastive_loss += self.model.batched_contrastive_loss(image_item_embeds,fusion_embed)
                batch_contrastive_loss += self.model.batched_contrastive_loss(text_item_embeds,fusion_embed)
                batch_contrastive_loss *= args.loss_ratio
                batch_loss = batch_mf_loss + batch_emb_loss + batch_reg_loss + batch_contrastive_loss
                batch_loss.backward(retain_graph=False)
                self.optimizer.step()
                loss += float(batch_loss)
                mf_loss += float(batch_mf_loss)
                emb_loss += float(batch_emb_loss)
                reg_loss += float(batch_reg_loss)
                contrastive_loss += float(batch_contrastive_loss)
            self.lr_scheduler.step()
            del ua_embeddings, ia_embeddings, u_g_embeddings, neg_i_g_embeddings, pos_i_g_embeddings
            if math.isnan(loss) == True:
                self.logger.logging('ERROR: loss is nan.')
                sys.exit()
            # evaluate only every `verbose` epochs; otherwise just log training stats
            if (epoch + 1) % args.verbose != 0:
                perf_str = 'Epoch %d [%.1fs]: train==[%.5f=%.5f + %.5f + %.5f]' % (
                    epoch, time() - t1, loss, mf_loss, emb_loss, reg_loss)
                training_time_list.append(time() - t1)
                self.logger.logging(perf_str)
                continue
            t2 = time()
            users_to_test = list(data_generator.test_set.keys())
            users_to_val = list(data_generator.val_set.keys())
            ret = self.test(users_to_val, is_val=True)
            training_time_list.append(t2 - t1)
            t3 = time()
            loss_loger.append(loss)
            rec_loger.append(ret['recall'])
            pre_loger.append(ret['precision'])
            ndcg_loger.append(ret['ndcg'])
            hit_loger.append(ret['hit_ratio'])
            if args.verbose > 0:
                perf_str = 'Epoch %d [%.1fs + %.1fs]: train==[%.5f=%.5f + %.5f + %.5f], recall=[%.5f, %.5f], ' \
                           'precision=[%.5f, %.5f], hit=[%.5f, %.5f], ndcg=[%.5f, %.5f]' % \
                           (epoch, t2 - t1, t3 - t2, loss, mf_loss, emb_loss, reg_loss, ret['recall'][0],
                            ret['recall'][-1],
                            ret['precision'][0], ret['precision'][-1], ret['hit_ratio'][0], ret['hit_ratio'][-1],
                            ret['ndcg'][0], ret['ndcg'][-1])
                self.logger.logging(perf_str)
            # early stopping on validation recall@Ks[1]; run test split on improvement
            if ret['recall'][1] > best_recall:
                best_recall = ret['recall'][1]
                test_ret = self.test(users_to_test, is_val=False)
                self.logger.logging("Test_Recall@%d: %.5f" % (eval(args.Ks)[1], test_ret['recall'][1]))
                stopping_step = 0
            elif stopping_step < args.early_stopping_patience:
                stopping_step += 1
                self.logger.logging('#####Early stopping steps: %d #####' % stopping_step)
            else:
                self.logger.logging('#####Early stop! #####')
                break
        # NOTE(review): `test_ret` is unbound if recall never improves before
        # the loop ends — would raise NameError here; confirm intended
        self.logger.logging(str(test_ret))

    def bpr_loss(self, users, pos_items, neg_items):
        """BPR pairwise ranking loss plus L2 embedding regularization.

        Returns (mf_loss, emb_loss, reg_loss); reg_loss is always 0.0 here.
        """
        pos_scores = torch.sum(torch.mul(users, pos_items), dim=1)
        neg_scores = torch.sum(torch.mul(users, neg_items), dim=1)
        regularizer = 1./2*(users**2).sum() + 1./2*(pos_items**2).sum() + 1./2*(neg_items**2).sum()
        regularizer = regularizer / self.batch_size
        maxi = F.logsigmoid(pos_scores - neg_scores)
        mf_loss = -torch.mean(maxi)
        emb_loss = self.decay * regularizer
        reg_loss = 0.0
        return mf_loss, emb_loss, reg_loss

    def sparse_mx_to_torch_sparse_tensor(self, sparse_mx):
        """Convert a scipy sparse matrix to a torch sparse tensor."""
        sparse_mx = sparse_mx.tocoo().astype(np.float32)
        indices = torch.from_numpy(
            np.vstack((sparse_mx.row, sparse_mx.col)).astype(np.int64))
        values = torch.from_numpy(sparse_mx.data)
        shape = torch.Size(sparse_mx.shape)
        return torch.sparse.FloatTensor(indices, values, shape)
def set_seed(seed):
    """Seed every RNG in play (numpy, stdlib random, torch CPU and all GPUs)
    so runs are reproducible."""
    np.random.seed(seed)
    random.seed(seed)
    torch.manual_seed(seed)            # CPU RNG
    torch.cuda.manual_seed_all(seed)   # every visible GPU (no-op without CUDA)
if __name__ == '__main__':
    # pin the visible GPU before any CUDA context is created
    os.environ["CUDA_VISIBLE_DEVICES"] = str(args.gpu_id)
    set_seed(args.seed)
    # assemble the data config consumed by Trainer
    config = dict()
    config['n_users'] = data_generator.n_users
    config['n_items'] = data_generator.n_items
    plain_adj, norm_adj, mean_adj = data_generator.get_adj_mat()
    config['norm_adj'] = norm_adj
    trainer = Trainer(data_config=config)
    trainer.train()
| 8,593 | 39.92381 | 158 | py |
MICRO | MICRO-main/codes/Models.py | import os
import numpy as np
from time import time
import torch
import torch.nn as nn
import torch.nn.functional as F
from utility.parser import parse_args
from utility.norm import build_sim, build_knn_normalized_graph
args = parse_args()  # command-line configuration shared across this module
class MICRO(nn.Module):
    """MICRO recommender: fuses image/text item graphs with a CF backbone
    (ngcf / lightgcn / mf, selected by ``args.cf_model``) and aligns each
    modality to the fused item representation via a contrastive loss.
    """
    def __init__(self, n_users, n_items, embedding_dim, weight_size, dropout_list, image_feats, text_feats):
        super().__init__()
        self.n_users = n_users
        self.n_items = n_items
        self.embedding_dim = embedding_dim
        self.weight_size = weight_size
        self.n_ui_layers = len(self.weight_size)
        self.weight_size = [self.embedding_dim] + self.weight_size
        self.user_embedding = nn.Embedding(n_users, self.embedding_dim)
        self.item_id_embedding = nn.Embedding(n_items, self.embedding_dim)
        nn.init.xavier_uniform_(self.user_embedding.weight)
        nn.init.xavier_uniform_(self.item_id_embedding.weight)
        # extra transform/dropout stacks are only needed by the NGCF backbone
        if args.cf_model == 'ngcf':
            self.GC_Linear_list = nn.ModuleList()
            self.Bi_Linear_list = nn.ModuleList()
            self.dropout_list = nn.ModuleList()
            for i in range(self.n_ui_layers):
                self.GC_Linear_list.append(nn.Linear(self.weight_size[i], self.weight_size[i+1]))
                self.Bi_Linear_list.append(nn.Linear(self.weight_size[i], self.weight_size[i+1]))
                self.dropout_list.append(nn.Dropout(dropout_list[i]))
        self.image_embedding = nn.Embedding.from_pretrained(torch.Tensor(image_feats), freeze=False)
        self.text_embedding = nn.Embedding.from_pretrained(torch.Tensor(text_feats), freeze=False)
        # fixed kNN graphs built from the RAW (pre-training) modality features;
        # blended into the learned graphs in forward() via lambda_coeff
        image_adj = build_sim(self.image_embedding.weight.detach())
        image_adj = build_knn_normalized_graph(image_adj, topk=args.topk, is_sparse=args.sparse, norm_type=args.norm_type)
        text_adj = build_sim(self.text_embedding.weight.detach())
        text_adj = build_knn_normalized_graph(text_adj, topk=args.topk, is_sparse=args.sparse, norm_type=args.norm_type)
        self.text_original_adj = text_adj.cuda()
        self.image_original_adj = image_adj.cuda()
        # project raw modality features into the shared embedding space
        self.image_trs = nn.Linear(image_feats.shape[1], args.embed_size)
        self.text_trs = nn.Linear(text_feats.shape[1], args.embed_size)
        self.softmax = nn.Softmax(dim=-1)
        # attention scorer used to fuse per-modality item embeddings
        self.query = nn.Sequential(
            nn.Linear(self.embedding_dim, self.embedding_dim),
            nn.Tanh(),
            nn.Linear(self.embedding_dim, 1, bias=False)
        )
        # temperature for the contrastive loss
        self.tau = 0.5

    def mm(self, x, y):
        """Matrix multiply that dispatches on the configured sparsity."""
        if args.sparse:
            return torch.sparse.mm(x, y)
        else:
            return torch.mm(x, y)

    def sim(self, z1, z2):
        """Cosine-similarity matrix between row-normalized z1 and z2."""
        z1 = F.normalize(z1)
        z2 = F.normalize(z2)
        return torch.mm(z1, z2.t())

    def batched_contrastive_loss(self, z1, z2, batch_size=4096):
        """InfoNCE-style contrastive loss between two item views, computed in
        row batches to bound the (B, N) similarity matrices in memory.
        """
        device = z1.device
        num_nodes = z1.size(0)
        num_batches = (num_nodes - 1) // batch_size + 1
        f = lambda x: torch.exp(x / self.tau)
        indices = torch.arange(0, num_nodes).to(device)
        losses = []
        for i in range(num_batches):
            mask = indices[i * batch_size:(i + 1) * batch_size]
            refl_sim = f(self.sim(z1[mask], z1))  # [B, N]
            between_sim = f(self.sim(z1[mask], z2))  # [B, N]
            # numerator: matching pair; denominator: all pairs except self-similarity
            losses.append(-torch.log(
                between_sim[:, i * batch_size:(i + 1) * batch_size].diag()
                / (refl_sim.sum(1) + between_sim.sum(1)
                   - refl_sim[:, i * batch_size:(i + 1) * batch_size].diag())))
        loss_vec = torch.cat(losses)
        return loss_vec.mean()

    def forward(self, adj, build_item_graph=False):
        """Return (user_emb, item_emb, image_item_emb, text_item_emb, fused_h).

        When ``build_item_graph`` is True the learnable modality kNN graphs are
        rebuilt from current features and blended with the frozen originals;
        otherwise the cached (detached) graphs are reused.
        """
        image_feats = self.image_trs(self.image_embedding.weight)
        text_feats = self.text_trs(self.text_embedding.weight)
        if build_item_graph:
            self.image_adj = build_sim(image_feats)
            self.image_adj = build_knn_normalized_graph(self.image_adj, topk=args.topk, is_sparse=args.sparse, norm_type=args.norm_type)
            self.image_adj = (1 - args.lambda_coeff) * self.image_adj + args.lambda_coeff * self.image_original_adj
            self.text_adj = build_sim(text_feats)
            self.text_adj = build_knn_normalized_graph(self.text_adj, topk=args.topk, is_sparse=args.sparse, norm_type=args.norm_type)
            self.text_adj = (1 - args.lambda_coeff) * self.text_adj + args.lambda_coeff * self.text_original_adj
        else:
            # reuse cached graphs without backprop through their construction
            self.image_adj = self.image_adj.detach()
            self.text_adj = self.text_adj.detach()
        # propagate ID embeddings over each modality graph
        image_item_embeds = self.item_id_embedding.weight
        text_item_embeds = self.item_id_embedding.weight
        for i in range(args.layers):
            image_item_embeds = self.mm(self.image_adj, image_item_embeds)
        for i in range(args.layers):
            text_item_embeds = self.mm(self.text_adj, text_item_embeds)
        # attention-weighted fusion of the two modality views
        att = torch.cat([self.query(image_item_embeds), self.query(text_item_embeds)], dim=-1)
        weight = self.softmax(att)
        h = weight[:, 0].unsqueeze(dim=1) * image_item_embeds + weight[:, 1].unsqueeze(dim=1) * text_item_embeds
        if args.cf_model == 'ngcf':
            ego_embeddings = torch.cat((self.user_embedding.weight, self.item_id_embedding.weight), dim=0)
            all_embeddings = [ego_embeddings]
            for i in range(self.n_ui_layers):
                side_embeddings = torch.sparse.mm(adj, ego_embeddings)
                sum_embeddings = F.leaky_relu(self.GC_Linear_list[i](side_embeddings))
                bi_embeddings = torch.mul(ego_embeddings, side_embeddings)
                bi_embeddings = F.leaky_relu(self.Bi_Linear_list[i](bi_embeddings))
                ego_embeddings = sum_embeddings + bi_embeddings
                ego_embeddings = self.dropout_list[i](ego_embeddings)
                norm_embeddings = F.normalize(ego_embeddings, p=2, dim=1)
                all_embeddings += [norm_embeddings]
            all_embeddings = torch.stack(all_embeddings, dim=1)
            all_embeddings = all_embeddings.mean(dim=1, keepdim=False)
            u_g_embeddings, i_g_embeddings = torch.split(all_embeddings, [self.n_users, self.n_items], dim=0)
            # inject the fused multimodal signal into the item embeddings
            i_g_embeddings = i_g_embeddings + F.normalize(h, p=2, dim=1)
            return u_g_embeddings, i_g_embeddings, image_item_embeds, text_item_embeds, h
        elif args.cf_model == 'lightgcn':
            ego_embeddings = torch.cat((self.user_embedding.weight, self.item_id_embedding.weight), dim=0)
            all_embeddings = [ego_embeddings]
            for i in range(self.n_ui_layers):
                side_embeddings = torch.sparse.mm(adj, ego_embeddings)
                ego_embeddings = side_embeddings
                all_embeddings += [ego_embeddings]
            all_embeddings = torch.stack(all_embeddings, dim=1)
            all_embeddings = all_embeddings.mean(dim=1, keepdim=False)
            u_g_embeddings, i_g_embeddings = torch.split(all_embeddings, [self.n_users, self.n_items], dim=0)
            i_g_embeddings = i_g_embeddings + F.normalize(h, p=2, dim=1)
            return u_g_embeddings, i_g_embeddings, image_item_embeds, text_item_embeds, h
        elif args.cf_model == 'mf':
            return self.user_embedding.weight, self.item_id_embedding.weight + F.normalize(h, p=2, dim=1), image_item_embeds, text_item_embeds, h
class MF(nn.Module):
    """Plain matrix-factorization baseline: just learnable user/item tables.

    ``weight_size``/``dropout_list``/feature arguments are accepted only for
    interface parity with the graph models and are ignored.
    """
    def __init__(self, n_users, n_items, embedding_dim, weight_size, dropout_list, image_feats=None, text_feats=None):
        super().__init__()
        self.n_users = n_users
        self.n_items = n_items
        self.embedding_dim = embedding_dim
        self.user_embedding = nn.Embedding(n_users, embedding_dim)
        self.item_embedding = nn.Embedding(n_items, embedding_dim)
        for table in (self.user_embedding, self.item_embedding):
            nn.init.xavier_uniform_(table.weight)

    def forward(self, adj, build_item_graph=False):
        """Return the raw user and item embedding tables; inputs are unused."""
        return self.user_embedding.weight, self.item_embedding.weight
class NGCF(nn.Module):
    """NGCF collaborative-filtering backbone (message passing with sum and
    bi-interaction transforms; layer outputs are L2-normalized and concatenated).

    ``image_feats``/``text_feats`` are accepted only for interface parity.
    """
    def __init__(self, n_users, n_items, embedding_dim, weight_size, dropout_list, image_feats=None, text_feats=None):
        super().__init__()
        self.n_users = n_users
        self.n_items = n_items
        self.embedding_dim = embedding_dim
        self.weight_size = weight_size
        self.n_ui_layers = len(self.weight_size)
        self.dropout_list = nn.ModuleList()
        self.GC_Linear_list = nn.ModuleList()
        self.Bi_Linear_list = nn.ModuleList()
        self.weight_size = [self.embedding_dim] + self.weight_size
        for i in range(self.n_ui_layers):
            in_dim, out_dim = self.weight_size[i], self.weight_size[i + 1]
            self.GC_Linear_list.append(nn.Linear(in_dim, out_dim))
            self.Bi_Linear_list.append(nn.Linear(in_dim, out_dim))
            self.dropout_list.append(nn.Dropout(dropout_list[i]))
        self.user_embedding = nn.Embedding(n_users, embedding_dim)
        self.item_id_embedding = nn.Embedding(n_items, embedding_dim)
        nn.init.xavier_uniform_(self.user_embedding.weight)
        nn.init.xavier_uniform_(self.item_id_embedding.weight)

    def forward(self, adj, build_item_graph):
        """Propagate (user ‖ item) embeddings over ``adj`` and return the
        per-type splits of the concatenated layer outputs."""
        ego = torch.cat((self.user_embedding.weight, self.item_id_embedding.weight), dim=0)
        collected = [ego]
        for gc_lin, bi_lin, drop in zip(self.GC_Linear_list, self.Bi_Linear_list, self.dropout_list):
            neigh = torch.sparse.mm(adj, ego)
            summed = F.leaky_relu(gc_lin(neigh))
            interacted = F.leaky_relu(bi_lin(torch.mul(ego, neigh)))
            ego = drop(summed + interacted)
            collected.append(F.normalize(ego, p=2, dim=1))
        stacked = torch.cat(collected, dim=1)
        return torch.split(stacked, [self.n_users, self.n_items], dim=0)
class LightGCN(nn.Module):
    """LightGCN backbone: parameter-free propagation, layer-averaged output.

    ``dropout_list``/``image_feats``/``text_feats`` are accepted only for
    interface parity and are ignored.
    """
    def __init__(self, n_users, n_items, embedding_dim, weight_size, dropout_list, image_feats=None, text_feats=None):
        super().__init__()
        self.n_users = n_users
        self.n_items = n_items
        self.embedding_dim = embedding_dim
        self.n_ui_layers = len(weight_size)
        self.user_embedding = nn.Embedding(n_users, embedding_dim)
        self.item_id_embedding = nn.Embedding(n_items, embedding_dim)
        for table in (self.user_embedding, self.item_id_embedding):
            nn.init.xavier_uniform_(table.weight)

    def forward(self, adj, build_item_graph):
        """Average embeddings across propagation layers and split by type."""
        layer_embeds = [torch.cat((self.user_embedding.weight, self.item_id_embedding.weight), dim=0)]
        for _ in range(self.n_ui_layers):
            layer_embeds.append(torch.sparse.mm(adj, layer_embeds[-1]))
        averaged = torch.stack(layer_embeds, dim=1).mean(dim=1, keepdim=False)
        return torch.split(averaged, [self.n_users, self.n_items], dim=0)
MICRO | MICRO-main/codes/utility/norm.py | import torch
def build_sim(context):
    """Row-normalize ``context`` to unit L2 norm and return the full
    cosine-similarity matrix (N, N)."""
    norms = torch.norm(context, p=2, dim=-1, keepdim=True)
    unit_rows = context.div(norms)
    return torch.mm(unit_rows, unit_rows.transpose(1, 0))
def build_knn_normalized_graph(adj, topk, is_sparse, norm_type):
    """Sparsify a dense similarity matrix to its top-k neighbors per row and
    normalize the result (see get_sparse_laplacian / get_dense_laplacian).

    args:
        adj: dense (N, N) similarity matrix
        topk: number of neighbors kept per row
        is_sparse: return a torch sparse COO tensor when True, dense otherwise
        norm_type: 'sym', 'rw' or 'none' — passed through to the normalizer
    """
    device = adj.device
    # values and column indices of the k largest entries per row
    knn_val, knn_ind = torch.topk(adj, topk, dim=-1)
    if is_sparse:
        # expand (row, col) pairs for every kept neighbor
        tuple_list = [[row, int(col)] for row in range(len(knn_ind)) for col in knn_ind[row]]
        row = [i[0] for i in tuple_list]
        col = [i[1] for i in tuple_list]
        i = torch.LongTensor([row, col]).to(device)
        v = knn_val.flatten()
        edge_index, edge_weight = get_sparse_laplacian(i, v, normalization=norm_type, num_nodes=adj.shape[0])
        return torch.sparse_coo_tensor(edge_index, edge_weight, adj.shape)
    else:
        # scatter the kept values back into a dense zero matrix
        weighted_adjacency_matrix = (torch.zeros_like(adj)).scatter_(-1, knn_ind, knn_val)
        return get_dense_laplacian(weighted_adjacency_matrix, normalization=norm_type)
def get_sparse_laplacian(edge_index, edge_weight, num_nodes, normalization='none'):
    """Normalize sparse edge weights by node degree.

    args:
        edge_index (2, E): row/col indices of the edges
        edge_weight (E,): edge weights
        num_nodes: number of nodes N
        normalization: 'sym' (D^-1/2 A D^-1/2), 'rw' (D^-1 A), or anything
            else to leave weights unchanged
    return:
        (edge_index, normalized edge_weight)
    """
    # requires the third-party torch_scatter package (imported lazily)
    from torch_scatter import scatter_add
    row, col = edge_index[0], edge_index[1]
    # weighted out-degree per source node
    deg = scatter_add(edge_weight, row, dim=0, dim_size=num_nodes)
    if normalization == 'sym':
        deg_inv_sqrt = deg.pow_(-0.5)
        # isolated nodes produce inf; zero them out
        deg_inv_sqrt.masked_fill_(deg_inv_sqrt == float('inf'), 0)
        edge_weight = deg_inv_sqrt[row] * edge_weight * deg_inv_sqrt[col]
    elif normalization == 'rw':
        deg_inv = 1.0 / deg
        deg_inv.masked_fill_(deg_inv == float('inf'), 0)
        edge_weight = deg_inv[row] * edge_weight
    # NOTE(review): any other normalization string silently returns the
    # weights untouched — confirm that is intended rather than an error
    return edge_index, edge_weight
def get_dense_laplacian(adj, normalization='none'):
    """Degree-normalize a dense adjacency matrix.

    args:
        adj: dense (N, N) adjacency / similarity matrix
        normalization: 'sym' for D^-1/2 A D^-1/2, 'rw' for D^-1 A,
            'none' to return ``adj`` unchanged
    return:
        normalized dense matrix
    raises:
        ValueError: for an unknown ``normalization`` string (previously this
            fell through and crashed with UnboundLocalError)
    """
    if normalization == 'sym':
        rowsum = torch.sum(adj, -1)
        d_inv_sqrt = torch.pow(rowsum, -0.5)
        # zero-degree rows yield inf; zero them out instead
        d_inv_sqrt[torch.isinf(d_inv_sqrt)] = 0.
        d_mat_inv_sqrt = torch.diagflat(d_inv_sqrt)
        L_norm = torch.mm(torch.mm(d_mat_inv_sqrt, adj), d_mat_inv_sqrt)
    elif normalization == 'rw':
        rowsum = torch.sum(adj, -1)
        d_inv = torch.pow(rowsum, -1)
        d_inv[torch.isinf(d_inv)] = 0.
        d_mat_inv = torch.diagflat(d_inv)
        L_norm = torch.mm(d_mat_inv, adj)
    elif normalization == 'none':
        L_norm = adj
    else:
        raise ValueError("unknown normalization: %r (expected 'sym', 'rw' or 'none')" % (normalization,))
    return L_norm
| 2,279 | 40.454545 | 109 | py |
MICRO | MICRO-main/codes/utility/batch_test.py | import utility.metrics as metrics
from utility.parser import parse_args
from utility.load_data import Data
import multiprocessing
import heapq
import torch
import pickle
import numpy as np
from time import time
# Module-level evaluation state shared by all test helpers below.
cores = multiprocessing.cpu_count() // 5  # worker pool size for parallel eval
args = parse_args()
# NOTE(review): eval() on a CLI string — assumes a trusted command line
Ks = eval(args.Ks)  # cutoff list, e.g. [10, 20]
data_generator = Data(path=args.data_path + args.dataset, batch_size=args.batch_size)
USR_NUM, ITEM_NUM = data_generator.n_users, data_generator.n_items
N_TRAIN, N_TEST = data_generator.n_train, data_generator.n_test
BATCH_SIZE = args.batch_size
def ranklist_by_heapq(user_pos_test, test_items, rating, Ks):
    """Rank candidate items by predicted score and mark hits.

    Builds the top-max(Ks) item list for one user and converts it into a
    binary relevance vector against ``user_pos_test``. This fast path skips
    the full-ranking AUC computation and always reports an AUC of 0.

    Returns:
        Tuple ``(r, auc)`` where ``r`` is a 0/1 list of length max(Ks).
    """
    scores = {item: rating[item] for item in test_items}
    top_items = heapq.nlargest(max(Ks), scores, key=scores.get)
    r = [1 if item in user_pos_test else 0 for item in top_items]
    return r, 0.
def get_auc(item_score, user_pos_test):
    """Compute the AUC over the full ranking of ``item_score``.

    Items are ordered by descending score — ascending sort followed by a
    reversal, preserving the original tie-breaking order — labeled 1/0
    against the positive test set, and handed to ``metrics.auc``.
    """
    ranked = sorted(item_score.items(), key=lambda kv: kv[1])[::-1]
    item_sort = [pair[0] for pair in ranked]
    posterior = [pair[1] for pair in ranked]
    r = [1 if item in user_pos_test else 0 for item in item_sort]
    return metrics.auc(ground_truth=r, prediction=posterior)
def ranklist_by_sorted(user_pos_test, test_items, rating, Ks):
    """Rank candidates like ``ranklist_by_heapq`` but also compute real AUC.

    Returns:
        Tuple ``(r, auc)`` where ``r`` is the 0/1 hit list for the top
        max(Ks) items and ``auc`` is computed over the full candidate set.
    """
    scores = {item: rating[item] for item in test_items}
    top_items = heapq.nlargest(max(Ks), scores, key=scores.get)
    r = [1 if item in user_pos_test else 0 for item in top_items]
    return r, get_auc(scores, user_pos_test)
def get_performance(user_pos_test, r, auc, Ks):
    """Aggregate ranking metrics at every cut-off K into numpy arrays.

    Args:
        user_pos_test: the user's held-out positive items.
        r: binary relevance list produced by one of the ranklist_* helpers.
        auc: precomputed AUC value to pass through.
        Ks: list of cut-offs at which each metric is evaluated.
    """
    return {
        'recall': np.array([metrics.recall_at_k(r, K, len(user_pos_test)) for K in Ks]),
        'precision': np.array([metrics.precision_at_k(r, K) for K in Ks]),
        'ndcg': np.array([metrics.ndcg_at_k(r, K) for K in Ks]),
        'hit_ratio': np.array([metrics.hit_at_k(r, K) for K in Ks]),
        'auc': auc,
    }
def test_one_user(x):
    """Score one user's predicted ratings against their held-out items.

    ``x`` is a ``(rating, uid, is_val)`` triple as produced by
    ``test_torch``; runs inside a multiprocessing worker.
    """
    rating, u, is_val = x[0], x[1], x[-1]
    # Items already seen during training must not be ranked at test time.
    try:
        training_items = data_generator.train_items[u]
    except Exception:
        training_items = []
    user_pos_test = data_generator.val_set[u] if is_val else data_generator.test_set[u]
    test_items = list(set(range(ITEM_NUM)) - set(training_items))
    if args.test_flag == 'part':
        r, auc = ranklist_by_heapq(user_pos_test, test_items, rating, Ks)
    else:
        r, auc = ranklist_by_sorted(user_pos_test, test_items, rating, Ks)
    return get_performance(user_pos_test, r, auc, Ks)
def test_torch(ua_embeddings, ia_embeddings, users_to_test, is_val, drop_flag=False, batch_test_flag=False):
    """Evaluate user/item embeddings over ``users_to_test``.

    Computes the user-item score matrix batch-by-batch with a dot product,
    then farms per-user ranking/metric computation out to a worker pool and
    averages the metrics over all tested users.

    Args:
        ua_embeddings: (n_users, d) user embedding tensor.
        ia_embeddings: (n_items, d) item embedding tensor.
        users_to_test: sequence of user ids to evaluate.
        is_val: evaluate against the validation split instead of test.
        drop_flag: unused; kept for interface compatibility.
        batch_test_flag: score items in chunks to bound peak memory.

    Returns:
        dict with 'precision'/'recall'/'ndcg'/'hit_ratio' arrays (one entry
        per K in Ks) and a scalar 'auc', each averaged over users.
    """
    result = {'precision': np.zeros(len(Ks)), 'recall': np.zeros(len(Ks)), 'ndcg': np.zeros(len(Ks)),
              'hit_ratio': np.zeros(len(Ks)), 'auc': 0.}

    pool = multiprocessing.Pool(cores)

    u_batch_size = BATCH_SIZE * 2
    i_batch_size = BATCH_SIZE

    test_users = users_to_test
    n_test_users = len(test_users)
    n_user_batchs = n_test_users // u_batch_size + 1

    count = 0
    # BUGFIX: shut the worker pool down (close + join) even when an exception
    # is raised mid-evaluation; previously close() could be skipped and
    # join() was never called, leaking worker processes.
    try:
        for u_batch_id in range(n_user_batchs):
            start = u_batch_id * u_batch_size
            end = (u_batch_id + 1) * u_batch_size
            user_batch = test_users[start: end]

            if batch_test_flag:
                # Score items in chunks so the full (users x items) tensor is
                # never materialized on device at once.
                n_item_batchs = ITEM_NUM // i_batch_size + 1
                rate_batch = np.zeros(shape=(len(user_batch), ITEM_NUM))
                i_count = 0
                for i_batch_id in range(n_item_batchs):
                    i_start = i_batch_id * i_batch_size
                    i_end = min((i_batch_id + 1) * i_batch_size, ITEM_NUM)
                    item_batch = range(i_start, i_end)
                    u_g_embeddings = ua_embeddings[user_batch]
                    i_g_embeddings = ia_embeddings[item_batch]
                    i_rate_batch = torch.matmul(u_g_embeddings, torch.transpose(i_g_embeddings, 0, 1))
                    # BUGFIX: convert each chunk explicitly; assigning a
                    # (possibly CUDA) tensor into a numpy buffer is fragile.
                    rate_batch[:, i_start: i_end] = i_rate_batch.detach().cpu().numpy()
                    i_count += i_rate_batch.shape[1]
                assert i_count == ITEM_NUM
            else:
                item_batch = range(ITEM_NUM)
                u_g_embeddings = ua_embeddings[user_batch]
                i_g_embeddings = ia_embeddings[item_batch]
                rate_batch = torch.matmul(u_g_embeddings, torch.transpose(i_g_embeddings, 0, 1))

            # BUGFIX: in the batch_test_flag path rate_batch is already a
            # numpy array; calling .detach() on it raised AttributeError.
            if torch.is_tensor(rate_batch):
                rate_batch = rate_batch.detach().cpu().numpy()
            user_batch_rating_uid = zip(rate_batch, user_batch, [is_val] * len(user_batch))
            batch_result = pool.map(test_one_user, user_batch_rating_uid)
            count += len(batch_result)

            # Each worker returns one user's metrics; accumulate the mean.
            for re in batch_result:
                result['precision'] += re['precision'] / n_test_users
                result['recall'] += re['recall'] / n_test_users
                result['ndcg'] += re['ndcg'] / n_test_users
                result['hit_ratio'] += re['hit_ratio'] / n_test_users
                result['auc'] += re['auc'] / n_test_users
    finally:
        pool.close()
        pool.join()

    assert count == n_test_users
    return result
| 5,454 | 31.088235 | 108 | py |
reformer-pytorch | reformer-pytorch-master/setup.py | from setuptools import setup, find_packages
# Packaging metadata for the reformer_pytorch distribution (PyPI release).
setup(
  name = 'reformer_pytorch',
  # Ship only the library package; example and pretraining scripts stay out.
  packages = find_packages(exclude=['examples', 'pretraining']),
  version = '1.4.4',
  license='MIT',
  description = 'Reformer, the Efficient Transformer, Pytorch',
  author = 'Phil Wang',
  author_email = 'lucidrains@gmail.com',
  url = 'https://github.com/lucidrains/reformer-pytorch',
  keywords = ['transformers', 'attention', 'artificial intelligence'],
  # Runtime dependencies pulled in on install.
  install_requires=[
      'axial-positional-embedding>=0.1.0',
      'einops',
      'local-attention',
      'product-key-memory',
      'torch'
  ],
  # PyPI trove classifiers describing maturity, audience and license.
  classifiers=[
      'Development Status :: 4 - Beta',
      'Intended Audience :: Developers',
      'Topic :: Scientific/Engineering :: Artificial Intelligence',
      'License :: OSI Approved :: MIT License',
      'Programming Language :: Python :: 3.6',
  ],
)
| 841 | 29.071429 | 70 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.