repo stringlengths 1 99 | file stringlengths 13 215 | code stringlengths 12 59.2M | file_length int64 12 59.2M | avg_line_length float64 3.82 1.48M | max_line_length int64 12 2.51M | extension_type stringclasses 1
value |
|---|---|---|---|---|---|---|
ParallelWaveGAN | ParallelWaveGAN-master/test/test_layers.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2019 Tomoki Hayashi
# MIT License (https://opensource.org/licenses/MIT)
import logging
import numpy as np
import pytest
import torch
from parallel_wavegan.layers import (
PQMF,
CausalConv1d,
CausalConvTranspose1d,
Conv1d,
Conv1d1x1,
Conv2d,
ConvInUpsampleNetwork,
UpsampleNetwork,
)
logging.basicConfig(
level=logging.WARN,
format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
)
def test_conv_initialization():
    """Check initial parameter values of the custom Conv wrappers."""
    # Conv1d / Conv1d1x1 are expected to start with an all-zero bias.
    conv = Conv1d(10, 10, 3, bias=True)
    np.testing.assert_array_equal(
        conv.bias.data.numpy(), np.zeros_like(conv.bias.data.numpy())
    )
    conv1x1 = Conv1d1x1(10, 10, bias=True)
    np.testing.assert_array_equal(
        conv1x1.bias.data.numpy(), np.zeros_like(conv1x1.bias.data.numpy())
    )
    # Conv2d is expected to start with weights of 1 / prod(kernel_size) and a
    # zero bias; check both a square and a non-square kernel.
    kernel_size = (10, 10)
    conv2d = Conv2d(10, 10, kernel_size, bias=True)
    np.testing.assert_array_equal(
        conv2d.weight.data.numpy(),
        np.ones_like(conv2d.weight.data.numpy()) / np.prod(kernel_size),
    )
    np.testing.assert_array_equal(
        conv2d.bias.data.numpy(), np.zeros_like(conv2d.bias.data.numpy())
    )
    kernel_size = (1, 10)
    conv2d = Conv2d(10, 10, kernel_size, bias=True)
    np.testing.assert_array_equal(
        conv2d.weight.data.numpy(),
        np.ones_like(conv2d.weight.data.numpy()) / np.prod(kernel_size),
    )
    np.testing.assert_array_equal(
        conv2d.bias.data.numpy(), np.zeros_like(conv2d.bias.data.numpy())
    )
@pytest.mark.parametrize(
"use_causal_conv",
[
(False),
(True),
],
)
def test_upsample(use_causal_conv):
    """Check output lengths of UpsampleNetwork and ConvInUpsampleNetwork."""
    length = 10
    scales = [4, 4]
    x = torch.randn(1, 10, length)
    upsample = UpsampleNetwork(scales)
    y = upsample(x)
    # Plain upsampling multiplies the time axis by the product of the scales.
    assert x.size(-1) * np.prod(scales) == y.size(-1)
    for aux_context_window in [0, 1, 2, 3]:
        conv_upsample = ConvInUpsampleNetwork(
            scales,
            aux_channels=x.size(1),
            aux_context_window=aux_context_window,
            use_causal_conv=use_causal_conv,
        )
        y = conv_upsample(x)
        # The aux conv consumes aux_context_window frames on each side of the
        # input before upsampling, hence the shorter expected length.
        assert (x.size(-1) - 2 * aux_context_window) * np.prod(scales) == y.size(-1)
@torch.no_grad()
@pytest.mark.parametrize(
"kernel_size, dilation, pad, pad_params",
[
(3, 1, "ConstantPad1d", {"value": 0.0}),
(3, 3, "ConstantPad1d", {"value": 0.0}),
(2, 1, "ConstantPad1d", {"value": 0.0}),
(2, 3, "ConstantPad1d", {"value": 0.0}),
(5, 1, "ConstantPad1d", {"value": 0.0}),
(5, 3, "ConstantPad1d", {"value": 0.0}),
(3, 3, "ReflectionPad1d", {}),
(2, 1, "ReflectionPad1d", {}),
(2, 3, "ReflectionPad1d", {}),
(5, 1, "ReflectionPad1d", {}),
(5, 3, "ReflectionPad1d", {}),
],
)
def test_causal_conv(kernel_size, dilation, pad, pad_params):
    """Check CausalConv1d keeps length and does not look at future samples."""
    x = torch.randn(1, 1, 32)
    conv = CausalConv1d(1, 1, kernel_size, dilation, pad=pad, pad_params=pad_params)
    y1 = conv(x)
    # Perturb only the second half of the input; a causal convolution must
    # leave the outputs for the first half unchanged.
    x[:, :, 16:] += torch.randn(1, 1, 16)
    y2 = conv(x)
    # Length-preserving convolution.
    assert x.size(2) == y1.size(2)
    np.testing.assert_array_equal(
        y1[:, :, :16].cpu().numpy(),
        y2[:, :, :16].cpu().numpy(),
    )
@torch.no_grad()
@pytest.mark.parametrize(
"kernel_size, stride",
[
(4, 2),
(6, 3),
(10, 5),
],
)
def test_causal_conv_transpose(kernel_size, stride):
    """Check CausalConvTranspose1d upsamples by `stride` and stays causal."""
    deconv = CausalConvTranspose1d(1, 1, kernel_size, stride)
    x = torch.randn(1, 1, 32)
    y1 = deconv(x)
    # Perturb only the tail of the input; outputs before the corresponding
    # upsampled position must be unchanged for a causal transposed conv.
    x[:, :, 19:] += torch.randn(1, 1, 32 - 19)
    y2 = deconv(x)
    # Transposed convolution upsamples the time axis by the stride.
    assert x.size(2) * stride == y1.size(2)
    np.testing.assert_array_equal(
        y1[:, :, : 19 * stride].cpu().numpy(),
        y2[:, :, : 19 * stride].cpu().numpy(),
    )
@pytest.mark.parametrize(
"subbands",
[
(3),
(4),
],
)
def test_pqmf(subbands):
    """PQMF analysis/synthesis must preserve the number of samples."""
    filterbank = PQMF(subbands)
    signal = torch.randn(1, 1, subbands * 32)
    analyzed = filterbank.analysis(signal)
    # Analysis decimates the time axis by the number of subbands.
    assert analyzed.shape[2] * subbands == signal.shape[2]
    # Synthesis restores the original number of samples.
    assert signal.shape[2] == filterbank.synthesis(analyzed).shape[2]
| 4,085 | 26.059603 | 84 | py |
ParallelWaveGAN | ParallelWaveGAN-master/test/test_mel_loss.py | #!/usr/bin/env python3
# Copyright 2021 Tomoki Hayashi
# MIT License (https://opensource.org/licenses/MIT)
"""Test code for Mel-spectrogram loss modules."""
import numpy as np
import torch
from parallel_wavegan.bin.preprocess import logmelfilterbank
from parallel_wavegan.losses import MelSpectrogram
def test_mel_spectrogram_is_equal():
    """Check the torch MelSpectrogram matches the numpy logmelfilterbank."""
    x = np.random.randn(22050)
    # Scale into [0, 1] so both implementations see the same bounded signal.
    x = np.abs(x) / np.max(np.abs(x))
    mel_npy = logmelfilterbank(
        x,
        22050,
        fft_size=1024,
        hop_size=256,
        win_length=None,
        window="hann",
        num_mels=80,
        fmin=80,
        fmax=7600,
        eps=1e-10,
    )
    # Same analysis settings as above; double precision to reduce numerical
    # differences between the two implementations.
    mel_spectrogram = MelSpectrogram(
        fs=22050,
        fft_size=1024,
        hop_size=256,
        win_length=None,
        window="hann",
        num_mels=80,
        fmin=80,
        fmax=7600,
        eps=1e-10,
    ).to(dtype=torch.double)
    mel_torch = mel_spectrogram(torch.from_numpy(x).unsqueeze(0))
    # Transpose aligns the time/mel axes of the two implementations before
    # the element-wise comparison.
    np.testing.assert_array_almost_equal(
        mel_npy.transpose(1, 0).astype(np.float32),
        mel_torch[0].numpy().astype(np.float32),
    )
| 1,117 | 22.787234 | 65 | py |
ParallelWaveGAN | ParallelWaveGAN-master/test/test_hifigan.py | #!/usr/bin/env python3
# Copyright 2021 Tomoki Hayashi
# MIT License (https://opensource.org/licenses/MIT)
"""Test code for HiFi-GAN modules."""
import logging
import os
import numpy as np
import pytest
import torch
import yaml
from test_parallel_wavegan import make_mutli_reso_stft_loss_args
import parallel_wavegan.models
from parallel_wavegan.losses import (
DiscriminatorAdversarialLoss,
FeatureMatchLoss,
GeneratorAdversarialLoss,
MultiResolutionSTFTLoss,
)
from parallel_wavegan.models import (
HiFiGANGenerator,
HiFiGANMultiScaleMultiPeriodDiscriminator,
)
logging.basicConfig(
level=logging.DEBUG,
format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
)
def make_hifigan_generator_args(**kwargs):
    """Return default HiFi-GAN generator arguments, overridden by kwargs."""
    return {
        "in_channels": 80,
        "out_channels": 1,
        "channels": 512,
        "kernel_size": 7,
        "upsample_scales": (8, 8, 2, 2),
        "upsample_kernel_sizes": (16, 16, 4, 4),
        "resblock_kernel_sizes": (3, 7, 11),
        "resblock_dilations": [(1, 3, 5), (1, 3, 5), (1, 3, 5)],
        "use_additional_convs": True,
        "bias": True,
        "nonlinear_activation": "LeakyReLU",
        "nonlinear_activation_params": {"negative_slope": 0.1},
        "use_weight_norm": True,
        "use_causal_conv": False,
        **kwargs,
    }
def make_hifigan_multi_scale_multi_period_discriminator_args(**kwargs):
    """Return default HiFi-GAN MSD+MPD arguments, overridden by kwargs."""
    return {
        "scales": 3,
        "scale_downsample_pooling": "AvgPool1d",
        "scale_downsample_pooling_params": {
            "kernel_size": 4,
            "stride": 2,
            "padding": 2,
        },
        "scale_discriminator_params": {
            "in_channels": 1,
            "out_channels": 1,
            "kernel_sizes": [15, 41, 5, 3],
            "channels": 128,
            "max_downsample_channels": 128,
            "max_groups": 16,
            "bias": True,
            "downsample_scales": [2, 2, 4, 4, 1],
            "nonlinear_activation": "LeakyReLU",
            "nonlinear_activation_params": {"negative_slope": 0.1},
        },
        "follow_official_norm": False,
        "periods": [2, 3, 5, 7, 11],
        "period_discriminator_params": {
            "in_channels": 1,
            "out_channels": 1,
            "kernel_sizes": [5, 3],
            "channels": 32,
            "downsample_scales": [3, 3, 3, 3, 1],
            "max_downsample_channels": 128,
            "bias": True,
            "nonlinear_activation": "LeakyReLU",
            "nonlinear_activation_params": {"negative_slope": 0.1},
            "use_weight_norm": True,
            "use_spectral_norm": False,
        },
        **kwargs,
    }
@pytest.mark.parametrize(
"dict_g, dict_d, dict_loss",
[
({}, {}, {}),
({}, {"scales": 1}, {}),
({}, {"periods": [2]}, {}),
({}, {"scales": 1, "periods": [2]}, {}),
({}, {"follow_official_norm": True}, {}),
({"use_additional_convs": False}, {}, {}),
],
)
def test_hifigan_trainable(dict_g, dict_d, dict_loss):
    """Smoke-test one generator and one discriminator update for HiFi-GAN."""
    # setup
    batch_size = 4
    batch_length = 2**13
    args_g = make_hifigan_generator_args(**dict_g)
    args_d = make_hifigan_multi_scale_multi_period_discriminator_args(**dict_d)
    args_loss = make_mutli_reso_stft_loss_args(**dict_loss)
    y = torch.randn(batch_size, 1, batch_length)
    # One conditioning frame per upsampled hop.
    c = torch.randn(
        batch_size,
        args_g["in_channels"],
        batch_length // np.prod(args_g["upsample_scales"]),
    )
    model_g = HiFiGANGenerator(**args_g)
    model_d = HiFiGANMultiScaleMultiPeriodDiscriminator(**args_d)
    aux_criterion = MultiResolutionSTFTLoss(**args_loss)
    feat_match_criterion = FeatureMatchLoss(
        average_by_layers=False,
        average_by_discriminators=False,
        include_final_outputs=True,
    )
    gen_adv_criterion = GeneratorAdversarialLoss(
        average_by_discriminators=False,
    )
    dis_adv_criterion = DiscriminatorAdversarialLoss(
        average_by_discriminators=False,
    )
    optimizer_g = torch.optim.AdamW(model_g.parameters())
    optimizer_d = torch.optim.AdamW(model_d.parameters())
    # check generator trainable
    y_hat = model_g(c)
    p_hat = model_d(y_hat)
    sc_loss, mag_loss = aux_criterion(y_hat, y)
    aux_loss = sc_loss + mag_loss
    adv_loss = gen_adv_criterion(p_hat)
    # Reference features for the feature-matching loss; no grads needed here.
    with torch.no_grad():
        p = model_d(y)
    fm_loss = feat_match_criterion(p_hat, p)
    loss_g = adv_loss + aux_loss + fm_loss
    optimizer_g.zero_grad()
    loss_g.backward()
    optimizer_g.step()
    # check discriminator trainable
    p = model_d(y)
    # Detach so the discriminator update does not backprop into the generator.
    p_hat = model_d(y_hat.detach())
    real_loss, fake_loss = dis_adv_criterion(p_hat, p)
    loss_d = real_loss + fake_loss
    optimizer_d.zero_grad()
    loss_d.backward()
    optimizer_d.step()
    # Print the model architectures.
    print(model_d)
    print(model_g)
@pytest.mark.parametrize(
"dict_g",
[
(
{
"use_causal_conv": True,
"upsample_scales": [5, 5, 4, 3],
"upsample_kernel_sizes": [10, 10, 8, 6],
}
),
(
{
"use_causal_conv": True,
"upsample_scales": [8, 8, 2, 2],
"upsample_kernel_sizes": [16, 16, 4, 4],
}
),
(
{
"use_causal_conv": True,
"upsample_scales": [4, 5, 4, 3],
"upsample_kernel_sizes": [8, 10, 8, 6],
}
),
(
{
"use_causal_conv": True,
"upsample_scales": [4, 4, 2, 2],
"upsample_kernel_sizes": [8, 8, 4, 4],
}
),
],
)
def test_causal_hifigan(dict_g):
    """Check the causal HiFi-GAN generator does not look at future frames.

    Only the second half of the conditioning is perturbed, so the first half
    of the generated waveform must be bit-identical between the two runs.
    """
    batch_size = 4
    batch_length = 8192
    args_g = make_hifigan_generator_args(**dict_g)
    upsampling_factor = np.prod(args_g["upsample_scales"])
    c = torch.randn(
        batch_size, args_g["in_channels"], batch_length // upsampling_factor
    )
    model_g = HiFiGANGenerator(**args_g)
    c_ = c.clone()
    c_[..., c.size(-1) // 2 :] = torch.randn(c[..., c.size(-1) // 2 :].shape)
    # Sanity check: the perturbed conditioning must actually differ
    # (replaces the old try/except/else inversion around assert_array_equal).
    assert not np.array_equal(c.numpy(), c_.numpy()), "Must be different."
    # check causality
    y = model_g(c)
    y_ = model_g(c_)
    assert y.size(2) == c.size(2) * upsampling_factor
    np.testing.assert_array_equal(
        y[..., : c.size(-1) // 2 * upsampling_factor].detach().cpu().numpy(),
        y_[..., : c_.size(-1) // 2 * upsampling_factor].detach().cpu().numpy(),
    )
def test_fix_norm_issue():
    """Check a pretrained discriminator checkpoint loads into a fresh model."""
    # NOTE: downloads the ljspeech_hifigan.v1 checkpoint (needs network access).
    from parallel_wavegan.utils import download_pretrained_model
    checkpoint = download_pretrained_model("ljspeech_hifigan.v1")
    config = os.path.join(os.path.dirname(checkpoint), "config.yml")
    with open(config) as f:
        config = yaml.load(f, Loader=yaml.Loader)
    # get model and load parameters
    discriminator_type = config.get("discriminator_type")
    model_class = getattr(
        parallel_wavegan.models,
        discriminator_type,
    )
    model = model_class(**config["discriminator_params"])
    state_dict_org = model.state_dict()
    model.load_state_dict(state_dict_org)
    state_dict = torch.load(checkpoint, map_location="cpu")["model"]["discriminator"]
    # strict=False tolerates key mismatches between the stored and fresh state
    # dicts (presumably the norm-related keys the test name refers to).
    model.load_state_dict(state_dict, strict=False)
| 7,403 | 28.854839 | 85 | py |
ParallelWaveGAN | ParallelWaveGAN-master/test/test_melgan.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2019 Tomoki Hayashi
# MIT License (https://opensource.org/licenses/MIT)
import logging
import numpy as np
import pytest
import torch
from test_parallel_wavegan import (
make_discriminator_args,
make_mutli_reso_stft_loss_args,
make_residual_discriminator_args,
)
from parallel_wavegan.losses import (
DiscriminatorAdversarialLoss,
FeatureMatchLoss,
GeneratorAdversarialLoss,
MultiResolutionSTFTLoss,
)
from parallel_wavegan.models import (
MelGANGenerator,
MelGANMultiScaleDiscriminator,
ParallelWaveGANDiscriminator,
ResidualParallelWaveGANDiscriminator,
)
from parallel_wavegan.optimizers import RAdam
logging.basicConfig(
level=logging.DEBUG,
format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
)
def make_melgan_generator_args(**kwargs):
    """Return default MelGAN generator arguments, overridden by kwargs."""
    return {
        "in_channels": 80,
        "out_channels": 1,
        "kernel_size": 7,
        "channels": 512,
        "bias": True,
        "upsample_scales": [8, 8, 2, 2],
        "stack_kernel_size": 3,
        "stacks": 3,
        "nonlinear_activation": "LeakyReLU",
        "nonlinear_activation_params": {"negative_slope": 0.2},
        "pad": "ReflectionPad1d",
        "pad_params": {},
        "use_final_nonlinear_activation": True,
        "use_weight_norm": True,
        "use_causal_conv": False,
        **kwargs,
    }
def make_melgan_discriminator_args(**kwargs):
    """Return default MelGAN multi-scale discriminator args, overridden by kwargs."""
    return {
        "in_channels": 1,
        "out_channels": 1,
        "scales": 3,
        "downsample_pooling": "AvgPool1d",
        # follow the official implementation setting
        "downsample_pooling_params": {
            "kernel_size": 4,
            "stride": 2,
            "padding": 1,
            "count_include_pad": False,
        },
        "kernel_sizes": [5, 3],
        "channels": 16,
        "max_downsample_channels": 1024,
        "bias": True,
        "downsample_scales": [4, 4, 4, 4],
        "nonlinear_activation": "LeakyReLU",
        "nonlinear_activation_params": {"negative_slope": 0.2},
        "pad": "ReflectionPad1d",
        "pad_params": {},
        "use_weight_norm": True,
        **kwargs,
    }
@pytest.mark.parametrize(
"dict_g, dict_d, dict_loss",
[
({}, {}, {}),
({"kernel_size": 3}, {}, {}),
({"channels": 1024}, {}, {}),
({"stack_kernel_size": 5}, {}, {}),
({"stack_kernel_size": 5, "stacks": 2}, {}, {}),
({"upsample_scales": [4, 4, 4, 4]}, {}, {}),
({"upsample_scales": [8, 8, 2, 2, 2]}, {}, {}),
({"channels": 1024, "upsample_scales": [8, 8, 2, 2, 2, 2]}, {}, {}),
({"pad": "ConstantPad1d", "pad_params": {"value": 0.0}}, {}, {}),
({"nonlinear_activation": "ReLU", "nonlinear_activation_params": {}}, {}, {}),
({"bias": False}, {}, {}),
({"use_final_nonlinear_activation": False}, {}, {}),
({"use_weight_norm": False}, {}, {}),
({"use_causal_conv": True}, {}, {}),
],
)
def test_melgan_trainable(dict_g, dict_d, dict_loss):
    """Smoke-test one G/D update for MelGAN with the PWG discriminator."""
    # setup
    batch_size = 4
    batch_length = 4096
    args_g = make_melgan_generator_args(**dict_g)
    args_d = make_discriminator_args(**dict_d)
    args_loss = make_mutli_reso_stft_loss_args(**dict_loss)
    y = torch.randn(batch_size, 1, batch_length)
    # One conditioning frame per upsampled hop.
    c = torch.randn(
        batch_size,
        args_g["in_channels"],
        batch_length // np.prod(args_g["upsample_scales"]),
    )
    model_g = MelGANGenerator(**args_g)
    model_d = ParallelWaveGANDiscriminator(**args_d)
    aux_criterion = MultiResolutionSTFTLoss(**args_loss)
    gen_adv_criterion = GeneratorAdversarialLoss()
    dis_adv_criterion = DiscriminatorAdversarialLoss()
    optimizer_g = RAdam(model_g.parameters())
    optimizer_d = RAdam(model_d.parameters())
    # check generator trainable
    y_hat = model_g(c)
    p_hat = model_d(y_hat)
    adv_loss = gen_adv_criterion(p_hat)
    sc_loss, mag_loss = aux_criterion(y_hat, y)
    aux_loss = sc_loss + mag_loss
    loss_g = adv_loss + aux_loss
    optimizer_g.zero_grad()
    loss_g.backward()
    optimizer_g.step()
    # check discriminator trainable
    p = model_d(y)
    # Detach so the discriminator update does not backprop into the generator.
    p_hat = model_d(y_hat.detach())
    real_loss, fake_loss = dis_adv_criterion(p_hat, p)
    loss_d = real_loss + fake_loss
    optimizer_d.zero_grad()
    loss_d.backward()
    optimizer_d.step()
@pytest.mark.parametrize(
"dict_g, dict_d, dict_loss",
[
({}, {}, {}),
({"kernel_size": 3}, {}, {}),
({"channels": 1024}, {}, {}),
({"stack_kernel_size": 5}, {}, {}),
({"stack_kernel_size": 5, "stacks": 2}, {}, {}),
({"upsample_scales": [4, 4, 4, 4]}, {}, {}),
({"upsample_scales": [8, 8, 2, 2, 2]}, {}, {}),
({"channels": 1024, "upsample_scales": [8, 8, 2, 2, 2, 2]}, {}, {}),
({"pad": "ConstantPad1d", "pad_params": {"value": 0.0}}, {}, {}),
({"nonlinear_activation": "ReLU", "nonlinear_activation_params": {}}, {}, {}),
({"bias": False}, {}, {}),
({"use_final_nonlinear_activation": False}, {}, {}),
({"use_weight_norm": False}, {}, {}),
],
)
def test_melgan_trainable_with_residual_discriminator(dict_g, dict_d, dict_loss):
    """Smoke-test one G/D update for MelGAN with the residual discriminator."""
    # setup
    batch_size = 4
    batch_length = 4096
    args_g = make_melgan_generator_args(**dict_g)
    args_d = make_residual_discriminator_args(**dict_d)
    args_loss = make_mutli_reso_stft_loss_args(**dict_loss)
    y = torch.randn(batch_size, 1, batch_length)
    # One conditioning frame per upsampled hop.
    c = torch.randn(
        batch_size,
        args_g["in_channels"],
        batch_length // np.prod(args_g["upsample_scales"]),
    )
    model_g = MelGANGenerator(**args_g)
    model_d = ResidualParallelWaveGANDiscriminator(**args_d)
    aux_criterion = MultiResolutionSTFTLoss(**args_loss)
    gen_adv_criterion = GeneratorAdversarialLoss()
    dis_adv_criterion = DiscriminatorAdversarialLoss()
    optimizer_g = RAdam(model_g.parameters())
    optimizer_d = RAdam(model_d.parameters())
    # check generator trainable
    y_hat = model_g(c)
    p_hat = model_d(y_hat)
    adv_loss = gen_adv_criterion(p_hat)
    sc_loss, mag_loss = aux_criterion(y_hat, y)
    aux_loss = sc_loss + mag_loss
    loss_g = adv_loss + aux_loss
    optimizer_g.zero_grad()
    loss_g.backward()
    optimizer_g.step()
    # check discriminator trainable
    p = model_d(y)
    # Detach so the discriminator update does not backprop into the generator.
    p_hat = model_d(y_hat.detach())
    real_loss, fake_loss = dis_adv_criterion(p_hat, p)
    loss_d = real_loss + fake_loss
    optimizer_d.zero_grad()
    loss_d.backward()
    optimizer_d.step()
@pytest.mark.parametrize(
"dict_g, dict_d, dict_loss",
[
({}, {}, {}),
({}, {"scales": 4}, {}),
({}, {"kernel_sizes": [7, 5]}, {}),
({}, {"max_downsample_channels": 128}, {}),
({}, {"downsample_scales": [4, 4]}, {}),
({}, {"pad": "ConstantPad1d", "pad_params": {"value": 0.0}}, {}),
({}, {"nonlinear_activation": "ReLU", "nonlinear_activation_params": {}}, {}),
],
)
def test_melgan_trainable_with_melgan_discriminator(dict_g, dict_d, dict_loss):
    """Smoke-test one G/D update for MelGAN with its multi-scale discriminator."""
    # setup
    batch_size = 4
    batch_length = 4096
    args_g = make_melgan_generator_args(**dict_g)
    args_d = make_melgan_discriminator_args(**dict_d)
    args_loss = make_mutli_reso_stft_loss_args(**dict_loss)
    y = torch.randn(batch_size, 1, batch_length)
    # One conditioning frame per upsampled hop.
    c = torch.randn(
        batch_size,
        args_g["in_channels"],
        batch_length // np.prod(args_g["upsample_scales"]),
    )
    model_g = MelGANGenerator(**args_g)
    model_d = MelGANMultiScaleDiscriminator(**args_d)
    aux_criterion = MultiResolutionSTFTLoss(**args_loss)
    feat_match_criterion = FeatureMatchLoss()
    gen_adv_criterion = GeneratorAdversarialLoss()
    dis_adv_criterion = DiscriminatorAdversarialLoss()
    optimizer_g = RAdam(model_g.parameters())
    optimizer_d = RAdam(model_d.parameters())
    # check generator trainable
    y_hat = model_g(c)
    p_hat = model_d(y_hat)
    sc_loss, mag_loss = aux_criterion(y_hat, y)
    aux_loss = sc_loss + mag_loss
    adv_loss = gen_adv_criterion(p_hat)
    # Reference features for the feature-matching loss; no grads needed here.
    with torch.no_grad():
        p = model_d(y)
    fm_loss = feat_match_criterion(p_hat, p)
    loss_g = adv_loss + aux_loss + fm_loss
    optimizer_g.zero_grad()
    loss_g.backward()
    optimizer_g.step()
    # check discriminator trainable
    p = model_d(y)
    # Detach so the discriminator update does not backprop into the generator.
    p_hat = model_d(y_hat.detach())
    real_loss, fake_loss = dis_adv_criterion(p_hat, p)
    loss_d = real_loss + fake_loss
    optimizer_d.zero_grad()
    loss_d.backward()
    optimizer_d.step()
@pytest.mark.parametrize(
"dict_g",
[
({"use_causal_conv": True}),
({"use_causal_conv": True, "upsample_scales": [4, 4, 2, 2]}),
({"use_causal_conv": True, "upsample_scales": [4, 5, 4, 3]}),
],
)
def test_causal_melgan(dict_g):
    """Check the causal MelGAN generator does not look at future frames.

    Only the second half of the conditioning is perturbed, so the first half
    of the generated waveform must be bit-identical between the two runs.
    """
    batch_size = 4
    batch_length = 4096
    args_g = make_melgan_generator_args(**dict_g)
    upsampling_factor = np.prod(args_g["upsample_scales"])
    c = torch.randn(
        batch_size, args_g["in_channels"], batch_length // upsampling_factor
    )
    model_g = MelGANGenerator(**args_g)
    c_ = c.clone()
    c_[..., c.size(-1) // 2 :] = torch.randn(c[..., c.size(-1) // 2 :].shape)
    # Sanity check: the perturbed conditioning must actually differ
    # (replaces the old try/except/else inversion around assert_array_equal).
    assert not np.array_equal(c.numpy(), c_.numpy()), "Must be different."
    # check causality
    y = model_g(c)
    y_ = model_g(c_)
    assert y.size(2) == c.size(2) * upsampling_factor
    np.testing.assert_array_equal(
        y[..., : c.size(-1) // 2 * upsampling_factor].detach().cpu().numpy(),
        y_[..., : c_.size(-1) // 2 * upsampling_factor].detach().cpu().numpy(),
    )
| 9,711 | 31.15894 | 86 | py |
ParallelWaveGAN | ParallelWaveGAN-master/test/test_parallel_wavegan.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2019 Tomoki Hayashi
# MIT License (https://opensource.org/licenses/MIT)
import logging
import numpy as np
import pytest
import torch
from parallel_wavegan.losses import (
DiscriminatorAdversarialLoss,
GeneratorAdversarialLoss,
MultiResolutionSTFTLoss,
)
from parallel_wavegan.models import (
ParallelWaveGANDiscriminator,
ParallelWaveGANGenerator,
ResidualParallelWaveGANDiscriminator,
)
from parallel_wavegan.optimizers import RAdam
logging.basicConfig(
level=logging.DEBUG,
format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
)
def make_generator_args(**kwargs):
    """Return default ParallelWaveGAN generator arguments, overridden by kwargs."""
    return {
        "in_channels": 1,
        "out_channels": 1,
        "kernel_size": 3,
        "layers": 6,
        "stacks": 3,
        "residual_channels": 8,
        "gate_channels": 16,
        "skip_channels": 8,
        "aux_channels": 10,
        "aux_context_window": 0,
        "dropout": 1 - 0.95,
        "use_weight_norm": True,
        "use_causal_conv": False,
        "upsample_conditional_features": True,
        "upsample_net": "ConvInUpsampleNetwork",
        "upsample_params": {"upsample_scales": [4, 4]},
        **kwargs,
    }
def make_discriminator_args(**kwargs):
    """Return default ParallelWaveGAN discriminator args, overridden by kwargs."""
    return {
        "in_channels": 1,
        "out_channels": 1,
        "kernel_size": 3,
        "layers": 5,
        "conv_channels": 16,
        "nonlinear_activation": "LeakyReLU",
        "nonlinear_activation_params": {"negative_slope": 0.2},
        "bias": True,
        "use_weight_norm": True,
        **kwargs,
    }
def make_residual_discriminator_args(**kwargs):
    """Return default residual PWG discriminator args, overridden by kwargs."""
    return {
        "in_channels": 1,
        "out_channels": 1,
        "kernel_size": 3,
        "layers": 10,
        "stacks": 1,
        "residual_channels": 8,
        "gate_channels": 16,
        "skip_channels": 8,
        "dropout": 0.0,
        "use_weight_norm": True,
        "use_causal_conv": False,
        "nonlinear_activation_params": {"negative_slope": 0.2},
        **kwargs,
    }
def make_mutli_reso_stft_loss_args(**kwargs):
    """Return default multi-resolution STFT loss args, overridden by kwargs.

    NOTE: the "mutli" typo in the name is kept because other test modules
    import this function under this exact name.
    """
    return {
        "fft_sizes": [64, 128, 256],
        "hop_sizes": [32, 64, 128],
        "win_lengths": [48, 96, 192],
        "window": "hann_window",
        **kwargs,
    }
@pytest.mark.parametrize(
"dict_g, dict_d, dict_loss",
[
({}, {}, {}),
({"layers": 1, "stacks": 1}, {}, {}),
({}, {"layers": 1}, {}),
({"kernel_size": 5}, {}, {}),
({}, {"kernel_size": 5}, {}),
({"gate_channels": 8}, {}, {}),
({"stacks": 1}, {}, {}),
({"use_weight_norm": False}, {"use_weight_norm": False}, {}),
({"aux_context_window": 2}, {}, {}),
({"upsample_net": "UpsampleNetwork"}, {}, {}),
(
{"upsample_params": {"upsample_scales": [4], "freq_axis_kernel_size": 3}},
{},
{},
),
(
{
"upsample_params": {
"upsample_scales": [4],
"nonlinear_activation": "ReLU",
}
},
{},
{},
),
(
{
"upsample_conditional_features": False,
"upsample_params": {"upsample_scales": [1]},
},
{},
{},
),
({}, {"nonlinear_activation": "ReLU", "nonlinear_activation_params": {}}, {}),
({"use_causal_conv": True}, {}, {}),
({"use_causal_conv": True, "upsample_net": "UpsampleNetwork"}, {}, {}),
({"use_causal_conv": True, "aux_context_window": 1}, {}, {}),
({"use_causal_conv": True, "aux_context_window": 2}, {}, {}),
({"use_causal_conv": True, "aux_context_window": 3}, {}, {}),
(
{
"aux_channels": 16,
"upsample_net": "MelGANGenerator",
"upsample_params": {
"upsample_scales": [4, 4],
"in_channels": 16,
"out_channels": 16,
},
},
{},
{},
),
],
)
def test_parallel_wavegan_trainable(dict_g, dict_d, dict_loss):
    """Smoke-test one G/D update for ParallelWaveGAN."""
    # setup
    batch_size = 4
    batch_length = 4096
    args_g = make_generator_args(**dict_g)
    args_d = make_discriminator_args(**dict_d)
    args_loss = make_mutli_reso_stft_loss_args(**dict_loss)
    z = torch.randn(batch_size, 1, batch_length)
    y = torch.randn(batch_size, 1, batch_length)
    # One conditioning frame per upsampled hop, plus the context frames the
    # aux conv consumes on each side.
    c = torch.randn(
        batch_size,
        args_g["aux_channels"],
        batch_length // np.prod(args_g["upsample_params"]["upsample_scales"])
        + 2 * args_g["aux_context_window"],
    )
    model_g = ParallelWaveGANGenerator(**args_g)
    model_d = ParallelWaveGANDiscriminator(**args_d)
    aux_criterion = MultiResolutionSTFTLoss(**args_loss)
    gen_adv_criterion = GeneratorAdversarialLoss()
    dis_adv_criterion = DiscriminatorAdversarialLoss()
    optimizer_g = RAdam(model_g.parameters())
    optimizer_d = RAdam(model_d.parameters())
    # check generator trainable
    y_hat = model_g(z, c)
    p_hat = model_d(y_hat)
    adv_loss = gen_adv_criterion(p_hat)
    sc_loss, mag_loss = aux_criterion(y_hat, y)
    aux_loss = sc_loss + mag_loss
    loss_g = adv_loss + aux_loss
    optimizer_g.zero_grad()
    loss_g.backward()
    optimizer_g.step()
    # check discriminator trainable
    p = model_d(y)
    # Detach so the discriminator update does not backprop into the generator.
    p_hat = model_d(y_hat.detach())
    real_loss, fake_loss = dis_adv_criterion(p_hat, p)
    loss_d = real_loss + fake_loss
    optimizer_d.zero_grad()
    loss_d.backward()
    optimizer_d.step()
@pytest.mark.parametrize(
"dict_g, dict_d, dict_loss",
[
({}, {}, {}),
({"layers": 1, "stacks": 1}, {}, {}),
({}, {"layers": 1}, {}),
({"kernel_size": 5}, {}, {}),
({}, {"kernel_size": 5}, {}),
({"gate_channels": 8}, {}, {}),
({"stacks": 1}, {}, {}),
({"use_weight_norm": False}, {"use_weight_norm": False}, {}),
({"aux_context_window": 2}, {}, {}),
({"upsample_net": "UpsampleNetwork"}, {}, {}),
(
{"upsample_params": {"upsample_scales": [4], "freq_axis_kernel_size": 3}},
{},
{},
),
(
{
"upsample_params": {
"upsample_scales": [4],
"nonlinear_activation": "ReLU",
}
},
{},
{},
),
(
{
"upsample_conditional_features": False,
"upsample_params": {"upsample_scales": [1]},
},
{},
{},
),
({}, {"nonlinear_activation": "ReLU", "nonlinear_activation_params": {}}, {}),
({"use_causal_conv": True}, {}, {}),
({"use_causal_conv": True, "upsample_net": "UpsampleNetwork"}, {}, {}),
({"use_causal_conv": True, "aux_context_window": 1}, {}, {}),
({"use_causal_conv": True, "aux_context_window": 2}, {}, {}),
({"use_causal_conv": True, "aux_context_window": 3}, {}, {}),
(
{
"aux_channels": 16,
"upsample_net": "MelGANGenerator",
"upsample_params": {
"upsample_scales": [4, 4],
"in_channels": 16,
"out_channels": 16,
},
},
{},
{},
),
],
)
def test_parallel_wavegan_with_residual_discriminator_trainable(
    dict_g, dict_d, dict_loss
):
    """Smoke-test one G/D update for PWG with the residual discriminator."""
    # setup
    batch_size = 4
    batch_length = 4096
    args_g = make_generator_args(**dict_g)
    args_d = make_residual_discriminator_args(**dict_d)
    args_loss = make_mutli_reso_stft_loss_args(**dict_loss)
    z = torch.randn(batch_size, 1, batch_length)
    y = torch.randn(batch_size, 1, batch_length)
    # One conditioning frame per upsampled hop, plus the context frames the
    # aux conv consumes on each side.
    c = torch.randn(
        batch_size,
        args_g["aux_channels"],
        batch_length // np.prod(args_g["upsample_params"]["upsample_scales"])
        + 2 * args_g["aux_context_window"],
    )
    model_g = ParallelWaveGANGenerator(**args_g)
    model_d = ResidualParallelWaveGANDiscriminator(**args_d)
    aux_criterion = MultiResolutionSTFTLoss(**args_loss)
    gen_adv_criterion = GeneratorAdversarialLoss()
    dis_adv_criterion = DiscriminatorAdversarialLoss()
    optimizer_g = RAdam(model_g.parameters())
    optimizer_d = RAdam(model_d.parameters())
    # check generator trainable
    y_hat = model_g(z, c)
    p_hat = model_d(y_hat)
    adv_loss = gen_adv_criterion(p_hat)
    sc_loss, mag_loss = aux_criterion(y_hat, y)
    aux_loss = sc_loss + mag_loss
    loss_g = adv_loss + aux_loss
    optimizer_g.zero_grad()
    loss_g.backward()
    optimizer_g.step()
    # check discriminator trainable
    p = model_d(y)
    # Detach so the discriminator update does not backprop into the generator.
    p_hat = model_d(y_hat.detach())
    real_loss, fake_loss = dis_adv_criterion(p_hat, p)
    loss_d = real_loss + fake_loss
    optimizer_d.zero_grad()
    loss_d.backward()
    optimizer_d.step()
@pytest.mark.parametrize(
"upsample_net, aux_context_window",
[
("ConvInUpsampleNetwork", 0),
("ConvInUpsampleNetwork", 1),
("ConvInUpsampleNetwork", 2),
("ConvInUpsampleNetwork", 3),
("UpsampleNetwork", 0),
],
)
def test_causal_parallel_wavegan(upsample_net, aux_context_window):
    """Check the causal PWG generator does not look at future inputs.

    Only the second halves of the noise and conditioning are perturbed, so the
    first half of the generated waveform must be bit-identical between runs.
    """
    batch_size = 1
    batch_length = 4096
    args_g = make_generator_args(
        use_causal_conv=True,
        upsample_net=upsample_net,
        aux_context_window=aux_context_window,
        dropout=0.0,
    )
    model_g = ParallelWaveGANGenerator(**args_g)
    z = torch.randn(batch_size, 1, batch_length)
    c = torch.randn(
        batch_size,
        args_g["aux_channels"],
        batch_length // np.prod(args_g["upsample_params"]["upsample_scales"]),
    )
    z_ = z.clone()
    c_ = c.clone()
    z_[..., z.size(-1) // 2 :] = torch.randn(z[..., z.size(-1) // 2 :].shape)
    c_[..., c.size(-1) // 2 :] = torch.randn(c[..., c.size(-1) // 2 :].shape)
    c = torch.nn.ConstantPad1d(args_g["aux_context_window"], 0.0)(c)
    c_ = torch.nn.ConstantPad1d(args_g["aux_context_window"], 0.0)(c_)
    # Sanity checks: the perturbed halves must actually differ (replaces the
    # old try/except/else inversions around assert_array_equal).
    assert not np.array_equal(c.numpy(), c_.numpy()), "Must be different."
    assert not np.array_equal(z.numpy(), z_.numpy()), "Must be different."
    # check causality
    y = model_g(z, c)
    y_ = model_g(z_, c_)
    np.testing.assert_array_equal(
        y[..., : y.size(-1) // 2].detach().cpu().numpy(),
        y_[..., : y_.size(-1) // 2].detach().cpu().numpy(),
    )
| 10,809 | 29.111421 | 86 | py |
ParallelWaveGAN | ParallelWaveGAN-master/test/test_style_melgan.py | #!/usr/bin/env python3
# Copyright 2021 Tomoki Hayashi
# MIT License (https://opensource.org/licenses/MIT)
"""Test code for StyleMelGAN modules."""
import logging
import numpy as np
import pytest
import torch
from test_parallel_wavegan import make_mutli_reso_stft_loss_args
from parallel_wavegan.losses import (
DiscriminatorAdversarialLoss,
GeneratorAdversarialLoss,
MultiResolutionSTFTLoss,
)
from parallel_wavegan.models import StyleMelGANDiscriminator, StyleMelGANGenerator
logging.basicConfig(
level=logging.DEBUG,
format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
)
def make_style_melgan_generator_args(**kwargs):
    """Return default StyleMelGAN generator arguments, overridden by kwargs."""
    return {
        "in_channels": 128,
        "aux_channels": 80,
        "channels": 64,
        "out_channels": 1,
        "kernel_size": 9,
        "dilation": 2,
        "bias": True,
        "noise_upsample_scales": [11, 2, 2, 2],
        "noise_upsample_activation": "LeakyReLU",
        "noise_upsample_activation_params": {"negative_slope": 0.2},
        "upsample_scales": [2, 2, 2, 2, 2, 2, 2, 2, 1],
        "upsample_mode": "nearest",
        "gated_function": "softmax",
        "use_weight_norm": True,
        **kwargs,
    }
def make_style_melgan_discriminator_args(**kwargs):
    """Return default StyleMelGAN discriminator arguments, overridden by kwargs."""
    return {
        "repeats": 2,
        "window_sizes": [512, 1024, 2048, 4096],
        "pqmf_params": [
            [1, None, None, None],
            [2, 62, 0.26700, 9.0],
            [4, 62, 0.14200, 9.0],
            [8, 62, 0.07949, 9.0],
        ],
        "discriminator_params": {
            "out_channels": 1,
            "kernel_sizes": [5, 3],
            "channels": 16,
            "max_downsample_channels": 32,
            "bias": True,
            "downsample_scales": [4, 4, 4, 1],
            "nonlinear_activation": "LeakyReLU",
            "nonlinear_activation_params": {"negative_slope": 0.2},
            "pad": "ReflectionPad1d",
            "pad_params": {},
        },
        "use_weight_norm": True,
        **kwargs,
    }
@pytest.mark.parametrize(
"dict_d",
[
{"repeats": 1},
{"repeats": 4},
],
)
def test_style_melgan_discriminator(dict_d):
    """Run random audio through StyleMelGANDiscriminator and the adv loss."""
    args_d = make_style_melgan_discriminator_args(**dict_d)
    # Four fake waveforms of 2**14 samples each.
    fake_audio = torch.randn(4, 1, 2**14)
    model_d = StyleMelGANDiscriminator(**args_d)
    adv_criterion = GeneratorAdversarialLoss()
    adv_criterion(model_d(fake_audio))
@pytest.mark.parametrize(
"dict_g",
[
{},
{"noise_upsample_scales": [4, 4, 4]},
],
)
def test_style_melgan_generator(dict_g):
    """Run forward and inference passes of StyleMelGANGenerator."""
    args_g = make_style_melgan_generator_args(**dict_g)
    total_upsampling = np.prod(args_g["noise_upsample_scales"]) * np.prod(
        args_g["upsample_scales"]
    )
    batch = 4
    noise = torch.randn(batch, args_g["in_channels"], 1)
    num_frames = total_upsampling // np.prod(args_g["upsample_scales"])
    aux = torch.randn(batch, args_g["aux_channels"], num_frames)
    model_g = StyleMelGANGenerator(**args_g)
    model_g(aux, noise)
    # inference takes a 2-D (frames, aux_channels) feature without batch axis
    aux = torch.randn(512, args_g["aux_channels"])
    print(model_g.inference(aux).shape)
@pytest.mark.parametrize(
"dict_g, dict_d, dict_loss, loss_type",
[
({}, {}, {}, "mse"),
({}, {}, {}, "hinge"),
({"noise_upsample_scales": [4, 4, 4]}, {}, {}, "mse"),
({"gated_function": "sigmoid"}, {}, {}, "mse"),
],
)
def test_style_melgan_trainable(dict_g, dict_d, dict_loss, loss_type):
    """Smoke-test one G/D update for StyleMelGAN with mse/hinge losses."""
    # setup
    args_g = make_style_melgan_generator_args(**dict_g)
    args_d = make_style_melgan_discriminator_args(**dict_d)
    args_loss = make_mutli_reso_stft_loss_args(**dict_loss)
    batch_size = 4
    # Total length produced from a single noise frame after both upsamplers.
    batch_length = np.prod(args_g["noise_upsample_scales"]) * np.prod(
        args_g["upsample_scales"]
    )
    y = torch.randn(batch_size, 1, batch_length)
    c = torch.randn(
        batch_size,
        args_g["aux_channels"],
        batch_length // np.prod(args_g["upsample_scales"]),
    )
    model_g = StyleMelGANGenerator(**args_g)
    model_d = StyleMelGANDiscriminator(**args_d)
    aux_criterion = MultiResolutionSTFTLoss(**args_loss)
    gen_adv_criterion = GeneratorAdversarialLoss(loss_type=loss_type)
    dis_adv_criterion = DiscriminatorAdversarialLoss(loss_type=loss_type)
    optimizer_g = torch.optim.Adam(model_g.parameters())
    optimizer_d = torch.optim.Adam(model_d.parameters())
    # check generator trainable
    y_hat = model_g(c)
    p_hat = model_d(y_hat)
    adv_loss = gen_adv_criterion(p_hat)
    sc_loss, mag_loss = aux_criterion(y_hat, y)
    aux_loss = sc_loss + mag_loss
    loss_g = adv_loss + aux_loss
    optimizer_g.zero_grad()
    loss_g.backward()
    optimizer_g.step()
    # check discriminator trainable
    p = model_d(y)
    # Detach so the discriminator update does not backprop into the generator.
    p_hat = model_d(y_hat.detach())
    real_loss, fake_loss = dis_adv_criterion(p_hat, p)
    loss_d = real_loss + fake_loss
    optimizer_d.zero_grad()
    loss_d.backward()
    optimizer_d.step()
| 5,057 | 27.576271 | 82 | py |
ParallelWaveGAN | ParallelWaveGAN-master/egs/vctk/vq1/local/decode_from_text.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2019 Tomoki Hayashi
# MIT License (https://opensource.org/licenses/MIT)
"""Decode text with trained VQ-VAE Generator or discrete symbol vocoder."""
import argparse
import logging
import os
import time
import soundfile as sf
import torch
import yaml
from tqdm import tqdm
import parallel_wavegan.models
from parallel_wavegan.utils import load_model
def main() -> None:
    """Run decoding process.

    Reads a kaldi-style text file (each line: ``<utt_id> <idx1> <idx2> ...``),
    synthesizes one waveform per utterance with either a trained VQ-VAE decoder
    or a discrete-symbol vocoder, and writes PCM 16-bit wav files to --outdir.
    """
    parser = argparse.ArgumentParser(
        description=(
            "Decode text with trained VQ-VAE decoder "
            "(See detail in parallel_wavegan/bin/decode.py)."
        )
    )
    parser.add_argument(
        "--text",
        required=True,
        type=str,
        help="kaldi-style text file.",
    )
    parser.add_argument(
        "--utt2spk",
        default=None,
        type=str,
        help="kaldi-style utt2spk file.",
    )
    parser.add_argument(
        "--spk2idx",
        default=None,
        type=str,
        help="kaldi-style spk2idx file.",
    )
    parser.add_argument(
        "--outdir",
        type=str,
        required=True,
        help="directory to save generated speech.",
    )
    parser.add_argument(
        "--checkpoint",
        type=str,
        required=True,
        help="checkpoint file to be loaded.",
    )
    parser.add_argument(
        "--config",
        default=None,
        type=str,
        help=(
            "yaml format configuration file. if not explicitly provided, "
            "it will be searched in the checkpoint directory. (default=None)"
        ),
    )
    parser.add_argument(
        "--verbose",
        type=int,
        default=1,
        help="logging level. higher is more logging. (default=1)",
    )
    args = parser.parse_args()
    # set logger
    if args.verbose > 1:
        logging.basicConfig(
            level=logging.DEBUG,
            format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
        )
    elif args.verbose > 0:
        logging.basicConfig(
            level=logging.INFO,
            format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
        )
    else:
        logging.basicConfig(
            level=logging.WARN,
            format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
        )
        logging.warning("Skip DEBUG/INFO messages")
    # check directory existence
    if not os.path.exists(args.outdir):
        os.makedirs(args.outdir)
    # load config (default: config.yml next to the checkpoint)
    if args.config is None:
        dirname = os.path.dirname(args.checkpoint)
        args.config = os.path.join(dirname, "config.yml")
    with open(args.config) as f:
        config = yaml.load(f, Loader=yaml.Loader)
    # command-line arguments take precedence over config-file entries
    config.update(vars(args))
    # setup model
    if torch.cuda.is_available():
        device = torch.device("cuda")
    else:
        device = torch.device("cpu")
    model = load_model(args.checkpoint, config)
    logging.info(f"Loaded model parameters from {args.checkpoint}.")
    # remove weight normalization before inference
    model.remove_weight_norm()
    model = model.eval().to(device)
    # the decode path below differs for VQ-VAE vs. discrete-symbol vocoders
    is_vqvae = isinstance(model, parallel_wavegan.models.VQVAE)
    # setup dataset: parse "<utt_id> <idx1> <idx2> ..." lines into id -> indices
    with open(args.text) as f:
        lines = [l_.replace("\n", "") for l_ in f.readlines()]
    text = {l_.split()[0]: list(map(int, l_.split()[1:])) for l_ in lines}
    # optional speaker conditioning: utt_id -> speaker name -> integer index
    utt2spk = None
    if args.utt2spk is not None:
        assert args.spk2idx is not None
        with open(args.utt2spk) as f:
            lines = [l_.replace("\n", "") for l_ in f.readlines()]
        utt2spk = {l_.split()[0]: str(l_.split()[1]) for l_ in lines}
        with open(args.spk2idx) as f:
            lines = [l_.replace("\n", "") for l_ in f.readlines()]
        spk2idx = {l_.split()[0]: int(l_.split()[1]) for l_ in lines}
    # start generation
    total_rtf = 0.0
    with torch.no_grad(), tqdm(text.items(), desc="[decode]") as pbar:
        for idx, items in enumerate(pbar, 1):
            utt_id, indices = items
            z = torch.LongTensor(indices).view(1, -1).to(device)
            g = None
            if utt2spk is not None:
                spk_idx = spk2idx[utt2spk[utt_id]]
                g = torch.tensor(spk_idx).long().view(1).to(device)
            if is_vqvae:
                # VQVAE case: decode codebook indices directly to a waveform;
                # RTF = decoding time / generated audio duration
                start = time.time()
                y = model.decode(z, None, g).view(-1).cpu().numpy()
                rtf = (time.time() - start) / (len(y) / config["sampling_rate"])
                pbar.set_postfix({"RTF": rtf})
                total_rtf += rtf
            else:
                # Discrete symbol vocoder case: expects indices shaped (T, 1)
                # and the speaker id as a plain int
                start = time.time()
                g = int(g.item()) if g is not None else None
                y = model.inference(z.view(-1, 1), g=g).view(-1).cpu().numpy()
                rtf = (time.time() - start) / (len(y) / config["sampling_rate"])
                pbar.set_postfix({"RTF": rtf})
                total_rtf += rtf
            # save as PCM 16 bit wav file
            sf.write(
                os.path.join(config["outdir"], f"{utt_id}_gen.wav"),
                y,
                config["sampling_rate"],
                "PCM_16",
            )
    # report average RTF
    # NOTE(review): assumes at least one utterance; `idx` is undefined
    # (NameError) if the text file is empty — confirm inputs are non-empty
    logging.info(
        f"Finished generation of {idx} utterances (RTF = {total_rtf / idx:.03f})."
    )
if __name__ == "__main__":
    main()
| 5,353 | 29.420455 | 84 | py |
ParallelWaveGAN | ParallelWaveGAN-master/egs/cvss_c/voc1/local/decode_from_text.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2019 Tomoki Hayashi
# MIT License (https://opensource.org/licenses/MIT)
"""Decode text with trained VQ-VAE Generator or discrete symbol vocoder."""
import argparse
import logging
import os
import time
import soundfile as sf
import torch
import yaml
from tqdm import tqdm
import parallel_wavegan.models
from parallel_wavegan.utils import load_model
def str2bool(value):
    """Parse a boolean command-line value.

    ``argparse`` with ``type=bool`` treats ANY non-empty string (including
    "False") as True, so an explicit parser is required for --gt_duration.

    Args:
        value (str or bool): Raw command-line value.

    Returns:
        bool: Parsed boolean.

    Raises:
        argparse.ArgumentTypeError: If the value is not a recognized boolean.

    """
    if isinstance(value, bool):
        return value
    if value.lower() in ("yes", "true", "t", "y", "1"):
        return True
    if value.lower() in ("no", "false", "f", "n", "0"):
        return False
    raise argparse.ArgumentTypeError(f"invalid boolean value: {value!r}")


def main():
    """Run decoding process.

    Reads a kaldi-style text file (each line: ``<utt_id> <idx1> <idx2> ...``),
    collapses consecutive repeated symbols into (symbol, duration) pairs,
    synthesizes one waveform per utterance with a trained discrete-symbol
    vocoder, and writes PCM 16-bit wav files to --outdir.
    """
    parser = argparse.ArgumentParser(
        description=(
            "Decode text with trained VQ-VAE decoder "
            "(See detail in parallel_wavegan/bin/decode.py)."
        )
    )
    parser.add_argument(
        "--text",
        required=True,
        type=str,
        help="kaldi-style text file.",
    )
    parser.add_argument(
        "--utt2spk",
        default=None,
        type=str,
        help="kaldi-style utt2spk file.",
    )
    parser.add_argument(
        "--spk2idx",
        default=None,
        type=str,
        help="kaldi-style spk2idx file.",
    )
    parser.add_argument(
        "--outdir",
        type=str,
        required=True,
        help="directory to save generated speech.",
    )
    parser.add_argument(
        "--checkpoint",
        type=str,
        required=True,
        help="checkpoint file to be loaded.",
    )
    parser.add_argument(
        "--gt_duration",
        # NOTE: was `type=bool`, which parses "False" as True; str2bool keeps
        # the same CLI form (--gt_duration <value>) but parses it correctly.
        type=str2bool,
        default=False,
        help="whether to use ground truth duration",
    )
    parser.add_argument(
        "--config",
        default=None,
        type=str,
        help=(
            "yaml format configuration file. if not explicitly provided, "
            "it will be searched in the checkpoint directory. (default=None)"
        ),
    )
    parser.add_argument(
        "--verbose",
        type=int,
        default=1,
        help="logging level. higher is more logging. (default=1)",
    )
    args = parser.parse_args()
    # set logger
    if args.verbose > 1:
        logging.basicConfig(
            level=logging.DEBUG,
            format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
        )
    elif args.verbose > 0:
        logging.basicConfig(
            level=logging.INFO,
            format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
        )
    else:
        logging.basicConfig(
            level=logging.WARN,
            format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
        )
        logging.warning("Skip DEBUG/INFO messages")
    # check directory existence
    if not os.path.exists(args.outdir):
        os.makedirs(args.outdir)
    # load config (default: config.yml next to the checkpoint)
    if args.config is None:
        dirname = os.path.dirname(args.checkpoint)
        args.config = os.path.join(dirname, "config.yml")
    with open(args.config) as f:
        config = yaml.load(f, Loader=yaml.Loader)
    # command-line arguments take precedence over config-file entries
    config.update(vars(args))
    # setup model
    if torch.cuda.is_available():
        device = torch.device("cuda")
    else:
        device = torch.device("cpu")
    model = load_model(args.checkpoint, config)
    logging.info(f"Loaded model parameters from {args.checkpoint}.")
    # remove weight normalization before inference
    model.remove_weight_norm()
    model = model.eval().to(device)
    # setup dataset: parse "<utt_id> <idx1> <idx2> ..." lines into id -> indices
    with open(args.text) as f:
        lines = [l_.replace("\n", "") for l_ in f.readlines()]
    text = {l_.split()[0]: list(map(int, l_.split()[1:])) for l_ in lines}
    # optional speaker conditioning: utt_id -> speaker name -> integer index
    utt2spk = None
    if args.utt2spk is not None:
        assert args.spk2idx is not None
        with open(args.utt2spk) as f:
            lines = [l_.replace("\n", "") for l_ in f.readlines()]
        utt2spk = {l_.split()[0]: str(l_.split()[1]) for l_ in lines}
        with open(args.spk2idx) as f:
            lines = [l_.replace("\n", "") for l_ in f.readlines()]
        spk2idx = {l_.split()[0]: int(l_.split()[1]) for l_ in lines}
    # start generation
    idx = 0  # guard against an empty text file in the final report
    total_rtf = 0.0
    with torch.no_grad(), tqdm(text.items(), desc="[decode]") as pbar:
        for idx, items in enumerate(pbar, 1):
            utt_id, indices = items
            z = torch.LongTensor(indices).view(1, -1).to(device)
            # collapse consecutive duplicates into unique symbols + durations
            z, ds = torch.unique_consecutive(z, return_counts=True, dim=1)
            g = None
            if utt2spk is not None:
                spk_idx = spk2idx[utt2spk[utt_id]]
                g = torch.tensor(spk_idx).long().view(1).to(device)
            # Discrete symbol vocoder case: expects indices shaped (T, 1)
            # and the speaker id as a plain int
            start = time.time()
            g = int(g.item()) if g is not None else None
            if args.gt_duration:
                # feed ground-truth per-symbol durations to the model
                y = model.inference(z.view(-1, 1), g=g, ds=ds).view(-1).cpu().numpy()
            else:
                y = model.inference(z.view(-1, 1), g=g).view(-1).cpu().numpy()
            # RTF = decoding time / generated audio duration
            rtf = (time.time() - start) / (len(y) / config["sampling_rate"])
            pbar.set_postfix({"RTF": rtf})
            total_rtf += rtf
            # save as PCM 16 bit wav file
            sf.write(
                os.path.join(config["outdir"], f"{utt_id}_gen.wav"),
                y,
                config["sampling_rate"],
                "PCM_16",
            )
    # report average RTF (skip when no utterance was decoded)
    if idx > 0:
        logging.info(
            f"Finished generation of {idx} utterances (RTF = {total_rtf / idx:.03f})."
        )
if __name__ == "__main__":
    main()
| 5,294 | 28.416667 | 85 | py |
ParallelWaveGAN | ParallelWaveGAN-master/parallel_wavegan/functions/vector_quantizer.py | # -*- coding: utf-8 -*-
# Copyright 2019 Tomoki Hayashi
# MIT License (https://opensource.org/licenses/MIT)
"""Vector quantization modules.
These codes are modified from https://github.com/ritheshkumar95/pytorch-vqvae.
"""
import torch
from torch.autograd import Function
class VectorQuantization(Function):
    """Non-differentiable nearest-neighbor vector quantization."""

    @staticmethod
    @torch.no_grad()
    def forward(ctx, inputs, codebook):
        """Calculate forward propagation.

        Args:
            inputs (Tensor): Input tensor (B, `*`, embed_dim).
            codebook (Tensor): Embedding weights (num_embeds, embed_dim).

        Returns:
            LongTensor: Codebook indices (B, `*`).

        """
        embed_dim = codebook.size(1)
        in_shape = inputs.size()
        flat_inputs = inputs.view(-1, embed_dim)

        # squared norms of the codebook entries and of the inputs
        code_sq = torch.sum(codebook**2, dim=1)
        in_sq = torch.sum(flat_inputs**2, dim=1, keepdim=True)

        # ||x - e||^2 = ||x||^2 + ||e||^2 - 2 * x.e, fused via addmm
        dists = torch.addmm(
            code_sq + in_sq,
            flat_inputs,
            codebook.t(),
            alpha=-2.0,
            beta=1.0,
        )

        # pick the closest codebook entry for every input vector
        _, flat_indices = torch.min(dists, dim=1)
        indices = flat_indices.view(*in_shape[:-1])
        ctx.mark_non_differentiable(indices)
        return indices

    @staticmethod
    def backward(ctx, grad_output):
        """Calculate backward propagation."""
        raise RuntimeError(
            "Trying to call `.grad()` on graph containing "
            "`VectorQuantization`. The function `VectorQuantization` "
            "is not differentiable. Use `VectorQuantizationStraightThrough` "
            "if you want a straight-through estimator of the gradient."
        )
class VectorQuantizationStraightThrough(Function):
    """Differentiable vector quantize module with straight through technique."""

    @staticmethod
    def forward(ctx, inputs, codebook):
        """Calculate forward propagation.

        Args:
            inputs (Tensor): Input tensor (B, `*`, embed_dim).
            codebook (Tensor): Embedding weights (num_embeds, embed_dim).

        Returns:
            Tensor: Codebook embeddings (B, `*`, embed_dim).
            LongTensor: Codebook indices (B, `*`).

        """
        indices = vector_quantize(inputs, codebook)
        flat_indices = indices.view(-1)
        ctx.save_for_backward(flat_indices, codebook)
        ctx.mark_non_differentiable(flat_indices)

        # gather the selected codebook rows and restore the input shape
        selected = torch.index_select(codebook, dim=0, index=flat_indices)
        codes = selected.view_as(inputs)
        return (codes, flat_indices)

    @staticmethod
    def backward(ctx, grad_output, grad_indices):
        """Calculate backward propagation."""
        grad_inputs, grad_codebook = None, None
        if ctx.needs_input_grad[0]:
            # straight-through estimator: gradients pass through unchanged
            grad_inputs = grad_output.clone()
        if ctx.needs_input_grad[1]:
            # scatter-add output gradients onto the selected codebook rows
            indices, codebook = ctx.saved_tensors
            flat_grad = grad_output.contiguous().view(-1, codebook.size(1))
            grad_codebook = torch.zeros_like(codebook)
            grad_codebook.index_add_(0, indices, flat_grad)
        return (grad_inputs, grad_codebook)
# register functions
vector_quantize = VectorQuantization.apply
vector_quantize_straight_through = VectorQuantizationStraightThrough.apply

# __all__ must contain attribute *names* (strings); listing the function
# objects themselves makes `from <module> import *` raise TypeError.
__all__ = ["vector_quantize", "vector_quantize_straight_through"]
| 3,630 | 30.573913 | 83 | py |
ParallelWaveGAN | ParallelWaveGAN-master/parallel_wavegan/models/parallel_wavegan.py | # -*- coding: utf-8 -*-
# Copyright 2019 Tomoki Hayashi
# MIT License (https://opensource.org/licenses/MIT)
"""Parallel WaveGAN Modules."""
import logging
import math
import numpy as np
import torch
from parallel_wavegan import models
from parallel_wavegan.layers import Conv1d, Conv1d1x1
from parallel_wavegan.layers import WaveNetResidualBlock as ResidualBlock
from parallel_wavegan.layers import upsample
from parallel_wavegan.utils import read_hdf5
class ParallelWaveGANGenerator(torch.nn.Module):
    """Parallel WaveGAN Generator module."""

    def __init__(
        self,
        in_channels=1,
        out_channels=1,
        kernel_size=3,
        layers=30,
        stacks=3,
        residual_channels=64,
        gate_channels=128,
        skip_channels=64,
        aux_channels=80,
        aux_context_window=2,
        dropout=0.0,
        bias=True,
        use_weight_norm=True,
        use_causal_conv=False,
        upsample_conditional_features=True,
        upsample_net="ConvInUpsampleNetwork",
        upsample_params={"upsample_scales": [4, 4, 4, 4]},
    ):
        """Initialize Parallel WaveGAN Generator module.

        Args:
            in_channels (int): Number of input channels.
            out_channels (int): Number of output channels.
            kernel_size (int): Kernel size of dilated convolution.
            layers (int): Number of residual block layers.
            stacks (int): Number of stacks i.e., dilation cycles.
            residual_channels (int): Number of channels in residual conv.
            gate_channels (int): Number of channels in gated conv.
            skip_channels (int): Number of channels in skip conv.
            aux_channels (int): Number of channels for auxiliary feature conv.
            aux_context_window (int): Context window size for auxiliary feature.
            dropout (float): Dropout rate. 0.0 means no dropout applied.
            bias (bool): Whether to use bias parameter in conv layer.
            use_weight_norm (bool): Whether to use weight norm.
                If set to true, it will be applied to all of the conv layers.
            use_causal_conv (bool): Whether to use causal structure.
            upsample_conditional_features (bool): Whether to use upsampling network.
            upsample_net (str): Upsampling network architecture.
            upsample_params (dict): Upsampling network parameters.

        """
        super(ParallelWaveGANGenerator, self).__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.aux_channels = aux_channels
        self.aux_context_window = aux_context_window
        self.layers = layers
        self.stacks = stacks
        self.kernel_size = kernel_size

        # check the number of layers and stacks
        assert layers % stacks == 0
        layers_per_stack = layers // stacks

        # define first convolution
        self.first_conv = Conv1d1x1(in_channels, residual_channels, bias=True)

        # define conv + upsampling network
        if upsample_conditional_features:
            # copy before updating: the update() calls below would otherwise
            # mutate the shared default-argument dict (and the caller's dict),
            # leaking keys such as "use_weight_norm" into later instantiations
            upsample_params = dict(upsample_params)
            upsample_params.update(
                {
                    "use_causal_conv": use_causal_conv,
                }
            )
            if upsample_net == "MelGANGenerator":
                assert aux_context_window == 0
                upsample_params.update(
                    {
                        "use_weight_norm": False,  # not to apply twice
                        "use_final_nonlinear_activation": False,
                    }
                )
                self.upsample_net = getattr(models, upsample_net)(**upsample_params)
            else:
                if upsample_net == "ConvInUpsampleNetwork":
                    upsample_params.update(
                        {
                            "aux_channels": aux_channels,
                            "aux_context_window": aux_context_window,
                        }
                    )
                self.upsample_net = getattr(upsample, upsample_net)(**upsample_params)
            self.upsample_factor = np.prod(upsample_params["upsample_scales"])
        else:
            self.upsample_net = None
            self.upsample_factor = 1

        # define residual blocks (dilation restarts every stack)
        self.conv_layers = torch.nn.ModuleList()
        for layer in range(layers):
            dilation = 2 ** (layer % layers_per_stack)
            conv = ResidualBlock(
                kernel_size=kernel_size,
                residual_channels=residual_channels,
                gate_channels=gate_channels,
                skip_channels=skip_channels,
                aux_channels=aux_channels,
                dilation=dilation,
                dropout=dropout,
                bias=bias,
                use_causal_conv=use_causal_conv,
            )
            self.conv_layers += [conv]

        # define output layers
        self.last_conv_layers = torch.nn.ModuleList(
            [
                torch.nn.ReLU(inplace=True),
                Conv1d1x1(skip_channels, skip_channels, bias=True),
                torch.nn.ReLU(inplace=True),
                Conv1d1x1(skip_channels, out_channels, bias=True),
            ]
        )

        # apply weight norm
        if use_weight_norm:
            self.apply_weight_norm()

    def forward(self, z, c):
        """Calculate forward propagation.

        Args:
            z (Tensor): Input noise signal (B, 1, T).
            c (Tensor): Local conditioning auxiliary features (B, C ,T').

        Returns:
            Tensor: Output tensor (B, out_channels, T)

        """
        # perform upsampling
        if c is not None and self.upsample_net is not None:
            c = self.upsample_net(c)
            assert c.size(-1) == z.size(-1)

        # encode to hidden representation
        x = self.first_conv(z)
        skips = 0
        for f in self.conv_layers:
            x, h = f(x, c)
            skips += h
        # normalize the accumulated skip connections
        skips *= math.sqrt(1.0 / len(self.conv_layers))

        # apply final layers
        x = skips
        for f in self.last_conv_layers:
            x = f(x)

        return x

    def remove_weight_norm(self):
        """Remove weight normalization module from all of the layers."""

        def _remove_weight_norm(m):
            try:
                logging.debug(f"Weight norm is removed from {m}.")
                torch.nn.utils.remove_weight_norm(m)
            except ValueError:  # this module didn't have weight norm
                return

        self.apply(_remove_weight_norm)

    def apply_weight_norm(self):
        """Apply weight normalization module from all of the layers."""

        def _apply_weight_norm(m):
            if isinstance(m, torch.nn.Conv1d) or isinstance(m, torch.nn.Conv2d):
                torch.nn.utils.weight_norm(m)
                logging.debug(f"Weight norm is applied to {m}.")

        self.apply(_apply_weight_norm)

    @staticmethod
    def _get_receptive_field_size(
        layers, stacks, kernel_size, dilation=lambda x: 2**x
    ):
        # receptive field of a stack of dilated convs with restarting dilation
        assert layers % stacks == 0
        layers_per_cycle = layers // stacks
        dilations = [dilation(i % layers_per_cycle) for i in range(layers)]
        return (kernel_size - 1) * sum(dilations) + 1

    @property
    def receptive_field_size(self):
        """Return receptive field size."""
        return self._get_receptive_field_size(
            self.layers, self.stacks, self.kernel_size
        )

    def register_stats(self, stats):
        """Register stats for de-normalization as buffer.

        Args:
            stats (str): Path of statistics file (".npy" or ".h5").

        """
        assert stats.endswith(".h5") or stats.endswith(".npy")
        if stats.endswith(".h5"):
            mean = read_hdf5(stats, "mean").reshape(-1)
            scale = read_hdf5(stats, "scale").reshape(-1)
        else:
            mean = np.load(stats)[0].reshape(-1)
            scale = np.load(stats)[1].reshape(-1)
        self.register_buffer("mean", torch.from_numpy(mean).float())
        self.register_buffer("scale", torch.from_numpy(scale).float())
        logging.info("Successfully registered stats as buffer.")

    def inference(self, c=None, x=None, normalize_before=False):
        """Perform inference.

        Args:
            c (Union[Tensor, ndarray]): Local conditioning auxiliary features (T' ,C).
            x (Union[Tensor, ndarray]): Input noise signal (T, 1).
            normalize_before (bool): Whether to perform normalization.

        Returns:
            Tensor: Output tensor (T, out_channels)

        """
        if x is not None:
            if not isinstance(x, torch.Tensor):
                x = torch.tensor(x, dtype=torch.float).to(
                    next(self.parameters()).device
                )
            x = x.transpose(1, 0).unsqueeze(0)
        else:
            # no noise given: draw it to match the upsampled feature length
            assert c is not None
            x = torch.randn(1, 1, len(c) * self.upsample_factor).to(
                next(self.parameters()).device
            )
        if c is not None:
            if not isinstance(c, torch.Tensor):
                c = torch.tensor(c, dtype=torch.float).to(
                    next(self.parameters()).device
                )
            if normalize_before:
                c = (c - self.mean) / self.scale
            c = c.transpose(1, 0).unsqueeze(0)
            # pad features to provide the auxiliary context window
            c = torch.nn.ReplicationPad1d(self.aux_context_window)(c)
        return self.forward(x, c).squeeze(0).transpose(1, 0)
class ParallelWaveGANDiscriminator(torch.nn.Module):
    """Parallel WaveGAN Discriminator module."""

    def __init__(
        self,
        in_channels=1,
        out_channels=1,
        kernel_size=3,
        layers=10,
        conv_channels=64,
        dilation_factor=1,
        nonlinear_activation="LeakyReLU",
        nonlinear_activation_params={"negative_slope": 0.2},
        bias=True,
        use_weight_norm=True,
    ):
        """Initialize Parallel WaveGAN Discriminator module.

        Args:
            in_channels (int): Number of input channels.
            out_channels (int): Number of output channels.
            kernel_size (int): Kernel size of each conv layer.
            layers (int): Number of conv layers.
            conv_channels (int): Number of hidden channels.
            dilation_factor (int): Dilation factor. For example, if dilation_factor = 2,
                the dilation will be 2, 4, 8, ..., and so on.
            nonlinear_activation (str): Nonlinear function after each conv.
            nonlinear_activation_params (dict): Nonlinear function parameters
            bias (bool): Whether to use bias parameter in conv.
            use_weight_norm (bool) Whether to use weight norm.
                If set to true, it will be applied to all of the conv layers.

        """
        super(ParallelWaveGANDiscriminator, self).__init__()
        assert (kernel_size - 1) % 2 == 0, "Not support even number kernel size."
        assert dilation_factor > 0, "Dilation factor must be > 0."

        # build (layers - 1) dilated conv + activation pairs
        module_list = []
        num_in = in_channels
        for idx in range(layers - 1):
            if idx == 0:
                dilation = 1
            else:
                # linearly or exponentially growing dilation
                dilation = idx if dilation_factor == 1 else dilation_factor**idx
                num_in = conv_channels
            pad_width = (kernel_size - 1) // 2 * dilation
            module_list.append(
                Conv1d(
                    num_in,
                    conv_channels,
                    kernel_size=kernel_size,
                    padding=pad_width,
                    dilation=dilation,
                    bias=bias,
                )
            )
            module_list.append(
                getattr(torch.nn, nonlinear_activation)(
                    inplace=True, **nonlinear_activation_params
                )
            )

        # final projection to the output channels (no activation)
        module_list.append(
            Conv1d(
                num_in,
                out_channels,
                kernel_size=kernel_size,
                padding=(kernel_size - 1) // 2,
                bias=bias,
            )
        )
        self.conv_layers = torch.nn.ModuleList(module_list)

        # apply weight norm
        if use_weight_norm:
            self.apply_weight_norm()

    def forward(self, x):
        """Calculate forward propagation.

        Args:
            x (Tensor): Input signal (B, 1, T).

        Returns:
            Tensor: Output tensor (B, out_channels, T)

        """
        for layer in self.conv_layers:
            x = layer(x)
        return x

    def apply_weight_norm(self):
        """Recursively apply weight normalization to all conv layers."""

        def _apply_weight_norm(m):
            if isinstance(m, (torch.nn.Conv1d, torch.nn.Conv2d)):
                torch.nn.utils.weight_norm(m)
                logging.debug(f"Weight norm is applied to {m}.")

        self.apply(_apply_weight_norm)

    def remove_weight_norm(self):
        """Recursively remove weight normalization from all layers."""

        def _remove_weight_norm(m):
            try:
                logging.debug(f"Weight norm is removed from {m}.")
                torch.nn.utils.remove_weight_norm(m)
            except ValueError:  # this module didn't have weight norm
                return

        self.apply(_remove_weight_norm)
class ResidualParallelWaveGANDiscriminator(torch.nn.Module):
    """Parallel WaveGAN discriminator built from WaveNet-style residual blocks."""

    def __init__(
        self,
        in_channels=1,
        out_channels=1,
        kernel_size=3,
        layers=30,
        stacks=3,
        residual_channels=64,
        gate_channels=128,
        skip_channels=64,
        dropout=0.0,
        bias=True,
        use_weight_norm=True,
        use_causal_conv=False,
        nonlinear_activation="LeakyReLU",
        nonlinear_activation_params={"negative_slope": 0.2},
    ):
        """Initialize ResidualParallelWaveGANDiscriminator module.

        Args:
            in_channels (int): Number of input channels.
            out_channels (int): Number of output channels.
            kernel_size (int): Kernel size of dilated convolution.
            layers (int): Number of residual block layers.
            stacks (int): Number of stacks i.e., dilation cycles.
            residual_channels (int): Number of channels in residual conv.
            gate_channels (int): Number of channels in gated conv.
            skip_channels (int): Number of channels in skip conv.
            dropout (float): Dropout rate. 0.0 means no dropout applied.
            bias (bool): Whether to use bias parameter in conv.
            use_weight_norm (bool): Whether to use weight norm.
                If set to true, it will be applied to all of the conv layers.
            use_causal_conv (bool): Whether to use causal structure.
            nonlinear_activation (str): Nonlinear activation function name.
            nonlinear_activation_params (dict): Nonlinear function parameters

        """
        super(ResidualParallelWaveGANDiscriminator, self).__init__()
        assert (kernel_size - 1) % 2 == 0, "Not support even number kernel size."

        self.in_channels = in_channels
        self.out_channels = out_channels
        self.layers = layers
        self.stacks = stacks
        self.kernel_size = kernel_size

        # the dilation cycle length must divide the total number of layers
        assert layers % stacks == 0
        layers_per_stack = layers // stacks

        def _make_activation():
            # fresh activation instance with the configured hyperparameters
            return getattr(torch.nn, nonlinear_activation)(
                inplace=True, **nonlinear_activation_params
            )

        # input projection followed by a nonlinearity
        self.first_conv = torch.nn.Sequential(
            Conv1d1x1(in_channels, residual_channels, bias=True),
            _make_activation(),
        )

        # stack of dilated residual blocks (dilation restarts every cycle)
        self.conv_layers = torch.nn.ModuleList(
            [
                ResidualBlock(
                    kernel_size=kernel_size,
                    residual_channels=residual_channels,
                    gate_channels=gate_channels,
                    skip_channels=skip_channels,
                    aux_channels=-1,
                    dilation=2 ** (layer_idx % layers_per_stack),
                    dropout=dropout,
                    bias=bias,
                    use_causal_conv=use_causal_conv,
                )
                for layer_idx in range(layers)
            ]
        )

        # output projection applied to the aggregated skip connections
        self.last_conv_layers = torch.nn.ModuleList(
            [
                _make_activation(),
                Conv1d1x1(skip_channels, skip_channels, bias=True),
                _make_activation(),
                Conv1d1x1(skip_channels, out_channels, bias=True),
            ]
        )

        # apply weight norm
        if use_weight_norm:
            self.apply_weight_norm()

    def forward(self, x):
        """Calculate forward propagation.

        Args:
            x (Tensor): Input signal (B, 1, T).

        Returns:
            Tensor: Output tensor (B, out_channels, T)

        """
        x = self.first_conv(x)

        # accumulate skip connections over all residual blocks
        skip_sum = 0
        for block in self.conv_layers:
            x, skip = block(x, None)
            skip_sum += skip
        # normalize the accumulated skip connections
        skip_sum *= math.sqrt(1.0 / len(self.conv_layers))

        x = skip_sum
        for postnet in self.last_conv_layers:
            x = postnet(x)
        return x

    def apply_weight_norm(self):
        """Recursively apply weight normalization to all conv layers."""

        def _apply_weight_norm(m):
            if isinstance(m, (torch.nn.Conv1d, torch.nn.Conv2d)):
                torch.nn.utils.weight_norm(m)
                logging.debug(f"Weight norm is applied to {m}.")

        self.apply(_apply_weight_norm)

    def remove_weight_norm(self):
        """Recursively remove weight normalization from all layers."""

        def _remove_weight_norm(m):
            try:
                logging.debug(f"Weight norm is removed from {m}.")
                torch.nn.utils.remove_weight_norm(m)
            except ValueError:  # this module didn't have weight norm
                return

        self.apply(_remove_weight_norm)
| 18,221 | 34.313953 | 88 | py |
ParallelWaveGAN | ParallelWaveGAN-master/parallel_wavegan/models/vqvae.py | # -*- coding: utf-8 -*-
# Copyright 2020 Tomoki Hayashi
# MIT License (https://opensource.org/licenses/MIT)
"""VQVAE Modules."""
import logging
import torch
import parallel_wavegan.models
from parallel_wavegan.layers import VQCodebook
class VQVAE(torch.nn.Module):
    """VQVAE module."""

    def __init__(
        self,
        in_channels=1,
        out_channels=1,
        num_embeds=512,
        embed_dim=256,
        num_local_embeds=None,
        local_embed_dim=None,
        num_global_embeds=None,
        global_embed_dim=None,
        encoder_type="MelGANDiscriminator",
        decoder_type="MelGANGenerator",
        encoder_conf={
            "out_channels": 256,
            "downsample_scales": [4, 4, 2, 2],
            "max_downsample_channels": 1024,
        },
        decoder_conf={
            "in_channels": 256,
            "upsample_scales": [4, 4, 2, 2],
            "channels": 512,
            "stacks": 3,
        },
        use_weight_norm=True,
    ):
        """Initialize VQVAE module.

        Args:
            in_channels (int): Number of input channels.
            out_channels (int): Number of output channels.
            num_embeds (int): Number of embeddings.
            embed_dim (int): Dimension of each embedding.
            num_local_embeds (int): Number of local embeddings.
            local_embed_dim (int): Dimension of each local embedding.
            num_global_embeds (int): Number of global embeddings.
            global_embed_dim (int): Dimension of each global embedding.
            encoder_type (str): Encoder module name.
            decoder_type (str): Decoder module name.
            encoder_conf (dict): Hyperparameters for the encoder.
            decoder_conf (dict): Hyperparameters for the decoder.
            use_weight_norm (bool): Whether to use weight norm.

        """
        super(VQVAE, self).__init__()
        encoder_class = getattr(parallel_wavegan.models, encoder_type)
        decoder_class = getattr(parallel_wavegan.models, decoder_type)
        # copy before updating: the update() calls below would otherwise
        # mutate the shared default-argument dicts (and the caller's dicts)
        encoder_conf = dict(encoder_conf)
        decoder_conf = dict(decoder_conf)
        encoder_conf.update({"in_channels": in_channels})
        decoder_conf.update({"out_channels": out_channels})
        if not issubclass(decoder_class, parallel_wavegan.models.MelGANGenerator):
            raise NotImplementedError(f"{decoder_class} is not supported yet.")
        # optional conditioning modules; kept as None when unused so that
        # forward() / decode() can safely test for their presence instead of
        # hitting an AttributeError
        self.local_embed = None
        self.global_embed = None
        if num_local_embeds is not None and local_embed_dim is not None:
            self.local_embed = torch.nn.Conv1d(num_local_embeds, local_embed_dim, 1)
        if num_global_embeds is not None:
            self.global_embed = torch.nn.Embedding(num_global_embeds, global_embed_dim)
        self.encoder = encoder_class(**encoder_conf)
        self.codebook = VQCodebook(num_embeds=num_embeds, embed_dim=embed_dim)
        self.decoder = decoder_class(**decoder_conf)

        # apply weight norm
        if use_weight_norm:
            self.remove_weight_norm()  # for duplicated weight norm
            self.apply_weight_norm()

    def forward(self, x, l=None, g=None):
        """Calculate forward propagation.

        Args:
            x (Tensor): Input tensor (B, in_channels, T).
            l (Tensor): Local conditioning tensor (B, num_local_embeds, T).
            g (LongTensor): Global conditioning idx (B, ).

        Return:
            Tensor: Reconstructed input tensor (B, in_channels, T).
            Tensor: Encoder hidden states (B, embed_dim, T // prod(downsample_scales)).
            Tensor: Quantized encoder hidden states (B, embed_dim, T // prod(downsample_scales)).

        """
        z_e = self.encoder(x)
        z_e = z_e[-1] if isinstance(z_e, list) else z_e  # For MelGAN Discriminator
        z_q_st, z_q = self.codebook.straight_through(z_e)
        if l is not None:
            if self.local_embed is not None:
                l = self.local_embed(l)
            z_q_st = torch.cat([z_q_st, l], dim=1)
        if g is not None:
            # broadcast the speaker embedding over the time axis
            g = self.global_embed(g).unsqueeze(2).expand(-1, -1, z_q_st.size(2))
            z_q_st = torch.cat([z_q_st, g], dim=1)
        x_bar = self.decoder(z_q_st)
        return x_bar, z_e, z_q

    def encode(self, x):
        """Encode the inputs into the latent codes.

        Args:
            x (Tensor): Input tensor (B, in_channels, T).

        Returns:
            LongTensor: Quantized tensor (B, T).

        """
        # NOTE: previously this indexed `self.encoder(x)[-1]` before the list
        # check, which sliced the batch dimension for tensor-returning
        # encoders; now consistent with forward().
        z_e = self.encoder(x)
        z_e = z_e[-1] if isinstance(z_e, list) else z_e  # For MelGAN Discriminator
        return self.codebook(z_e)

    def decode(self, indices, l=None, g=None):
        """Decode the latent codes to the inputs.

        Args:
            indices (LongTensor): Quantized tensor (B, T).
            l (Tensor): Local conditioning tensor (B, num_local_embeds, T).
            g (LongTensor): Global conditioning idx (B, ).

        Return:
            Tensor: Reconstructed tensor (B, 1, T).

        """
        z_q = self.codebook.embedding(indices).transpose(2, 1)
        if l is not None:
            if self.local_embed is not None:
                l = self.local_embed(l)
            z_q = torch.cat([z_q, l], dim=1)
        if g is not None:
            # broadcast the speaker embedding over the time axis
            g = self.global_embed(g).unsqueeze(2).expand(-1, -1, z_q.size(2))
            z_q = torch.cat([z_q, g], dim=1)
        return self.decoder(z_q)

    def apply_weight_norm(self):
        """Apply weight normalization module from all of the layers."""

        def _apply_weight_norm(m):
            if isinstance(m, torch.nn.Conv1d) or isinstance(
                m, torch.nn.ConvTranspose1d
            ):
                torch.nn.utils.weight_norm(m)
                logging.debug(f"Weight norm is applied to {m}.")

        self.apply(_apply_weight_norm)

    def remove_weight_norm(self):
        """Remove weight normalization module from all of the layers."""

        def _remove_weight_norm(m):
            try:
                logging.debug(f"Weight norm is removed from {m}.")
                torch.nn.utils.remove_weight_norm(m)
            except ValueError:  # this module didn't have weight norm
                return

        self.apply(_remove_weight_norm)
| 6,165 | 34.848837 | 97 | py |
ParallelWaveGAN | ParallelWaveGAN-master/parallel_wavegan/models/melgan.py | # -*- coding: utf-8 -*-
# Copyright 2020 Tomoki Hayashi
# MIT License (https://opensource.org/licenses/MIT)
"""MelGAN Modules."""
import logging
import numpy as np
import torch
from parallel_wavegan.layers import CausalConv1d, CausalConvTranspose1d, ResidualStack
from parallel_wavegan.utils import read_hdf5
class MelGANGenerator(torch.nn.Module):
    """MelGAN generator module.

    Upsamples a mel-spectrogram-like feature sequence to a waveform through a
    stack of transposed convolutions interleaved with residual stacks.
    """

    def __init__(
        self,
        in_channels=80,
        out_channels=1,
        kernel_size=7,
        channels=512,
        bias=True,
        upsample_scales=[8, 8, 2, 2],
        stack_kernel_size=3,
        stacks=3,
        nonlinear_activation="LeakyReLU",
        nonlinear_activation_params={"negative_slope": 0.2},
        pad="ReflectionPad1d",
        pad_params={},
        use_final_nonlinear_activation=True,
        use_weight_norm=True,
        use_causal_conv=False,
    ):
        """Initialize MelGANGenerator module.

        Args:
            in_channels (int): Number of input channels.
            out_channels (int): Number of output channels.
            kernel_size (int): Kernel size of initial and final conv layer.
            channels (int): Initial number of channels for conv layer.
            bias (bool): Whether to add bias parameter in convolution layers.
            upsample_scales (list): List of upsampling scales.
            stack_kernel_size (int): Kernel size of dilated conv layers in residual stack.
            stacks (int): Number of stacks in a single residual stack.
            nonlinear_activation (str): Activation function module name.
            nonlinear_activation_params (dict): Hyperparameters for activation function.
            pad (str): Padding function module name before dilated convolution layer.
            pad_params (dict): Hyperparameters for padding function.
            use_final_nonlinear_activation (bool): Whether to apply Tanh after the final layer.
            use_weight_norm (bool): Whether to use weight norm.
                If set to true, it will be applied to all of the conv layers.
            use_causal_conv (bool): Whether to use causal convolution.

        """
        super(MelGANGenerator, self).__init__()

        # check hyper parameters is valid
        assert channels >= np.prod(upsample_scales)
        assert channels % (2 ** len(upsample_scales)) == 0
        if not use_causal_conv:
            assert (kernel_size - 1) % 2 == 0, "Not support even number kernel size."

        # add initial layer
        # NOTE: the order layers are appended here determines the indices
        # inside the final torch.nn.Sequential and thus the state-dict keys.
        layers = []
        if not use_causal_conv:
            layers += [
                getattr(torch.nn, pad)((kernel_size - 1) // 2, **pad_params),
                torch.nn.Conv1d(in_channels, channels, kernel_size, bias=bias),
            ]
        else:
            layers += [
                CausalConv1d(
                    in_channels,
                    channels,
                    kernel_size,
                    bias=bias,
                    pad=pad,
                    pad_params=pad_params,
                ),
            ]

        for i, upsample_scale in enumerate(upsample_scales):
            # add upsampling layer
            layers += [
                getattr(torch.nn, nonlinear_activation)(**nonlinear_activation_params)
            ]
            if not use_causal_conv:
                # padding/output_padding chosen so the output length is
                # exactly input_length * upsample_scale.
                layers += [
                    torch.nn.ConvTranspose1d(
                        channels // (2**i),
                        channels // (2 ** (i + 1)),
                        upsample_scale * 2,
                        stride=upsample_scale,
                        padding=upsample_scale // 2 + upsample_scale % 2,
                        output_padding=upsample_scale % 2,
                        bias=bias,
                    )
                ]
            else:
                layers += [
                    CausalConvTranspose1d(
                        channels // (2**i),
                        channels // (2 ** (i + 1)),
                        upsample_scale * 2,
                        stride=upsample_scale,
                        bias=bias,
                    )
                ]

            # add residual stack
            for j in range(stacks):
                layers += [
                    ResidualStack(
                        kernel_size=stack_kernel_size,
                        channels=channels // (2 ** (i + 1)),
                        dilation=stack_kernel_size**j,
                        bias=bias,
                        nonlinear_activation=nonlinear_activation,
                        nonlinear_activation_params=nonlinear_activation_params,
                        pad=pad,
                        pad_params=pad_params,
                        use_causal_conv=use_causal_conv,
                    )
                ]

        # add final layer
        # NOTE: `i` here is the index of the last upsampling block from the
        # loop above, so channels // (2 ** (i + 1)) is the last hidden width.
        layers += [
            getattr(torch.nn, nonlinear_activation)(**nonlinear_activation_params)
        ]
        if not use_causal_conv:
            layers += [
                getattr(torch.nn, pad)((kernel_size - 1) // 2, **pad_params),
                torch.nn.Conv1d(
                    channels // (2 ** (i + 1)), out_channels, kernel_size, bias=bias
                ),
            ]
        else:
            layers += [
                CausalConv1d(
                    channels // (2 ** (i + 1)),
                    out_channels,
                    kernel_size,
                    bias=bias,
                    pad=pad,
                    pad_params=pad_params,
                ),
            ]
        if use_final_nonlinear_activation:
            layers += [torch.nn.Tanh()]

        # define the model as a single function
        self.melgan = torch.nn.Sequential(*layers)

        # apply weight norm
        if use_weight_norm:
            self.apply_weight_norm()

        # reset parameters
        self.reset_parameters()

        # initialize pqmf for inference
        # (assigned externally for multi-band variants; only used in inference())
        self.pqmf = None

    def forward(self, c):
        """Calculate forward propagation.

        Args:
            c (Tensor): Input tensor (B, channels, T).

        Returns:
            Tensor: Output tensor (B, 1, T ** prod(upsample_scales)).

        """
        return self.melgan(c)

    def remove_weight_norm(self):
        """Remove weight normalization module from all of the layers."""

        def _remove_weight_norm(m):
            try:
                logging.debug(f"Weight norm is removed from {m}.")
                torch.nn.utils.remove_weight_norm(m)
            except ValueError:  # this module didn't have weight norm
                return

        self.apply(_remove_weight_norm)

    def apply_weight_norm(self):
        """Apply weight normalization module from all of the layers."""

        def _apply_weight_norm(m):
            if isinstance(m, torch.nn.Conv1d) or isinstance(
                m, torch.nn.ConvTranspose1d
            ):
                torch.nn.utils.weight_norm(m)
                logging.debug(f"Weight norm is applied to {m}.")

        self.apply(_apply_weight_norm)

    def reset_parameters(self):
        """Reset parameters.

        This initialization follows official implementation manner.
        https://github.com/descriptinc/melgan-neurips/blob/master/mel2wav/modules.py

        """

        def _reset_parameters(m):
            if isinstance(m, torch.nn.Conv1d) or isinstance(
                m, torch.nn.ConvTranspose1d
            ):
                m.weight.data.normal_(0.0, 0.02)
                logging.debug(f"Reset parameters in {m}.")

        self.apply(_reset_parameters)

    def register_stats(self, stats):
        """Register stats for de-normalization as buffer.

        Args:
            stats (str): Path of statistics file (".npy" or ".h5").

        """
        assert stats.endswith(".h5") or stats.endswith(".npy")
        if stats.endswith(".h5"):
            mean = read_hdf5(stats, "mean").reshape(-1)
            scale = read_hdf5(stats, "scale").reshape(-1)
        else:
            # .npy layout: row 0 is the mean vector, row 1 is the scale vector.
            mean = np.load(stats)[0].reshape(-1)
            scale = np.load(stats)[1].reshape(-1)
        self.register_buffer("mean", torch.from_numpy(mean).float())
        self.register_buffer("scale", torch.from_numpy(scale).float())
        logging.info("Successfully registered stats as buffer.")

    def inference(self, c, normalize_before=False):
        """Perform inference.

        Args:
            c (Union[Tensor, ndarray]): Input tensor (T, in_channels).
            normalize_before (bool): Whether to perform normalization.

        Returns:
            Tensor: Output tensor (T ** prod(upsample_scales), out_channels).

        """
        if not isinstance(c, torch.Tensor):
            c = torch.tensor(c, dtype=torch.float).to(next(self.parameters()).device)
        if normalize_before:
            # requires register_stats() to have been called beforehand
            c = (c - self.mean) / self.scale
        c = self.melgan(c.transpose(1, 0).unsqueeze(0))
        if self.pqmf is not None:
            # multi-band output: synthesize the full-band waveform via PQMF
            c = self.pqmf.synthesis(c)
        return c.squeeze(0).transpose(1, 0)
class MelGANDiscriminator(torch.nn.Module):
    """MelGAN discriminator module.

    A fully convolutional discriminator that returns the outputs of every
    layer (feature maps) for use in feature-matching losses.
    """

    def __init__(
        self,
        in_channels=1,
        out_channels=1,
        kernel_sizes=[5, 3],
        channels=16,
        max_downsample_channels=1024,
        bias=True,
        downsample_scales=[4, 4, 4, 4],
        nonlinear_activation="LeakyReLU",
        nonlinear_activation_params={"negative_slope": 0.2},
        pad="ReflectionPad1d",
        pad_params={},
    ):
        """Initialize MelGAN discriminator module.

        Args:
            in_channels (int): Number of input channels.
            out_channels (int): Number of output channels.
            kernel_sizes (list): List of two kernel sizes. The prod will be used for the first conv layer,
                and the first and the second kernel sizes will be used for the last two layers.
                For example if kernel_sizes = [5, 3], the first layer kernel size will be 5 * 3 = 15,
                the last two layers' kernel size will be 5 and 3, respectively.
            channels (int): Initial number of channels for conv layer.
            max_downsample_channels (int): Maximum number of channels for downsampling layers.
            bias (bool): Whether to add bias parameter in convolution layers.
            downsample_scales (list): List of downsampling scales.
            nonlinear_activation (str): Activation function module name.
            nonlinear_activation_params (dict): Hyperparameters for activation function.
            pad (str): Padding function module name before dilated convolution layer.
            pad_params (dict): Hyperparameters for padding function.

        """
        super(MelGANDiscriminator, self).__init__()
        self.layers = torch.nn.ModuleList()

        # check kernel size is valid
        assert len(kernel_sizes) == 2
        assert kernel_sizes[0] % 2 == 1
        assert kernel_sizes[1] % 2 == 1

        # add first layer (kernel size = prod(kernel_sizes), e.g. 5 * 3 = 15)
        self.layers += [
            torch.nn.Sequential(
                getattr(torch.nn, pad)((np.prod(kernel_sizes) - 1) // 2, **pad_params),
                torch.nn.Conv1d(
                    in_channels, channels, np.prod(kernel_sizes), bias=bias
                ),
                getattr(torch.nn, nonlinear_activation)(**nonlinear_activation_params),
            )
        ]

        # add downsample layers
        in_chs = channels
        for downsample_scale in downsample_scales:
            out_chs = min(in_chs * downsample_scale, max_downsample_channels)
            self.layers += [
                torch.nn.Sequential(
                    # grouped strided conv following the official MelGAN setup
                    torch.nn.Conv1d(
                        in_chs,
                        out_chs,
                        kernel_size=downsample_scale * 10 + 1,
                        stride=downsample_scale,
                        padding=downsample_scale * 5,
                        groups=in_chs // 4,
                        bias=bias,
                    ),
                    getattr(torch.nn, nonlinear_activation)(
                        **nonlinear_activation_params
                    ),
                )
            ]
            in_chs = out_chs

        # add final layers
        out_chs = min(in_chs * 2, max_downsample_channels)
        self.layers += [
            torch.nn.Sequential(
                torch.nn.Conv1d(
                    in_chs,
                    out_chs,
                    kernel_sizes[0],
                    padding=(kernel_sizes[0] - 1) // 2,
                    bias=bias,
                ),
                getattr(torch.nn, nonlinear_activation)(**nonlinear_activation_params),
            )
        ]
        self.layers += [
            torch.nn.Conv1d(
                out_chs,
                out_channels,
                kernel_sizes[1],
                padding=(kernel_sizes[1] - 1) // 2,
                bias=bias,
            ),
        ]

        # reset parameters
        self.reset_parameters()

    def forward(self, x):
        """Calculate forward propagation.

        Args:
            x (Tensor): Input noise signal (B, 1, T).

        Returns:
            List: List of output tensors of each layer.

        """
        outs = []
        for f in self.layers:
            x = f(x)
            outs += [x]
        return outs

    def reset_parameters(self):
        """Reset parameters.

        This initialization follows official implementation manner.
        https://github.com/descriptinc/melgan-neurips/blob/master/mel2wav/modules.py

        """

        def _reset_parameters(m):
            if isinstance(m, torch.nn.Conv1d) or isinstance(
                m, torch.nn.ConvTranspose1d
            ):
                m.weight.data.normal_(0.0, 0.02)
                logging.debug(f"Reset parameters in {m}.")

        self.apply(_reset_parameters)
class MelGANMultiScaleDiscriminator(torch.nn.Module):
    """MelGAN multi-scale discriminator module.

    Runs several identical discriminators on progressively average-pooled
    versions of the input signal.
    """

    def __init__(
        self,
        in_channels=1,
        out_channels=1,
        scales=3,
        downsample_pooling="AvgPool1d",
        # follow the official implementation setting
        downsample_pooling_params={
            "kernel_size": 4,
            "stride": 2,
            "padding": 1,
            "count_include_pad": False,
        },
        kernel_sizes=[5, 3],
        channels=16,
        max_downsample_channels=1024,
        bias=True,
        downsample_scales=[4, 4, 4, 4],
        nonlinear_activation="LeakyReLU",
        nonlinear_activation_params={"negative_slope": 0.2},
        pad="ReflectionPad1d",
        pad_params={},
        use_weight_norm=True,
    ):
        """Initialize MelGAN multi-scale discriminator module.

        Args:
            in_channels (int): Number of input channels.
            out_channels (int): Number of output channels.
            scales (int): Number of multi-scales.
            downsample_pooling (str): Pooling module name for downsampling of the inputs.
            downsample_pooling_params (dict): Parameters for the above pooling module.
            kernel_sizes (list): List of two kernel sizes. The prod will be used for the first conv layer,
                and the first and the second kernel sizes will be used for the last two layers.
            channels (int): Initial number of channels for conv layer.
            max_downsample_channels (int): Maximum number of channels for downsampling layers.
            bias (bool): Whether to add bias parameter in convolution layers.
            downsample_scales (list): List of downsampling scales.
            nonlinear_activation (str): Activation function module name.
            nonlinear_activation_params (dict): Hyperparameters for activation function.
            pad (str): Padding function module name before dilated convolution layer.
            pad_params (dict): Hyperparameters for padding function.
            use_weight_norm (bool): Whether to use weight norm on all conv layers.

        """
        super(MelGANMultiScaleDiscriminator, self).__init__()
        self.discriminators = torch.nn.ModuleList()

        # add discriminators (all scales share the same architecture)
        for _ in range(scales):
            self.discriminators += [
                MelGANDiscriminator(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    kernel_sizes=kernel_sizes,
                    channels=channels,
                    max_downsample_channels=max_downsample_channels,
                    bias=bias,
                    downsample_scales=downsample_scales,
                    nonlinear_activation=nonlinear_activation,
                    nonlinear_activation_params=nonlinear_activation_params,
                    pad=pad,
                    pad_params=pad_params,
                )
            ]
        self.pooling = getattr(torch.nn, downsample_pooling)(
            **downsample_pooling_params
        )

        # apply weight norm
        if use_weight_norm:
            self.apply_weight_norm()

        # reset parameters
        self.reset_parameters()

    def forward(self, x):
        """Calculate forward propagation.

        Args:
            x (Tensor): Input noise signal (B, 1, T).

        Returns:
            List: List of list of each discriminator outputs, which consists of each layer output tensors.

        """
        outs = []
        for f in self.discriminators:
            outs += [f(x)]
            # downsample the input for the next (coarser) scale
            x = self.pooling(x)
        return outs

    def remove_weight_norm(self):
        """Remove weight normalization module from all of the layers."""

        def _remove_weight_norm(m):
            try:
                logging.debug(f"Weight norm is removed from {m}.")
                torch.nn.utils.remove_weight_norm(m)
            except ValueError:  # this module didn't have weight norm
                return

        self.apply(_remove_weight_norm)

    def apply_weight_norm(self):
        """Apply weight normalization module from all of the layers."""

        def _apply_weight_norm(m):
            if isinstance(m, torch.nn.Conv1d) or isinstance(
                m, torch.nn.ConvTranspose1d
            ):
                torch.nn.utils.weight_norm(m)
                logging.debug(f"Weight norm is applied to {m}.")

        self.apply(_apply_weight_norm)

    def reset_parameters(self):
        """Reset parameters.

        This initialization follows official implementation manner.
        https://github.com/descriptinc/melgan-neurips/blob/master/mel2wav/modules.py

        """

        def _reset_parameters(m):
            if isinstance(m, torch.nn.Conv1d) or isinstance(
                m, torch.nn.ConvTranspose1d
            ):
                m.weight.data.normal_(0.0, 0.02)
                logging.debug(f"Reset parameters in {m}.")

        self.apply(_reset_parameters)
| 18,873 | 34.278505 | 106 | py |
ParallelWaveGAN | ParallelWaveGAN-master/parallel_wavegan/models/hifigan.py | # -*- coding: utf-8 -*-
"""HiFi-GAN Modules.
This code is based on https://github.com/jik876/hifi-gan.
"""
import copy
import logging
import numpy as np
import torch
import torch.nn.functional as F
from parallel_wavegan.layers import CausalConv1d, CausalConvTranspose1d
from parallel_wavegan.layers import HiFiGANResidualBlock as ResidualBlock
from parallel_wavegan.layers.duration_predictor import DurationPredictor
from parallel_wavegan.layers.length_regulator import LengthRegulator
from parallel_wavegan.utils import read_hdf5
class HiFiGANGenerator(torch.nn.Module):
    """HiFiGAN generator module.

    Upsamples the input features with transposed convolutions; after each
    upsample, several multi-receptive-field residual blocks are applied and
    their outputs averaged.
    """

    def __init__(
        self,
        in_channels=80,
        out_channels=1,
        channels=512,
        kernel_size=7,
        upsample_scales=(8, 8, 2, 2),
        upsample_kernel_sizes=(16, 16, 4, 4),
        resblock_kernel_sizes=(3, 7, 11),
        resblock_dilations=[(1, 3, 5), (1, 3, 5), (1, 3, 5)],
        use_additional_convs=True,
        bias=True,
        nonlinear_activation="LeakyReLU",
        nonlinear_activation_params={"negative_slope": 0.1},
        use_causal_conv=False,
        use_weight_norm=True,
    ):
        """Initialize HiFiGANGenerator module.

        Args:
            in_channels (int): Number of input channels.
            out_channels (int): Number of output channels.
            channels (int): Number of hidden representation channels.
            kernel_size (int): Kernel size of initial and final conv layer.
            upsample_scales (list): List of upsampling scales.
            upsample_kernel_sizes (list): List of kernel sizes for upsampling layers.
            resblock_kernel_sizes (list): List of kernel sizes for residual blocks.
            resblock_dilations (list): List of dilation list for residual blocks.
            use_additional_convs (bool): Whether to use additional conv layers in residual blocks.
            bias (bool): Whether to add bias parameter in convolution layers.
            nonlinear_activation (str): Activation function module name.
            nonlinear_activation_params (dict): Hyperparameters for activation function.
            use_causal_conv (bool): Whether to use causal structure.
            use_weight_norm (bool): Whether to use weight norm.
                If set to true, it will be applied to all of the conv layers.

        """
        super().__init__()

        # check hyperparameters are valid
        assert kernel_size % 2 == 1, "Kernel size must be odd number."
        assert len(upsample_scales) == len(upsample_kernel_sizes)
        assert len(resblock_dilations) == len(resblock_kernel_sizes)

        # define modules
        self.num_upsamples = len(upsample_kernel_sizes)
        self.num_blocks = len(resblock_kernel_sizes)
        self.use_causal_conv = use_causal_conv
        if not use_causal_conv:
            self.input_conv = torch.nn.Conv1d(
                in_channels,
                channels,
                kernel_size,
                bias=bias,
                padding=(kernel_size - 1) // 2,
            )
        else:
            self.input_conv = CausalConv1d(
                in_channels,
                channels,
                kernel_size,
                bias=bias,
            )
        self.upsamples = torch.nn.ModuleList()
        # blocks is a flat list: block index = i * num_blocks + j (see forward)
        self.blocks = torch.nn.ModuleList()
        for i in range(len(upsample_kernel_sizes)):
            assert upsample_kernel_sizes[i] == 2 * upsample_scales[i]
            if not use_causal_conv:
                self.upsamples += [
                    torch.nn.Sequential(
                        getattr(torch.nn, nonlinear_activation)(
                            **nonlinear_activation_params
                        ),
                        torch.nn.ConvTranspose1d(
                            channels // (2**i),
                            channels // (2 ** (i + 1)),
                            upsample_kernel_sizes[i],
                            upsample_scales[i],
                            padding=upsample_scales[i] // 2 + upsample_scales[i] % 2,
                            output_padding=upsample_scales[i] % 2,
                            bias=bias,
                        ),
                    )
                ]
            else:
                self.upsamples += [
                    torch.nn.Sequential(
                        getattr(torch.nn, nonlinear_activation)(
                            **nonlinear_activation_params
                        ),
                        CausalConvTranspose1d(
                            channels // (2**i),
                            channels // (2 ** (i + 1)),
                            upsample_kernel_sizes[i],
                            upsample_scales[i],
                            bias=bias,
                        ),
                    )
                ]
            for j in range(len(resblock_kernel_sizes)):
                self.blocks += [
                    ResidualBlock(
                        kernel_size=resblock_kernel_sizes[j],
                        channels=channels // (2 ** (i + 1)),
                        dilations=resblock_dilations[j],
                        bias=bias,
                        use_additional_convs=use_additional_convs,
                        nonlinear_activation=nonlinear_activation,
                        nonlinear_activation_params=nonlinear_activation_params,
                        use_causal_conv=use_causal_conv,
                    )
                ]
        # NOTE: `i` below is the index of the last upsampling block from the
        # loop above, so channels // (2 ** (i + 1)) is the last hidden width.
        if not use_causal_conv:
            self.output_conv = torch.nn.Sequential(
                # NOTE(kan-bayashi): follow official implementation but why
                #   using different slope parameter here? (0.1 vs. 0.01)
                torch.nn.LeakyReLU(),
                torch.nn.Conv1d(
                    channels // (2 ** (i + 1)),
                    out_channels,
                    kernel_size,
                    bias=bias,
                    padding=(kernel_size - 1) // 2,
                ),
                torch.nn.Tanh(),
            )
        else:
            self.output_conv = torch.nn.Sequential(
                # NOTE(kan-bayashi): follow official implementation but why
                #   using different slope parameter here? (0.1 vs. 0.01)
                torch.nn.LeakyReLU(),
                CausalConv1d(
                    channels // (2 ** (i + 1)),
                    out_channels,
                    kernel_size,
                    bias=bias,
                ),
                torch.nn.Tanh(),
            )

        # apply weight norm
        if use_weight_norm:
            self.apply_weight_norm()

        # reset parameters
        self.reset_parameters()

    def forward(self, c):
        """Calculate forward propagation.

        Args:
            c (Tensor): Input tensor (B, in_channels, T).

        Returns:
            Tensor: Output tensor (B, out_channels, T).

        """
        c = self.input_conv(c)
        for i in range(self.num_upsamples):
            c = self.upsamples[i](c)
            cs = 0.0  # initialize
            # average the outputs of the multi-receptive-field residual blocks
            for j in range(self.num_blocks):
                cs += self.blocks[i * self.num_blocks + j](c)
            c = cs / self.num_blocks
        c = self.output_conv(c)
        return c

    def reset_parameters(self):
        """Reset parameters.

        This initialization follows the official implementation manner.
        https://github.com/jik876/hifi-gan/blob/master/models.py

        """

        def _reset_parameters(m):
            if isinstance(m, (torch.nn.Conv1d, torch.nn.ConvTranspose1d)):
                m.weight.data.normal_(0.0, 0.01)
                logging.debug(f"Reset parameters in {m}.")

        self.apply(_reset_parameters)

    def remove_weight_norm(self):
        """Remove weight normalization module from all of the layers."""

        def _remove_weight_norm(m):
            try:
                logging.debug(f"Weight norm is removed from {m}.")
                torch.nn.utils.remove_weight_norm(m)
            except ValueError:  # this module didn't have weight norm
                return

        self.apply(_remove_weight_norm)

    def apply_weight_norm(self):
        """Apply weight normalization module from all of the layers."""

        def _apply_weight_norm(m):
            if isinstance(m, torch.nn.Conv1d) or isinstance(
                m, torch.nn.ConvTranspose1d
            ):
                torch.nn.utils.weight_norm(m)
                logging.debug(f"Weight norm is applied to {m}.")

        self.apply(_apply_weight_norm)

    def register_stats(self, stats):
        """Register stats for de-normalization as buffer.

        Args:
            stats (str): Path of statistics file (".npy" or ".h5").

        """
        assert stats.endswith(".h5") or stats.endswith(".npy")
        if stats.endswith(".h5"):
            mean = read_hdf5(stats, "mean").reshape(-1)
            scale = read_hdf5(stats, "scale").reshape(-1)
        else:
            # .npy layout: row 0 is the mean vector, row 1 is the scale vector.
            mean = np.load(stats)[0].reshape(-1)
            scale = np.load(stats)[1].reshape(-1)
        self.register_buffer("mean", torch.from_numpy(mean).float())
        self.register_buffer("scale", torch.from_numpy(scale).float())
        logging.info("Successfully registered stats as buffer.")

    def inference(self, c, normalize_before=False):
        """Perform inference.

        Args:
            c (Union[Tensor, ndarray]): Input tensor (T, in_channels).
            normalize_before (bool): Whether to perform normalization.

        Returns:
            Tensor: Output tensor (T ** prod(upsample_scales), out_channels).

        """
        if not isinstance(c, torch.Tensor):
            c = torch.tensor(c, dtype=torch.float).to(next(self.parameters()).device)
        if normalize_before:
            # requires register_stats() to have been called beforehand
            c = (c - self.mean) / self.scale
        c = self.forward(c.transpose(1, 0).unsqueeze(0))
        return c.squeeze(0).transpose(1, 0)
class HiFiGANPeriodDiscriminator(torch.nn.Module):
    """HiFiGAN period discriminator module.

    Reshapes the 1D signal into a 2D (time/period, period) map and applies
    2D convolutions, so periodic structure of the waveform is exposed.
    """

    def __init__(
        self,
        in_channels=1,
        out_channels=1,
        period=3,
        kernel_sizes=[5, 3],
        channels=32,
        downsample_scales=[3, 3, 3, 3, 1],
        max_downsample_channels=1024,
        bias=True,
        nonlinear_activation="LeakyReLU",
        nonlinear_activation_params={"negative_slope": 0.1},
        use_weight_norm=True,
        use_spectral_norm=False,
    ):
        """Initialize HiFiGANPeriodDiscriminator module.

        Args:
            in_channels (int): Number of input channels.
            out_channels (int): Number of output channels.
            period (int): Period.
            kernel_sizes (list): Kernel sizes of initial conv layers and the final conv layer.
            channels (int): Number of initial channels.
            downsample_scales (list): List of downsampling scales.
            max_downsample_channels (int): Number of maximum downsampling channels.
            bias (bool): Whether to add bias parameter in convolution layers.
            nonlinear_activation (str): Activation function module name.
            nonlinear_activation_params (dict): Hyperparameters for activation function.
            use_weight_norm (bool): Whether to use weight norm.
                If set to true, it will be applied to all of the conv layers.
            use_spectral_norm (bool): Whether to use spectral norm.
                If set to true, it will be applied to all of the conv layers.

        """
        super().__init__()
        assert len(kernel_sizes) == 2
        assert kernel_sizes[0] % 2 == 1, "Kernel size must be odd number."
        assert kernel_sizes[1] % 2 == 1, "Kernel size must be odd number."

        self.period = period
        self.convs = torch.nn.ModuleList()
        in_chs = in_channels
        out_chs = channels
        for downsample_scale in downsample_scales:
            self.convs += [
                torch.nn.Sequential(
                    # stride only along the time axis; period axis untouched
                    torch.nn.Conv2d(
                        in_chs,
                        out_chs,
                        (kernel_sizes[0], 1),
                        (downsample_scale, 1),
                        padding=((kernel_sizes[0] - 1) // 2, 0),
                    ),
                    getattr(torch.nn, nonlinear_activation)(
                        **nonlinear_activation_params
                    ),
                )
            ]
            in_chs = out_chs
            # NOTE(kan-bayashi): Use downsample_scale + 1?
            out_chs = min(out_chs * 4, max_downsample_channels)
        # NOTE(review): kernel (kernel_sizes[1] - 1, 1) is even when
        # kernel_sizes[1] = 3; looks inherited from the reference
        # implementation — kept as-is for checkpoint compatibility.
        self.output_conv = torch.nn.Conv2d(
            out_chs,
            out_channels,
            (kernel_sizes[1] - 1, 1),
            1,
            padding=((kernel_sizes[1] - 1) // 2, 0),
        )

        if use_weight_norm and use_spectral_norm:
            raise ValueError("Either use use_weight_norm or use_spectral_norm.")

        # apply weight norm
        if use_weight_norm:
            self.apply_weight_norm()

        # apply spectral norm
        if use_spectral_norm:
            self.apply_spectral_norm()

    def forward(self, x):
        """Calculate forward propagation.

        Args:
            x (Tensor): Input tensor (B, in_channels, T).

        Returns:
            list: List of each layer's tensors.

        """
        # transform 1d to 2d -> (B, C, T/P, P)
        b, c, t = x.shape
        if t % self.period != 0:
            # reflect-pad so the length becomes a multiple of the period
            n_pad = self.period - (t % self.period)
            x = F.pad(x, (0, n_pad), "reflect")
            t += n_pad
        x = x.view(b, c, t // self.period, self.period)

        # forward conv
        outs = []
        for layer in self.convs:
            x = layer(x)
            outs += [x]
        x = self.output_conv(x)
        x = torch.flatten(x, 1, -1)
        outs += [x]

        return outs

    def apply_weight_norm(self):
        """Apply weight normalization module from all of the layers."""

        def _apply_weight_norm(m):
            if isinstance(m, torch.nn.Conv2d):
                torch.nn.utils.weight_norm(m)
                logging.debug(f"Weight norm is applied to {m}.")

        self.apply(_apply_weight_norm)

    def apply_spectral_norm(self):
        """Apply spectral normalization module from all of the layers."""

        def _apply_spectral_norm(m):
            if isinstance(m, torch.nn.Conv2d):
                torch.nn.utils.spectral_norm(m)
                logging.debug(f"Spectral norm is applied to {m}.")

        self.apply(_apply_spectral_norm)
class HiFiGANMultiPeriodDiscriminator(torch.nn.Module):
    """HiFiGAN multi-period discriminator module.

    Wraps one period discriminator per requested period and evaluates all of
    them on the same input signal.
    """

    def __init__(
        self,
        periods=[2, 3, 5, 7, 11],
        discriminator_params={
            "in_channels": 1,
            "out_channels": 1,
            "kernel_sizes": [5, 3],
            "channels": 32,
            "downsample_scales": [3, 3, 3, 3, 1],
            "max_downsample_channels": 1024,
            "bias": True,
            "nonlinear_activation": "LeakyReLU",
            "nonlinear_activation_params": {"negative_slope": 0.1},
            "use_weight_norm": True,
            "use_spectral_norm": False,
        },
    ):
        """Initialize HiFiGANMultiPeriodDiscriminator module.

        Args:
            periods (list): List of periods.
            discriminator_params (dict): Parameters for hifi-gan period discriminator module.
                The period parameter will be overwritten.

        """
        super().__init__()
        self.discriminators = torch.nn.ModuleList()
        for p in periods:
            # copy so the shared default dict is never mutated across periods
            cfg = copy.deepcopy(discriminator_params)
            cfg["period"] = p
            self.discriminators.append(HiFiGANPeriodDiscriminator(**cfg))

    def forward(self, x):
        """Calculate forward propagation.

        Args:
            x (Tensor): Input noise signal (B, 1, T).

        Returns:
            List: List of list of each discriminator outputs, which consists of each layer output tensors.

        """
        return [d(x) for d in self.discriminators]
class HiFiGANScaleDiscriminator(torch.nn.Module):
    """HiFi-GAN scale discriminator module.

    A 1D convolutional discriminator returning all intermediate feature maps;
    includes a state-dict pre-hook for norm backward compatibility.
    """

    def __init__(
        self,
        in_channels=1,
        out_channels=1,
        kernel_sizes=[15, 41, 5, 3],
        channels=128,
        max_downsample_channels=1024,
        max_groups=16,
        bias=True,
        downsample_scales=[2, 2, 4, 4, 1],
        nonlinear_activation="LeakyReLU",
        nonlinear_activation_params={"negative_slope": 0.1},
        use_weight_norm=True,
        use_spectral_norm=False,
    ):
        """Initialize HiFiGAN scale discriminator module.

        Args:
            in_channels (int): Number of input channels.
            out_channels (int): Number of output channels.
            kernel_sizes (list): List of four kernel sizes. The first will be used for the first conv layer,
                and the second is for downsampling part, and the remaining two are for output layers.
            channels (int): Initial number of channels for conv layer.
            max_downsample_channels (int): Maximum number of channels for downsampling layers.
            max_groups (int): Maximum number of groups for grouped convolutions in downsampling layers.
            bias (bool): Whether to add bias parameter in convolution layers.
            downsample_scales (list): List of downsampling scales.
            nonlinear_activation (str): Activation function module name.
            nonlinear_activation_params (dict): Hyperparameters for activation function.
            use_weight_norm (bool): Whether to use weight norm.
                If set to true, it will be applied to all of the conv layers.
            use_spectral_norm (bool): Whether to use spectral norm.
                If set to true, it will be applied to all of the conv layers.

        """
        super().__init__()
        self.layers = torch.nn.ModuleList()

        # check kernel size is valid
        assert len(kernel_sizes) == 4
        for ks in kernel_sizes:
            assert ks % 2 == 1

        # add first layer
        self.layers += [
            torch.nn.Sequential(
                torch.nn.Conv1d(
                    in_channels,
                    channels,
                    # NOTE(kan-bayashi): Use always the same kernel size
                    kernel_sizes[0],
                    bias=bias,
                    padding=(kernel_sizes[0] - 1) // 2,
                ),
                getattr(torch.nn, nonlinear_activation)(**nonlinear_activation_params),
            )
        ]

        # add downsample layers
        in_chs = channels
        out_chs = channels
        # NOTE(kan-bayashi): Remove hard coding?
        groups = 4
        for downsample_scale in downsample_scales:
            self.layers += [
                torch.nn.Sequential(
                    torch.nn.Conv1d(
                        in_chs,
                        out_chs,
                        kernel_size=kernel_sizes[1],
                        stride=downsample_scale,
                        padding=(kernel_sizes[1] - 1) // 2,
                        groups=groups,
                        bias=bias,
                    ),
                    getattr(torch.nn, nonlinear_activation)(
                        **nonlinear_activation_params
                    ),
                )
            ]
            in_chs = out_chs
            # NOTE(kan-bayashi): Remove hard coding?
            out_chs = min(in_chs * 2, max_downsample_channels)
            # NOTE(kan-bayashi): Remove hard coding?
            groups = min(groups * 4, max_groups)

        # add final layers
        out_chs = min(in_chs * 2, max_downsample_channels)
        self.layers += [
            torch.nn.Sequential(
                torch.nn.Conv1d(
                    in_chs,
                    out_chs,
                    kernel_size=kernel_sizes[2],
                    stride=1,
                    padding=(kernel_sizes[2] - 1) // 2,
                    bias=bias,
                ),
                getattr(torch.nn, nonlinear_activation)(**nonlinear_activation_params),
            )
        ]
        self.layers += [
            torch.nn.Conv1d(
                out_chs,
                out_channels,
                kernel_size=kernel_sizes[3],
                stride=1,
                padding=(kernel_sizes[3] - 1) // 2,
                bias=bias,
            ),
        ]

        if use_weight_norm and use_spectral_norm:
            raise ValueError("Either use use_weight_norm or use_spectral_norm.")

        # apply weight norm
        self.use_weight_norm = use_weight_norm
        if use_weight_norm:
            self.apply_weight_norm()

        # apply spectral norm
        self.use_spectral_norm = use_spectral_norm
        if use_spectral_norm:
            self.apply_spectral_norm()

        # backward compatibility
        self._register_load_state_dict_pre_hook(self._load_state_dict_pre_hook)

    def forward(self, x):
        """Calculate forward propagation.

        Args:
            x (Tensor): Input noise signal (B, 1, T).

        Returns:
            List: List of output tensors of each layer.

        """
        outs = []
        for f in self.layers:
            x = f(x)
            outs += [x]

        return outs

    def apply_weight_norm(self):
        """Apply weight normalization module from all of the layers."""

        def _apply_weight_norm(m):
            if isinstance(m, torch.nn.Conv1d):
                torch.nn.utils.weight_norm(m)
                logging.debug(f"Weight norm is applied to {m}.")

        self.apply(_apply_weight_norm)

    def apply_spectral_norm(self):
        """Apply spectral normalization module from all of the layers."""

        def _apply_spectral_norm(m):
            if isinstance(m, torch.nn.Conv1d):
                torch.nn.utils.spectral_norm(m)
                logging.debug(f"Spectral norm is applied to {m}.")

        self.apply(_apply_spectral_norm)

    def remove_weight_norm(self):
        """Remove weight normalization module from all of the layers."""

        def _remove_weight_norm(m):
            try:
                logging.debug(f"Weight norm is removed from {m}.")
                torch.nn.utils.remove_weight_norm(m)
            except ValueError:  # this module didn't have weight norm
                return

        self.apply(_remove_weight_norm)

    def remove_spectral_norm(self):
        """Remove spectral normalization module from all of the layers."""

        def _remove_spectral_norm(m):
            try:
                logging.debug(f"Spectral norm is removed from {m}.")
                torch.nn.utils.remove_spectral_norm(m)
            except ValueError:  # this module didn't have spectral norm
                return

        self.apply(_remove_spectral_norm)

    def _load_state_dict_pre_hook(
        self,
        state_dict,
        prefix,
        local_metadata,
        strict,
        missing_keys,
        unexpected_keys,
        error_msgs,
    ):
        """Fix the compatibility of weight / spectral normalization issue.

        Some pretrained models are trained with configs that use weight / spectral
        normalization, but actually, the norm is not applied. This causes the mismatch
        of the parameters with configs. To solve this issue, when parameter mismatch
        happens in loading pretrained model, we remove the norm from the current model.

        See also:
            - https://github.com/kan-bayashi/ParallelWaveGAN/pull/409
            - https://github.com/espnet/espnet/pull/5240

        """
        current_module_keys = [x for x in state_dict.keys() if x.startswith(prefix)]
        # weight norm adds "*_g" parameters; their absence in the checkpoint
        # means the pretrained model was saved without the norm applied.
        if self.use_weight_norm and not any(
            ["weight_g" in k for k in current_module_keys]
        ):
            logging.warning(
                "It seems weight norm is not applied in the pretrained model but the"
                " current model uses it. To keep the compatibility, we remove the norm"
                " from the current model. This may causes training error due to the the"
                " parameter mismatch when finetuning. To avoid this issue, please"
                " change the following parameters in config to false: \n"
                " - discriminator_params.follow_official_norm \n"
                " - discriminator_params.scale_discriminator_params.use_weight_norm \n"
                " - discriminator_params.scale_discriminator_params.use_spectral_norm \n"
                " See also: https://github.com/kan-bayashi/ParallelWaveGAN/pull/409"
            )
            self.remove_weight_norm()
            self.use_weight_norm = False

        # spectral norm adds "*_u" buffers; same reasoning as above.
        if self.use_spectral_norm and not any(
            ["weight_u" in k for k in current_module_keys]
        ):
            logging.warning(
                "It seems spectral norm is not applied in the pretrained model but the"
                " current model uses it. To keep the compatibility, we remove the norm"
                " from the current model. This may causes training error due to the the"
                " parameter mismatch when finetuning. To avoid this issue, please"
                " change the following parameters in config to false: \n"
                " - discriminator_params.follow_official_norm \n"
                " - discriminator_params.scale_discriminator_params.use_weight_norm \n"
                " - discriminator_params.scale_discriminator_params.use_spectral_norm \n"
                " See also: https://github.com/kan-bayashi/ParallelWaveGAN/pull/409"
            )
            self.remove_spectral_norm()
            self.use_spectral_norm = False
class HiFiGANMultiScaleDiscriminator(torch.nn.Module):
    """HiFi-GAN multi-scale discriminator module.

    Evaluates one scale discriminator per scale, pooling the input between
    successive scales so each discriminator sees a coarser signal.
    """

    def __init__(
        self,
        scales=3,
        downsample_pooling="AvgPool1d",
        # follow the official implementation setting
        downsample_pooling_params={
            "kernel_size": 4,
            "stride": 2,
            "padding": 2,
        },
        discriminator_params={
            "in_channels": 1,
            "out_channels": 1,
            "kernel_sizes": [15, 41, 5, 3],
            "channels": 128,
            "max_downsample_channels": 1024,
            "max_groups": 16,
            "bias": True,
            "downsample_scales": [2, 2, 4, 4, 1],
            "nonlinear_activation": "LeakyReLU",
            "nonlinear_activation_params": {"negative_slope": 0.1},
        },
        follow_official_norm=False,
    ):
        """Initialize HiFiGAN multi-scale discriminator module.

        Args:
            scales (int): Number of multi-scales.
            downsample_pooling (str): Pooling module name for downsampling of the inputs.
            downsample_pooling_params (dict): Parameters for the above pooling module.
            discriminator_params (dict): Parameters for hifi-gan scale discriminator module.
            follow_official_norm (bool): Whether to follow the norm setting of the official
                implementaion. The first discriminator uses spectral norm and the other
                discriminators use weight norm.

        """
        super().__init__()
        self.discriminators = torch.nn.ModuleList()

        # add discriminators
        for idx in range(scales):
            cfg = copy.deepcopy(discriminator_params)
            if follow_official_norm:
                # official setting: spectral norm on the first scale only,
                # weight norm on all the remaining scales
                first = idx == 0
                cfg["use_weight_norm"] = not first
                cfg["use_spectral_norm"] = first
            self.discriminators.append(HiFiGANScaleDiscriminator(**cfg))
        self.pooling = getattr(torch.nn, downsample_pooling)(
            **downsample_pooling_params
        )

    def forward(self, x):
        """Calculate forward propagation.

        Args:
            x (Tensor): Input noise signal (B, 1, T).

        Returns:
            List: List of list of each discriminator outputs, which consists of each layer output tensors.

        """
        outs = []
        for disc in self.discriminators:
            outs.append(disc(x))
            # downsample the input for the next (coarser) scale
            x = self.pooling(x)

        return outs
class HiFiGANMultiScaleMultiPeriodDiscriminator(torch.nn.Module):
    """HiFi-GAN multi-scale + multi-period discriminator module.

    Thin composition of a multi-scale discriminator and a multi-period
    discriminator whose outputs are concatenated into a single list.
    """

    def __init__(
        self,
        # Multi-scale discriminator related
        scales=3,
        scale_downsample_pooling="AvgPool1d",
        scale_downsample_pooling_params={
            "kernel_size": 4,
            "stride": 2,
            "padding": 2,
        },
        scale_discriminator_params={
            "in_channels": 1,
            "out_channels": 1,
            "kernel_sizes": [15, 41, 5, 3],
            "channels": 128,
            "max_downsample_channels": 1024,
            "max_groups": 16,
            "bias": True,
            "downsample_scales": [2, 2, 4, 4, 1],
            "nonlinear_activation": "LeakyReLU",
            "nonlinear_activation_params": {"negative_slope": 0.1},
        },
        follow_official_norm=True,
        # Multi-period discriminator related
        periods=[2, 3, 5, 7, 11],
        period_discriminator_params={
            "in_channels": 1,
            "out_channels": 1,
            "kernel_sizes": [5, 3],
            "channels": 32,
            "downsample_scales": [3, 3, 3, 3, 1],
            "max_downsample_channels": 1024,
            "bias": True,
            "nonlinear_activation": "LeakyReLU",
            "nonlinear_activation_params": {"negative_slope": 0.1},
            "use_weight_norm": True,
            "use_spectral_norm": False,
        },
    ):
        """Initialize HiFi-GAN multi-scale + multi-period discriminator module.

        Args:
            scales (int): Number of multi-scales.
            scale_downsample_pooling (str): Pooling module name for downsampling of the inputs.
            scale_downsample_pooling_params (dict): Parameters for the above pooling module.
            scale_discriminator_params (dict): Parameters for hifi-gan scale discriminator module.
            follow_official_norm (bool): Whether to follow the norm setting of the official
                implementation. The first discriminator uses spectral norm and the other
                discriminators use weight norm.
            periods (list): List of periods.
            period_discriminator_params (dict): Parameters for hifi-gan period discriminator module.
                The period parameter will be overwritten.

        """
        super().__init__()
        # multi-scale branch
        self.msd = HiFiGANMultiScaleDiscriminator(
            scales=scales,
            downsample_pooling=scale_downsample_pooling,
            downsample_pooling_params=scale_downsample_pooling_params,
            discriminator_params=scale_discriminator_params,
            follow_official_norm=follow_official_norm,
        )
        # multi-period branch
        self.mpd = HiFiGANMultiPeriodDiscriminator(
            periods=periods,
            discriminator_params=period_discriminator_params,
        )

    def forward(self, x):
        """Calculate forward propagation.

        Args:
            x (Tensor): Input noise signal (B, 1, T).

        Returns:
            List: List of list of each discriminator outputs,
                which consists of each layer output tensors.
                Multi scale and multi period ones are concatenated.

        """
        # scale outputs first, then period outputs
        return self.msd(x) + self.mpd(x)
class DiscreteSymbolHiFiGANGenerator(torch.nn.Module):
    """Discrete symbol HiFi-GAN generator module.

    A HiFi-GAN generator conditioned on sequences of discrete symbol indices:
    indices are first mapped to dense vectors with an embedding table,
    optionally combined with a speaker-ID embedding, and then fed to the
    usual HiFi-GAN upsampling stack.
    """
    def __init__(
        self,
        in_channels=512,
        out_channels=1,
        channels=512,
        num_embs=100,
        num_spk_embs=128,
        spk_emb_dim=128,
        concat_spk_emb=False,
        kernel_size=7,
        upsample_scales=(8, 8, 2, 2),
        upsample_kernel_sizes=(16, 16, 4, 4),
        resblock_kernel_sizes=(3, 7, 11),
        resblock_dilations=[(1, 3, 5), (1, 3, 5), (1, 3, 5)],
        use_additional_convs=True,
        bias=True,
        nonlinear_activation="LeakyReLU",
        nonlinear_activation_params={"negative_slope": 0.1},
        use_weight_norm=True,
    ):
        """Initialize DiscreteSymbolHiFiGANGenerator module.
        Args:
            in_channels (int): Number of input channels (symbol embedding dimension).
            out_channels (int): Number of output channels.
            channels (int): Number of hidden representation channels.
            num_embs (int): Discrete symbol vocabulary size.
            num_spk_embs (int): Number of speakers for speaker-ID-based embedding
                (0 disables speaker conditioning).
            spk_emb_dim (int): Dimension of speaker embedding.
            concat_spk_emb (bool): Whether to concat speaker embedding to the input
                (otherwise the speaker embedding is added element-wise).
            kernel_size (int): Kernel size of initial and final conv layer.
            upsample_scales (list): List of upsampling scales.
            upsample_kernel_sizes (list): List of kernel sizes for upsampling layers.
            resblock_kernel_sizes (list): List of kernel sizes for residual blocks.
            resblock_dilations (list): List of dilation list for residual blocks.
            use_additional_convs (bool): Whether to use additional conv layers in residual blocks.
            bias (bool): Whether to add bias parameter in convolution layers.
            nonlinear_activation (str): Activation function module name.
            nonlinear_activation_params (dict): Hyperparameters for activation function.
            use_weight_norm (bool): Whether to use weight norm.
                If set to true, it will be applied to all of the conv layers.
        """
        super().__init__()
        self.num_spk_embs = num_spk_embs
        # define id embedding
        self.emb = torch.nn.Embedding(
            num_embeddings=num_embs, embedding_dim=in_channels
        )
        if self.num_spk_embs > 0:
            self.spk_emb = torch.nn.Embedding(
                num_embeddings=num_spk_embs, embedding_dim=spk_emb_dim
            )
            # NOTE: self.concat_spk_emb only exists when num_spk_embs > 0.
            self.concat_spk_emb = concat_spk_emb
            if not concat_spk_emb:
                # additive integration requires matching dimensions
                assert in_channels == spk_emb_dim
            else:
                # concatenation widens the conditioning input
                in_channels = in_channels + spk_emb_dim
        # check hyperparameters are valid
        assert kernel_size % 2 == 1, "Kernal size must be odd number."
        assert len(upsample_scales) == len(upsample_kernel_sizes)
        assert len(resblock_dilations) == len(resblock_kernel_sizes)
        # define modules
        self.num_upsamples = len(upsample_kernel_sizes)
        self.num_blocks = len(resblock_kernel_sizes)
        self.input_conv = torch.nn.Conv1d(
            in_channels,
            channels,
            kernel_size,
            1,
            padding=(kernel_size - 1) // 2,
        )
        self.upsamples = torch.nn.ModuleList()
        self.blocks = torch.nn.ModuleList()
        # channel width is halved after every upsampling stage; each stage is
        # followed by len(resblock_kernel_sizes) parallel residual blocks
        for i in range(len(upsample_kernel_sizes)):
            self.upsamples += [
                torch.nn.Sequential(
                    getattr(torch.nn, nonlinear_activation)(
                        **nonlinear_activation_params
                    ),
                    torch.nn.ConvTranspose1d(
                        channels // (2**i),
                        channels // (2 ** (i + 1)),
                        upsample_kernel_sizes[i],
                        upsample_scales[i],
                        padding=(upsample_kernel_sizes[i] - upsample_scales[i]) // 2,
                    ),
                )
            ]
            for j in range(len(resblock_kernel_sizes)):
                self.blocks += [
                    ResidualBlock(
                        kernel_size=resblock_kernel_sizes[j],
                        channels=channels // (2 ** (i + 1)),
                        dilations=resblock_dilations[j],
                        bias=bias,
                        use_additional_convs=use_additional_convs,
                        nonlinear_activation=nonlinear_activation,
                        nonlinear_activation_params=nonlinear_activation_params,
                    )
                ]
        self.output_conv = torch.nn.Sequential(
            # NOTE(kan-bayashi): follow official implementation but why
            # using different slope parameter here? (0.1 vs. 0.01)
            torch.nn.LeakyReLU(),
            torch.nn.Conv1d(
                channels // (2 ** (i + 1)),
                out_channels,
                kernel_size,
                1,
                padding=(kernel_size - 1) // 2,
            ),
            torch.nn.Tanh(),
        )
        # apply weight norm
        if use_weight_norm:
            self.apply_weight_norm()
        # reset parameters
        # NOTE: run after apply_weight_norm, matching the official HiFi-GAN order
        self.reset_parameters()
    def forward(self, c):
        """Calculate forward propagation.
        Args:
            c (Tensor): Input index tensor (B, 2, T) when speaker embeddings are
                used (row 0: symbol ids, row 1: speaker id), else (B, 1, T).
        Returns:
            Tensor: Output tensor (B, out_channels, T * prod(upsample_scales)).
        """
        # convert idx to embedding
        if self.num_spk_embs > 0:
            assert c.size(1) == 2
            c_idx, g_idx = c.long().split(1, dim=1)
            c = self.emb(c_idx.squeeze(1)).transpose(1, 2)  # (B, C, T)
            # one speaker id per utterance: take the first frame's id
            g = self.spk_emb(g_idx[:, 0, 0])
            # integrate global embedding
            if not self.concat_spk_emb:
                c = c + g.unsqueeze(2)
            else:
                # NOTE(review): c is (B, C, T) here, so dim=-1 concatenates along
                # the time axis rather than channels — confirm this is intended.
                g = g.unsqueeze(1).expand(-1, c.size(1), -1)
                c = torch.cat([c, g], dim=-1)
        else:
            assert c.size(1) == 1
            c = self.emb(c.squeeze(1).long()).transpose(1, 2)  # (B, C, T)
        c = self.input_conv(c)
        for i in range(self.num_upsamples):
            c = self.upsamples[i](c)
            cs = 0.0  # initialize
            # average the outputs of the parallel residual blocks
            for j in range(self.num_blocks):
                cs += self.blocks[i * self.num_blocks + j](c)
            c = cs / self.num_blocks
        c = self.output_conv(c)
        return c
    def reset_parameters(self):
        """Reset parameters.
        This initialization follows the official implementation manner.
        https://github.com/jik876/hifi-gan/blob/master/models.py
        """
        def _reset_parameters(m):
            # re-draw all (transposed) conv weights from N(0, 0.01)
            if isinstance(m, (torch.nn.Conv1d, torch.nn.ConvTranspose1d)):
                m.weight.data.normal_(0.0, 0.01)
                logging.debug(f"Reset parameters in {m}.")
        self.apply(_reset_parameters)
    def remove_weight_norm(self):
        """Remove weight normalization module from all of the layers."""
        def _remove_weight_norm(m):
            try:
                logging.debug(f"Weight norm is removed from {m}.")
                torch.nn.utils.remove_weight_norm(m)
            except ValueError:  # this module didn't have weight norm
                return
        self.apply(_remove_weight_norm)
    def apply_weight_norm(self):
        """Apply weight normalization module from all of the layers."""
        def _apply_weight_norm(m):
            if isinstance(m, torch.nn.Conv1d) or isinstance(
                m, torch.nn.ConvTranspose1d
            ):
                torch.nn.utils.weight_norm(m)
                logging.debug(f"Weight norm is applied to {m}.")
        self.apply(_apply_weight_norm)
    def inference(self, c, g=None, normalize_before=False):
        """Perform inference.
        Args:
            c (Union[Tensor, ndarray]): Input index tensor (T, 2) or (T, 1).
            g (Optional[int]): Speaker id; if given, it overrides any speaker
                column in ``c``.
            normalize_before (bool): Must be False (no statistics are used
                for discrete inputs).
        Returns:
            Tensor: Output tensor (T * prod(upsample_scales), out_channels).
        """
        assert not normalize_before, "No statistics are used."
        if not isinstance(c, torch.Tensor):
            c = torch.tensor(c, dtype=torch.long).to(next(self.parameters()).device)
        if g is not None:
            # build a speaker-id column filled with g alongside the symbol column
            c = c[:, 0:1]
            c = torch.cat([c, c.new_zeros(*c.size()).fill_(g).to(c.device)], dim=1)
        if self.num_spk_embs <= 0:
            # drop any speaker column when speaker embeddings are disabled
            c = c[:, 0:1]
        # (T, C) -> (1, C, T) batch for forward, then back to (T', out_channels)
        c = self.forward(c.transpose(1, 0).unsqueeze(0))
        return c.squeeze(0).transpose(1, 0)
class DiscreteSymbolDurationGenerator(DiscreteSymbolHiFiGANGenerator):
    """Discrete symbol HiFi-GAN generator with a duration predictor module.

    In addition to the discrete-symbol HiFi-GAN generator, this module
    predicts a duration for each input symbol and expands the conditioning
    sequence with a length regulator before waveform generation.
    """

    def __init__(
        self,
        in_channels=512,
        out_channels=1,
        channels=512,
        num_embs=100,
        num_spk_embs=128,
        spk_emb_dim=128,
        concat_spk_emb=False,
        duration_layers=2,
        duration_chans=384,
        duration_kernel_size=3,
        duration_offset=1.0,
        duration_dropout_rate=0.5,
        kernel_size=7,
        upsample_scales=(8, 8, 2, 2),
        upsample_kernel_sizes=(16, 16, 4, 4),
        resblock_kernel_sizes=(3, 7, 11),
        resblock_dilations=[(1, 3, 5), (1, 3, 5), (1, 3, 5)],
        use_additional_convs=True,
        bias=True,
        nonlinear_activation="LeakyReLU",
        nonlinear_activation_params={"negative_slope": 0.1},
        use_weight_norm=True,
    ):
        """Initialize DiscreteSymbolDurationGenerator module.
        Args:
            in_channels (int): Number of input channels (symbol embedding dimension).
            out_channels (int): Number of output channels.
            channels (int): Number of hidden representation channels.
            num_embs (int): Discrete symbol vocabulary size.
            num_spk_embs (int): Number of speakers for speaker-ID-based embedding.
            spk_emb_dim (int): Dimension of speaker embedding.
            concat_spk_emb (bool): Whether to concat speaker embedding to the input.
            duration_layers (int): Number of duration predictor layers.
            duration_chans (int): Number of duration predictor channels.
            duration_kernel_size (int): Kernel size for the duration predictor.
            duration_offset (float): Duration predictor offset.
            duration_dropout_rate (float): Duration predictor dropout rate.
            kernel_size (int): Kernel size of initial and final conv layer.
            upsample_scales (list): List of upsampling scales.
            upsample_kernel_sizes (list): List of kernel sizes for upsampling layers.
            resblock_kernel_sizes (list): List of kernel sizes for residual blocks.
            resblock_dilations (list): List of dilation list for residual blocks.
            use_additional_convs (bool): Whether to use additional conv layers in residual blocks.
            bias (bool): Whether to add bias parameter in convolution layers.
            nonlinear_activation (str): Activation function module name.
            nonlinear_activation_params (dict): Hyperparameters for activation function.
            use_weight_norm (bool): Whether to use weight norm.
                If set to true, it will be applied to all of the conv layers.
        """
        super().__init__(
            in_channels=in_channels,
            out_channels=out_channels,
            channels=channels,
            num_embs=num_embs + 1,  # +1 to reserve an extra symbol for padding
            num_spk_embs=num_spk_embs,
            spk_emb_dim=spk_emb_dim,
            concat_spk_emb=concat_spk_emb,
            kernel_size=kernel_size,
            upsample_scales=upsample_scales,
            upsample_kernel_sizes=upsample_kernel_sizes,
            resblock_kernel_sizes=resblock_kernel_sizes,
            resblock_dilations=resblock_dilations,
            use_additional_convs=use_additional_convs,
            bias=bias,
            nonlinear_activation=nonlinear_activation,
            nonlinear_activation_params=nonlinear_activation_params,
            use_weight_norm=use_weight_norm,
        )
        # BUGFIX: the duration predictor consumes the conditioning sequence
        # built in forward()/synthesis(). That sequence has `in_channels`
        # channels when the speaker embedding is added element-wise, and
        # `in_channels + spk_emb_dim` channels only when it is concatenated.
        # Previously spk_emb_dim was added whenever num_spk_embs > 0, which
        # mismatched the actual feature size in the (default) additive case.
        # Note: self.concat_spk_emb only exists when num_spk_embs > 0, so the
        # short-circuit order matters here.
        if self.num_spk_embs > 0 and self.concat_spk_emb:
            in_channels = in_channels + spk_emb_dim
        self.duration_predictor = DurationPredictor(
            in_channels,
            n_layers=duration_layers,
            n_chans=duration_chans,
            kernel_size=duration_kernel_size,
            dropout_rate=duration_dropout_rate,
            offset=duration_offset,
        )
        self.length_regulator = LengthRegulator()

    def forward(self, c, ds):
        """Calculate forward propagation (teacher-forced durations).
        Args:
            c (Tensor): Input index tensor (B, 2, T) or (B, 1, T).
            ds (Tensor): Ground-truth duration tensor (B, T).
        Returns:
            Tensor: Output waveform tensor (B, out_channels, T').
            Tensor: Predicted duration tensor from the duration predictor.
        """
        # convert idx to embedding
        if self.num_spk_embs > 0:
            assert c.size(1) == 2
            c_idx, g_idx = c.long().split(1, dim=1)
            c = self.emb(c_idx.squeeze(1)).transpose(1, 2)  # (B, C, T)
            g = self.spk_emb(g_idx[:, 0, 0])
            # integrate global embedding
            if not self.concat_spk_emb:
                c = c + g.unsqueeze(2)
            else:
                # NOTE(review): c is (B, C, T) here, so dim=-1 concatenates
                # along the time axis rather than channels — confirm intended.
                g = g.unsqueeze(1).expand(-1, c.size(1), -1)
                c = torch.cat([c, g], dim=-1)
        else:
            assert c.size(1) == 1
            c = self.emb(c.squeeze(1).long()).transpose(1, 2)  # (B, C, T)
        # predict per-symbol durations, then expand with the ground-truth ones
        ds_out = self.duration_predictor(c.transpose(1, 2))
        c = self.length_regulator(c.transpose(1, 2), ds).transpose(1, 2)
        c = self.input_conv(c)
        for i in range(self.num_upsamples):
            c = self.upsamples[i](c)
            cs = 0.0  # initialize
            # average the outputs of the parallel residual blocks
            for j in range(self.num_blocks):
                cs += self.blocks[i * self.num_blocks + j](c)
            c = cs / self.num_blocks
        c = self.output_conv(c)
        return c, ds_out

    def inference(self, c, g=None, ds=None, normalize_before=False):
        """Perform inference.
        Args:
            c (Union[Tensor, ndarray]): Input index tensor (T, 2) or (T, 1).
            g (Optional[int]): Speaker id; if given, it overrides any speaker
                column in ``c``.
            ds (Optional[Tensor]): Ground-truth durations (T,); if None, the
                durations are predicted via :meth:`synthesis`.
            normalize_before (bool): Must be False (no statistics are used).
        Returns:
            Tensor: Output tensor (T' , out_channels).
        """
        assert not normalize_before, "No statistics are used."
        if not isinstance(c, torch.Tensor):
            c = torch.tensor(c, dtype=torch.long).to(next(self.parameters()).device)
        if g is not None:
            # build a speaker-id column filled with g alongside the symbol column
            c = c[:, 0:1]
            c = torch.cat([c, c.new_zeros(*c.size()).fill_(g).to(c.device)], dim=1)
        if self.num_spk_embs <= 0:
            c = c[:, 0:1]
        if ds is None:
            # no reference durations: use the predicted ones
            c, _ = self.synthesis(c.transpose(1, 0).unsqueeze(0))
        else:
            c, _ = self.forward(c.transpose(1, 0).unsqueeze(0), ds.unsqueeze(0))
        return c.squeeze(0).transpose(1, 0)

    def synthesis(self, c):
        """Synthesis with duration prediction.
        Args:
            c (Tensor): Input index tensor (B, 2, T) or (B, 1, T).
        Returns:
            Tensor: Output waveform tensor (B, out_channels, T').
            Tensor: Predicted duration tensor.
        """
        # convert idx to embedding
        if self.num_spk_embs > 0:
            assert c.size(1) == 2
            c_idx, g_idx = c.long().split(1, dim=1)
            c = self.emb(c_idx.squeeze(1)).transpose(1, 2)  # (B, C, T)
            g = self.spk_emb(g_idx[:, 0, 0])
            # integrate global embedding
            if not self.concat_spk_emb:
                c = c + g.unsqueeze(2)
            else:
                # NOTE(review): see forward() — dim=-1 concatenates along time.
                g = g.unsqueeze(1).expand(-1, c.size(1), -1)
                c = torch.cat([c, g], dim=-1)
        else:
            assert c.size(1) == 1
            c = self.emb(c.squeeze(1).long()).transpose(1, 2)  # (B, C, T)
        # expand with predicted (inference-mode) durations
        ds_out = self.duration_predictor.inference(c.transpose(1, 2))
        c = self.length_regulator(c.transpose(1, 2), ds_out).transpose(1, 2)
        c = self.input_conv(c)
        for i in range(self.num_upsamples):
            c = self.upsamples[i](c)
            cs = 0.0  # initialize
            for j in range(self.num_blocks):
                cs += self.blocks[i * self.num_blocks + j](c)
            c = cs / self.num_blocks
        c = self.output_conv(c)
        return c, ds_out
| 47,878 | 36.115504 | 108 | py |
ParallelWaveGAN | ParallelWaveGAN-master/parallel_wavegan/models/style_melgan.py | # Copyright 2021 Tomoki Hayashi
# MIT License (https://opensource.org/licenses/MIT)
"""StyleMelGAN Modules."""
import copy
import logging
import numpy as np
import torch
import torch.nn.functional as F
from parallel_wavegan.layers import PQMF, TADEResBlock
from parallel_wavegan.models import MelGANDiscriminator as BaseDiscriminator
from parallel_wavegan.utils import read_hdf5
class StyleMelGANGenerator(torch.nn.Module):
    """Style MelGAN generator module.

    Upsamples a noise vector to the target resolution and modulates it with
    the auxiliary (mel) features via a stack of TADE residual blocks.
    """
    def __init__(
        self,
        in_channels=128,
        aux_channels=80,
        channels=64,
        out_channels=1,
        kernel_size=9,
        dilation=2,
        bias=True,
        noise_upsample_scales=[11, 2, 2, 2],
        noise_upsample_activation="LeakyReLU",
        noise_upsample_activation_params={"negative_slope": 0.2},
        upsample_scales=[2, 2, 2, 2, 2, 2, 2, 2, 1],
        upsample_mode="nearest",
        gated_function="softmax",
        use_weight_norm=True,
    ):
        """Initialize Style MelGAN generator.
        Args:
            in_channels (int): Number of input noise channels.
            aux_channels (int): Number of auxiliary input channels.
            channels (int): Number of channels for conv layer.
            out_channels (int): Number of output channels.
            kernel_size (int): Kernel size of conv layers.
            dilation (int): Dilation factor for conv layers.
            bias (bool): Whether to add bias parameter in convolution layers.
            noise_upsample_scales (list): List of noise upsampling scales.
            noise_upsample_activation (str): Activation function module name for noise upsampling.
            noise_upsample_activation_params (dict): Hyperparameters for the above activation function.
            upsample_scales (list): List of upsampling scales.
            upsample_mode (str): Upsampling mode in TADE layer.
            gated_function (str): Gated function in TADEResBlock ("softmax" or "sigmoid").
            use_weight_norm (bool): Whether to use weight norm.
                If set to true, it will be applied to all of the conv layers.
        """
        super().__init__()
        self.in_channels = in_channels
        # stack of transposed convs that expand the noise vector in time
        noise_upsample = []
        in_chs = in_channels
        for noise_upsample_scale in noise_upsample_scales:
            # NOTE(kan-bayashi): How should we design noise upsampling part?
            noise_upsample += [
                torch.nn.ConvTranspose1d(
                    in_chs,
                    channels,
                    noise_upsample_scale * 2,
                    stride=noise_upsample_scale,
                    # padding/output_padding chosen so the output length is
                    # exactly in_length * noise_upsample_scale
                    padding=noise_upsample_scale // 2 + noise_upsample_scale % 2,
                    output_padding=noise_upsample_scale % 2,
                    bias=bias,
                )
            ]
            noise_upsample += [
                getattr(torch.nn, noise_upsample_activation)(
                    **noise_upsample_activation_params
                )
            ]
            in_chs = channels
        self.noise_upsample = torch.nn.Sequential(*noise_upsample)
        self.noise_upsample_factor = np.prod(noise_upsample_scales)
        # TADE residual blocks; the first one consumes the raw aux features,
        # subsequent ones consume the previous block's aux output
        self.blocks = torch.nn.ModuleList()
        aux_chs = aux_channels
        for upsample_scale in upsample_scales:
            self.blocks += [
                TADEResBlock(
                    in_channels=channels,
                    aux_channels=aux_chs,
                    kernel_size=kernel_size,
                    dilation=dilation,
                    bias=bias,
                    upsample_factor=upsample_scale,
                    upsample_mode=upsample_mode,
                    gated_function=gated_function,
                ),
            ]
            aux_chs = channels
        self.upsample_factor = np.prod(upsample_scales)
        self.output_conv = torch.nn.Sequential(
            torch.nn.Conv1d(
                channels,
                out_channels,
                kernel_size,
                1,
                bias=bias,
                padding=(kernel_size - 1) // 2,
            ),
            torch.nn.Tanh(),
        )
        # apply weight norm
        if use_weight_norm:
            self.apply_weight_norm()
        # reset parameters
        self.reset_parameters()
    def forward(self, c, z=None):
        """Calculate forward propagation.
        Args:
            c (Tensor): Auxiliary input tensor (B, channels, T).
            z (Tensor): Input noise tensor (B, in_channels, 1).
                If None, the noise is drawn from a standard normal.
        Returns:
            Tensor: Output tensor (B, out_channels, T * prod(upsample_scales)).
        """
        if z is None:
            z = torch.randn(c.size(0), self.in_channels, 1).to(
                device=c.device,
                dtype=c.dtype,
            )
        x = self.noise_upsample(z)
        for block in self.blocks:
            # each block returns both the modulated signal and the upsampled aux
            x, c = block(x, c)
        x = self.output_conv(x)
        return x
    def remove_weight_norm(self):
        """Remove weight normalization module from all of the layers."""
        def _remove_weight_norm(m):
            try:
                logging.debug(f"Weight norm is removed from {m}.")
                torch.nn.utils.remove_weight_norm(m)
            except ValueError:  # this module didn't have weight norm
                return
        self.apply(_remove_weight_norm)
    def apply_weight_norm(self):
        """Apply weight normalization module from all of the layers."""
        def _apply_weight_norm(m):
            if isinstance(m, torch.nn.Conv1d) or isinstance(
                m, torch.nn.ConvTranspose1d
            ):
                torch.nn.utils.weight_norm(m)
                logging.debug(f"Weight norm is applied to {m}.")
        self.apply(_apply_weight_norm)
    def reset_parameters(self):
        """Reset parameters (all conv weights re-drawn from N(0, 0.02))."""
        def _reset_parameters(m):
            if isinstance(m, torch.nn.Conv1d) or isinstance(
                m, torch.nn.ConvTranspose1d
            ):
                m.weight.data.normal_(0.0, 0.02)
                logging.debug(f"Reset parameters in {m}.")
        self.apply(_reset_parameters)
    def register_stats(self, stats):
        """Register stats for de-normalization as buffer.
        Args:
            stats (str): Path of statistics file (".npy" or ".h5").
        """
        assert stats.endswith(".h5") or stats.endswith(".npy")
        if stats.endswith(".h5"):
            mean = read_hdf5(stats, "mean").reshape(-1)
            scale = read_hdf5(stats, "scale").reshape(-1)
        else:
            # .npy layout: row 0 is mean, row 1 is scale
            mean = np.load(stats)[0].reshape(-1)
            scale = np.load(stats)[1].reshape(-1)
        self.register_buffer("mean", torch.from_numpy(mean).float())
        self.register_buffer("scale", torch.from_numpy(scale).float())
        logging.info("Successfully registered stats as buffer.")
    def inference(self, c, normalize_before=False):
        """Perform inference.
        Args:
            c (Union[Tensor, ndarray]): Input tensor (T, in_channels).
            normalize_before (bool): Whether to perform normalization
                using the registered mean/scale buffers.
        Returns:
            Tensor: Output tensor (T * prod(upsample_scales), out_channels).
        """
        if not isinstance(c, torch.Tensor):
            c = torch.tensor(c, dtype=torch.float).to(next(self.parameters()).device)
        if normalize_before:
            c = (c - self.mean) / self.scale
        c = c.transpose(1, 0).unsqueeze(0)
        # prepare noise input: enough noise frames so that, after upsampling,
        # the noise sequence is at least as long as the feature sequence
        noise_size = (
            1,
            self.in_channels,
            (c.size(2) - 1) // self.noise_upsample_factor + 1,
        )
        noise = torch.randn(*noise_size, dtype=torch.float).to(
            next(self.parameters()).device
        )
        x = self.noise_upsample(noise)
        # NOTE(kan-bayashi): To remove pop noise at the end of audio, perform padding
        #   for feature sequence and after generation cut the generated audio. This
        #   requires additional computation but it can prevent pop noise.
        total_length = c.size(2) * self.upsample_factor
        c = F.pad(c, (0, x.size(2) - c.size(2)), "replicate")
        # This version causes pop noise.
        # x = x[:, :, :c.size(2)]
        for block in self.blocks:
            x, c = block(x, c)
        x = self.output_conv(x)[..., :total_length]
        return x.squeeze(0).transpose(1, 0)
class StyleMelGANDiscriminator(torch.nn.Module):
    """Style MelGAN discriminator module.

    Random-window discriminator: each base discriminator sees a randomly
    located window of the input, optionally split into PQMF sub-bands.
    """

    def __init__(
        self,
        repeats=2,
        window_sizes=[512, 1024, 2048, 4096],
        pqmf_params=[
            [1, None, None, None],
            [2, 62, 0.26700, 9.0],
            [4, 62, 0.14200, 9.0],
            [8, 62, 0.07949, 9.0],
        ],
        discriminator_params={
            "out_channels": 1,
            "kernel_sizes": [5, 3],
            "channels": 16,
            "max_downsample_channels": 512,
            "bias": True,
            "downsample_scales": [4, 4, 4, 1],
            "nonlinear_activation": "LeakyReLU",
            "nonlinear_activation_params": {"negative_slope": 0.2},
            "pad": "ReflectionPad1d",
            "pad_params": {},
        },
        use_weight_norm=True,
    ):
        """Initialize Style MelGAN discriminator.

        Args:
            repeats (int): Number of repetitions to apply random-window discrimination.
            window_sizes (list): List of random window sizes.
            pqmf_params (list): List of lists of parameters for PQMF modules.
            discriminator_params (dict): Parameters for the base discriminator module.
            use_weight_norm (bool): Whether to apply weight normalization.

        """
        super().__init__()
        # every window must cover the same number of samples per sub-band
        assert len(window_sizes) == len(pqmf_params)
        samples_per_band = [
            ws // params[0] for ws, params in zip(window_sizes, pqmf_params)
        ]
        assert all(size == samples_per_band[0] for size in samples_per_band)
        self.repeats = repeats
        self.window_sizes = window_sizes
        self.pqmfs = torch.nn.ModuleList()
        self.discriminators = torch.nn.ModuleList()
        for pqmf_param in pqmf_params:
            # deep-copy so per-branch in_channels does not leak into the
            # shared (mutable default) parameter dict
            d_params = copy.deepcopy(discriminator_params)
            d_params["in_channels"] = pqmf_param[0]
            if pqmf_param[0] == 1:
                # single band: no analysis filter needed
                self.pqmfs.append(torch.nn.Identity())
            else:
                self.pqmfs.append(PQMF(*pqmf_param))
            self.discriminators.append(BaseDiscriminator(**d_params))
        # apply weight norm
        if use_weight_norm:
            self.apply_weight_norm()
        # reset parameters
        self.reset_parameters()

    def forward(self, x):
        """Calculate forward propagation.

        Args:
            x (Tensor): Input tensor (B, 1, T).

        Returns:
            List: List of discriminator outputs; its length equals
                repeats * #discriminators.

        """
        outs = []
        for _ in range(self.repeats):
            outs.extend(self._forward(x))
        return outs

    def _forward(self, x):
        # run each discriminator on one randomly located window of the input
        outs = []
        for idx, (window_size, pqmf, disc) in enumerate(
            zip(self.window_sizes, self.pqmfs, self.discriminators)
        ):
            # NOTE(kan-bayashi): Is it ok to apply different window for real and fake samples?
            start = np.random.randint(x.size(-1) - window_size)
            x_win = x[:, :, start : start + window_size]
            if idx == 0:
                # identity path (single band)
                x_in = pqmf(x_win)
            else:
                # split into sub-bands before discrimination
                x_in = pqmf.analysis(x_win)
            outs.append(disc(x_in))
        return outs

    def apply_weight_norm(self):
        """Apply weight normalization module from all of the layers."""

        def _apply_weight_norm(m):
            if isinstance(m, (torch.nn.Conv1d, torch.nn.ConvTranspose1d)):
                torch.nn.utils.weight_norm(m)
                logging.debug(f"Weight norm is applied to {m}.")

        self.apply(_apply_weight_norm)

    def reset_parameters(self):
        """Reset parameters (all conv weights re-drawn from N(0, 0.02))."""

        def _reset_parameters(m):
            if isinstance(m, (torch.nn.Conv1d, torch.nn.ConvTranspose1d)):
                m.weight.data.normal_(0.0, 0.02)
                logging.debug(f"Reset parameters in {m}.")

        self.apply(_reset_parameters)
class DiscreteSymbolStyleMelGANGenerator(torch.nn.Module):
    """Discrete symbol Style MelGAN generator module.

    A Style MelGAN generator conditioned on discrete symbol indices: indices
    are embedded (and combined with a speaker-ID embedding) before being fed
    to the TADE residual blocks as the auxiliary features.
    """
    def __init__(
        self,
        in_channels=128,
        aux_channels=128,
        channels=64,
        out_channels=1,
        num_embs=100,
        num_spk_embs=128,
        spk_emb_dim=128,
        concat_spk_emb=False,
        kernel_size=9,
        dilation=2,
        bias=True,
        noise_upsample_scales=[11, 2, 2, 2],
        noise_upsample_activation="LeakyReLU",
        noise_upsample_activation_params={"negative_slope": 0.2},
        upsample_scales=[2, 2, 2, 2, 2, 2, 2, 2, 1],
        upsample_mode="nearest",
        gated_function="softmax",
        use_weight_norm=True,
    ):
        """Initialize discrete symbol Style MelGAN generator.
        Args:
            in_channels (int): Number of input noise channels.
            aux_channels (int): Number of auxiliary input channels
                (symbol embedding dimension).
            channels (int): Number of channels for conv layer.
            out_channels (int): Number of output channels.
            num_embs (int): Discrete symbol vocabulary size.
            num_spk_embs (int): Number of speakers for speaker-ID-based embedding.
            spk_emb_dim (int): Dimension of speaker embedding.
            concat_spk_emb (bool): Whether to concat speaker embedding to the input.
            kernel_size (int): Kernel size of conv layers.
            dilation (int): Dilation factor for conv layers.
            bias (bool): Whether to add bias parameter in convolution layers.
            noise_upsample_scales (list): List of noise upsampling scales.
            noise_upsample_activation (str): Activation function module name for noise upsampling.
            noise_upsample_activation_params (dict): Hyperparameters for the above activation function.
            upsample_scales (list): List of upsampling scales.
            upsample_mode (str): Upsampling mode in TADE layer.
            gated_function (str): Gated function in TADEResBlock ("softmax" or "sigmoid").
            use_weight_norm (bool): Whether to use weight norm.
                If set to true, it will be applied to all of the conv layers.
        """
        super().__init__()
        self.in_channels = in_channels
        # define id embedding
        self.emb = torch.nn.Embedding(
            num_embeddings=num_embs, embedding_dim=aux_channels
        )
        self.spk_emb = torch.nn.Embedding(
            num_embeddings=num_spk_embs, embedding_dim=spk_emb_dim
        )
        self.concat_spk_emb = concat_spk_emb
        if not concat_spk_emb:
            # additive integration requires matching dimensions
            assert aux_channels == spk_emb_dim
        else:
            # concatenation widens the auxiliary input
            aux_channels = aux_channels + spk_emb_dim
        # stack of transposed convs that expand the noise vector in time
        noise_upsample = []
        in_chs = in_channels
        for noise_upsample_scale in noise_upsample_scales:
            # NOTE(kan-bayashi): How should we design noise upsampling part?
            noise_upsample += [
                torch.nn.ConvTranspose1d(
                    in_chs,
                    channels,
                    noise_upsample_scale * 2,
                    stride=noise_upsample_scale,
                    # padding/output_padding chosen so the output length is
                    # exactly in_length * noise_upsample_scale
                    padding=noise_upsample_scale // 2 + noise_upsample_scale % 2,
                    output_padding=noise_upsample_scale % 2,
                    bias=bias,
                )
            ]
            noise_upsample += [
                getattr(torch.nn, noise_upsample_activation)(
                    **noise_upsample_activation_params
                )
            ]
            in_chs = channels
        self.noise_upsample = torch.nn.Sequential(*noise_upsample)
        self.noise_upsample_factor = np.prod(noise_upsample_scales)
        # TADE residual blocks; the first consumes the embedded aux features
        self.blocks = torch.nn.ModuleList()
        aux_chs = aux_channels
        for upsample_scale in upsample_scales:
            self.blocks += [
                TADEResBlock(
                    in_channels=channels,
                    aux_channels=aux_chs,
                    kernel_size=kernel_size,
                    dilation=dilation,
                    bias=bias,
                    upsample_factor=upsample_scale,
                    upsample_mode=upsample_mode,
                    gated_function=gated_function,
                ),
            ]
            aux_chs = channels
        self.upsample_factor = np.prod(upsample_scales)
        self.output_conv = torch.nn.Sequential(
            torch.nn.Conv1d(
                channels,
                out_channels,
                kernel_size,
                1,
                bias=bias,
                padding=(kernel_size - 1) // 2,
            ),
            torch.nn.Tanh(),
        )
        # apply weight norm
        if use_weight_norm:
            self.apply_weight_norm()
        # reset parameters
        self.reset_parameters()
    def forward(self, c, z=None):
        """Calculate forward propagation.
        Args:
            c (Tensor): Auxiliary input index tensor (B, 2, T)
                (row 0: symbol ids, row 1: speaker id).
            z (Tensor): Input noise tensor (B, in_channels, 1).
                If None, the noise is drawn from a standard normal.
        Returns:
            Tensor: Output tensor (B, out_channels, T * prod(upsample_scales)).
        """
        # convert idx to embedding
        assert c.size(1) == 2
        c_idx, g_idx = c.long().split(1, dim=1)
        c = self.emb(c_idx.squeeze(1)).transpose(1, 2)  # (B, C, T)
        # one speaker id per utterance: take the first frame's id
        g = self.spk_emb(g_idx[:, 0, 0])
        # integrate global embedding
        if not self.concat_spk_emb:
            c = c + g.unsqueeze(2)
        else:
            # NOTE(review): c is (B, C, T) here, so dim=-1 concatenates along
            # the time axis rather than channels — confirm this is intended.
            g = g.unsqueeze(1).expand(-1, c.size(1), -1)
            c = torch.cat([c, g], dim=-1)
        if z is None:
            z = torch.randn(c.size(0), self.in_channels, 1).to(
                device=c.device,
                dtype=c.dtype,
            )
        x = self.noise_upsample(z)
        for block in self.blocks:
            # each block returns both the modulated signal and the upsampled aux
            x, c = block(x, c)
        x = self.output_conv(x)
        return x
    def remove_weight_norm(self):
        """Remove weight normalization module from all of the layers."""
        def _remove_weight_norm(m):
            try:
                logging.debug(f"Weight norm is removed from {m}.")
                torch.nn.utils.remove_weight_norm(m)
            except ValueError:  # this module didn't have weight norm
                return
        self.apply(_remove_weight_norm)
    def apply_weight_norm(self):
        """Apply weight normalization module from all of the layers."""
        def _apply_weight_norm(m):
            if isinstance(m, torch.nn.Conv1d) or isinstance(
                m, torch.nn.ConvTranspose1d
            ):
                torch.nn.utils.weight_norm(m)
                logging.debug(f"Weight norm is applied to {m}.")
        self.apply(_apply_weight_norm)
    def reset_parameters(self):
        """Reset parameters (all conv weights re-drawn from N(0, 0.02))."""
        def _reset_parameters(m):
            if isinstance(m, torch.nn.Conv1d) or isinstance(
                m, torch.nn.ConvTranspose1d
            ):
                m.weight.data.normal_(0.0, 0.02)
                logging.debug(f"Reset parameters in {m}.")
        self.apply(_reset_parameters)
    def inference(self, c, g=None, normalize_before=False):
        """Perform inference.
        Args:
            c (Union[Tensor, ndarray]): Input index tensor (T, 2) or (T, 1).
            g (Optional[int]): Speaker id; if given, it overrides any speaker
                column in ``c``.
            normalize_before (bool): Must be False (no statistics are used).
        Returns:
            Tensor: Output tensor (T * prod(upsample_scales), out_channels).
        """
        assert not normalize_before, "No statistics are used."
        if not isinstance(c, torch.Tensor):
            c = torch.tensor(c, dtype=torch.long).to(next(self.parameters()).device)
        if g is not None:
            # build a speaker-id column filled with g alongside the symbol column
            c = c[:, 0:1]
            c = torch.cat([c, c.new_zeros(*c.size()).fill_(g).to(c.device)], dim=1)
        c = c.transpose(1, 0).unsqueeze(0)
        # convert idx to embedding
        assert c.size(1) == 2
        c_idx, g_idx = c.long().split(1, dim=1)
        c = self.emb(c_idx.squeeze(1)).transpose(1, 2)  # (B, C, T)
        g = self.spk_emb(g_idx[:, 0, 0])
        # integrate global embedding
        if not self.concat_spk_emb:
            c = c + g.unsqueeze(2)
        else:
            # NOTE(review): see forward() — dim=-1 concatenates along time.
            g = g.unsqueeze(1).expand(-1, c.size(1), -1)
            c = torch.cat([c, g], dim=-1)
        # enough noise frames so that, after upsampling, the noise sequence is
        # at least as long as the feature sequence
        noise_size = (
            1,
            self.in_channels,
            (c.size(2) - 1) // self.noise_upsample_factor + 1,
        )
        noise = torch.randn(*noise_size, dtype=torch.float).to(
            next(self.parameters()).device
        )
        noise_up = self.noise_upsample(noise)
        # truncate the upsampled noise to the feature length
        # (unlike StyleMelGANGenerator.inference, which pads the features)
        x = noise_up[:, :, : c.size(2)]
        for block in self.blocks:
            x, c = block(x, c)
        x = self.output_conv(x)
        return x.squeeze(0).transpose(1, 0)
| 20,714 | 33.353234 | 103 | py |
ParallelWaveGAN | ParallelWaveGAN-master/parallel_wavegan/models/tf_models.py | # -*- coding: utf-8 -*-
# Copyright 2020 MINH ANH (@dathudeptrai)
# MIT License (https://opensource.org/licenses/MIT)
"""Tensorflow MelGAN modules complatible with pytorch."""
import numpy as np
import tensorflow as tf
from parallel_wavegan.layers.tf_layers import (
TFConvTranspose1d,
TFReflectionPad1d,
TFResidualStack,
)
class TFMelGANGenerator(tf.keras.layers.Layer):
    """Tensorflow MelGAN generator module."""

    def __init__(
        self,
        in_channels=80,
        out_channels=1,
        kernel_size=7,
        channels=512,
        bias=True,
        upsample_scales=[8, 8, 2, 2],
        stack_kernel_size=3,
        stacks=3,
        nonlinear_activation="LeakyReLU",
        nonlinear_activation_params={"alpha": 0.2},
        pad="ReflectionPad1d",
        pad_params={},
        use_final_nonlinear_activation=True,
        use_weight_norm=True,
        use_causal_conv=False,
    ):
        """Initialize TFMelGANGenerator module.

        Args:
            in_channels (int): Number of input channels.
            out_channels (int): Number of output channels.
            kernel_size (int): Kernel size of initial and final conv layer.
            channels (int): Initial number of channels for conv layer.
            bias (bool): Whether to add bias parameter in convolution layers.
            upsample_scales (list): List of upsampling scales.
            stack_kernel_size (int): Kernel size of dilated conv layers in residual stack.
            stacks (int): Number of stacks in a single residual stack.
            nonlinear_activation (str): Activation function module name.
            nonlinear_activation_params (dict): Hyperparameters for activation function.
            pad (str): Padding function module name before dilated convolution layer.
            pad_params (dict): Hyperparameters for padding function.
            use_final_nonlinear_activation (torch.nn.Module): Activation function for the final layer.
            use_weight_norm (bool): No effect but keep it as is to be the same as pytorch version.
            use_causal_conv (bool): Whether to use causal convolution.
        """
        super(TFMelGANGenerator, self).__init__()

        # check hyper parameters is valid
        assert not use_causal_conv, "Not supported yet."
        assert channels >= np.prod(upsample_scales)
        assert channels % (2 ** len(upsample_scales)) == 0
        assert pad == "ReflectionPad1d", f"Not supported (pad={pad})."

        # add initial layer
        # NOTE: Conv2D with a (k, 1) kernel emulates a 1D convolution; see
        # call(), which adds and removes a dummy axis around the stack.
        layers = []
        layers += [
            TFReflectionPad1d((kernel_size - 1) // 2),
            tf.keras.layers.Conv2D(
                filters=channels,
                kernel_size=(kernel_size, 1),
                padding="valid",
                use_bias=bias,
            ),
        ]

        # Each stage halves the channel count while upsampling in time.
        for i, upsample_scale in enumerate(upsample_scales):
            # add upsampling layer
            layers += [
                getattr(tf.keras.layers, nonlinear_activation)(
                    **nonlinear_activation_params
                ),
                TFConvTranspose1d(
                    channels=channels // (2 ** (i + 1)),
                    kernel_size=upsample_scale * 2,
                    stride=upsample_scale,
                    padding="same",
                ),
            ]

            # add residual stack (dilation grows geometrically with depth)
            for j in range(stacks):
                layers += [
                    TFResidualStack(
                        kernel_size=stack_kernel_size,
                        channels=channels // (2 ** (i + 1)),
                        dilation=stack_kernel_size**j,
                        bias=bias,
                        nonlinear_activation=nonlinear_activation,
                        nonlinear_activation_params=nonlinear_activation_params,
                        padding="same",
                    )
                ]

        # add final layer
        layers += [
            getattr(tf.keras.layers, nonlinear_activation)(
                **nonlinear_activation_params
            ),
            TFReflectionPad1d((kernel_size - 1) // 2),
            tf.keras.layers.Conv2D(
                filters=out_channels, kernel_size=(kernel_size, 1), use_bias=bias
            ),
        ]
        if use_final_nonlinear_activation:
            layers += [tf.keras.layers.Activation("tanh")]

        self.melgan = tf.keras.models.Sequential(layers)

    # TODO(kan-bayashi): Fix hard coded dimension
    @tf.function(
        input_signature=[tf.TensorSpec(shape=[None, None, 80], dtype=tf.float32)]
    )
    def call(self, c):
        """Calculate forward propagation.

        Args:
            c (Tensor): Input tensor (B, T, in_channels).

        Returns:
            Tensor: Output tensor (B, T ** prod(upsample_scales), out_channels).
        """
        # Add a dummy axis so the (k, 1) Conv2D layers act as Conv1D,
        # then drop it after the stack.
        c = tf.expand_dims(c, 2)
        c = self.melgan(c)
        return c[:, :, 0, :]
| 4,922 | 34.417266 | 102 | py |
ParallelWaveGAN | ParallelWaveGAN-master/parallel_wavegan/models/uhifigan.py | # -*- coding: utf-8 -*-
"""Unet-baed HiFi-GAN Modules.
This code is based on https://github.com/jik876/hifi-gan.
"""
import logging
import numpy as np
import torch
from parallel_wavegan.layers import CausalConv1d, CausalConvTranspose1d
from parallel_wavegan.layers import HiFiGANResidualBlock as ResidualBlock
from parallel_wavegan.utils import read_hdf5
class UHiFiGANGenerator(torch.nn.Module):
    """UHiFiGAN generator module."""

    def __init__(
        self,
        in_channels=80,
        out_channels=1,
        channels=512,
        kernel_size=7,
        downsample_scales=(8, 8, 2, 2),
        downsample_kernel_sizes=(16, 16, 4, 4),
        upsample_scales=(8, 8, 2, 2),
        upsample_kernel_sizes=(16, 16, 4, 4),
        resblock_kernel_sizes=(3, 7, 11),
        resblock_dilations=[(1, 3, 5), (1, 3, 5), (1, 3, 5)],
        dropout=0.3,
        use_additional_convs=True,
        bias=True,
        nonlinear_activation="LeakyReLU",
        nonlinear_activation_params={"negative_slope": 0.1},
        use_causal_conv=False,
        use_weight_norm=True,
    ):
        """Initialize Unet-based HiFiGANGenerator module.

        Args:
            in_channels (int): Number of input channels.
            out_channels (int): Number of output channels.
            channels (int): Number of hidden representation channels.
            kernel_size (int): Kernel size of initial and final conv layer.
            downsample_scales (list): List of downsampling scales for the
                excitation (U-Net encoder) path.
            downsample_kernel_sizes (list): List of kernel sizes for
                downsampling layers.
            upsample_scales (list): List of upsampling scales.
            upsample_kernel_sizes (list): List of kernel sizes for upsampling layers.
            resblock_kernel_sizes (list): List of kernel sizes for residual blocks.
            resblock_dilations (list): List of dilation list for residual blocks.
            dropout (float): Dropout rate used after input/downsampling convs.
            use_additional_convs (bool): Whether to use additional conv layers in residual blocks.
            bias (bool): Whether to add bias parameter in convolution layers.
            nonlinear_activation (str): Activation function module name.
            nonlinear_activation_params (dict): Hyperparameters for activation function.
            use_causal_conv (bool): Whether to use causal structure.
            use_weight_norm (bool): Whether to use weight norm.
                If set to true, it will be applied to all of the conv layers.
        """
        super().__init__()

        # check hyperparameters are valid
        assert kernel_size % 2 == 1, "Kernel size must be odd number."
        assert len(upsample_scales) == len(upsample_kernel_sizes)
        assert len(resblock_dilations) == len(resblock_kernel_sizes)

        # define modules
        self.num_upsamples = len(upsample_kernel_sizes)
        self.num_blocks = len(resblock_kernel_sizes)
        self.use_causal_conv = use_causal_conv
        self.downsamples = torch.nn.ModuleList()
        self.downsamples_mrf = torch.nn.ModuleList()
        self.upsamples = torch.nn.ModuleList()
        self.upsamples_mrf = torch.nn.ModuleList()
        # Input conv consumes the excitation signal (out_channels wide).
        if not use_causal_conv:
            self.input_conv = torch.nn.Sequential(
                torch.nn.Conv1d(
                    out_channels,
                    channels,
                    kernel_size=kernel_size,
                    bias=bias,
                    padding=(kernel_size - 1) // 2,
                ),
                getattr(torch.nn, nonlinear_activation)(**nonlinear_activation_params),
                torch.nn.Dropout(dropout),
            )
        else:
            self.input_conv = torch.nn.Sequential(
                # NOTE(review): an explicit ``padding`` is passed to
                # CausalConv1d here; confirm its signature accepts one.
                CausalConv1d(
                    out_channels,
                    channels,
                    kernel_size=kernel_size,
                    bias=bias,
                    padding=(kernel_size - 1) // 2,
                ),
                getattr(torch.nn, nonlinear_activation)(**nonlinear_activation_params),
                torch.nn.Dropout(dropout),
            )
        # Encoder path: each stage runs an MRF (multi-receptive-field) group
        # of residual blocks, then a strided conv that doubles the channels.
        for i in range(len(downsample_scales)):
            for j in range(len(resblock_kernel_sizes)):
                self.downsamples_mrf += [
                    ResidualBlock(
                        kernel_size=resblock_kernel_sizes[j],
                        channels=channels,
                        dilations=resblock_dilations[j],
                        bias=bias,
                        use_additional_convs=use_additional_convs,
                        nonlinear_activation=nonlinear_activation,
                        nonlinear_activation_params=nonlinear_activation_params,
                        use_causal_conv=use_causal_conv,
                    )
                ]
            if not use_causal_conv:
                self.downsamples += [
                    torch.nn.Sequential(
                        torch.nn.Conv1d(
                            channels,
                            channels * 2,
                            kernel_size=downsample_kernel_sizes[i],
                            stride=downsample_scales[i],
                            bias=bias,
                            padding=downsample_scales[i] // 2
                            + downsample_scales[i] % 2,
                        ),
                        getattr(torch.nn, nonlinear_activation)(
                            **nonlinear_activation_params
                        ),
                        torch.nn.Dropout(dropout),
                    )
                ]
            else:
                self.downsamples += [
                    torch.nn.Sequential(
                        CausalConv1d(
                            channels,
                            channels * 2,
                            kernel_size=downsample_kernel_sizes[i],
                            stride=downsample_scales[i],
                            bias=bias,
                            padding=downsample_scales[i] // 2
                            + downsample_scales[i] % 2,
                        ),
                        getattr(torch.nn, nonlinear_activation)(
                            **nonlinear_activation_params
                        ),
                        torch.nn.Dropout(dropout),
                    )
                ]
            # channel count is mutated across iterations on purpose
            channels = channels * 2
        # Bottleneck conv lifts the mel spectrogram to the widest channel size.
        if not use_causal_conv:
            self.hidden_conv = torch.nn.Conv1d(
                in_channels,
                channels,
                kernel_size=kernel_size,
                bias=bias,
                padding=(kernel_size - 1) // 2,
            )
        else:
            # NOTE(review): explicit ``padding`` passed to CausalConv1d here
            # as well — confirm against its signature.
            self.hidden_conv = CausalConv1d(
                in_channels,
                channels,
                kernel_size=kernel_size,
                bias=bias,
                padding=(kernel_size - 1) // 2,
            )
        # Decoder path: transposed conv takes channels * 2 inputs because the
        # matching encoder output is concatenated in forward() (U-Net skip).
        for i in range(len(upsample_kernel_sizes)):
            # assert upsample_kernel_sizes[i] == 2 * upsample_scales[i]
            if not use_causal_conv:
                self.upsamples += [
                    torch.nn.Sequential(
                        getattr(torch.nn, nonlinear_activation)(
                            **nonlinear_activation_params
                        ),
                        torch.nn.ConvTranspose1d(
                            channels * 2,
                            channels // 2,
                            upsample_kernel_sizes[i],
                            upsample_scales[i],
                            padding=upsample_scales[i] // 2 + upsample_scales[i] % 2,
                            output_padding=upsample_scales[i] % 2,
                            bias=bias,
                        ),
                    )
                ]
            else:
                self.upsamples += [
                    torch.nn.Sequential(
                        getattr(torch.nn, nonlinear_activation)(
                            **nonlinear_activation_params
                        ),
                        CausalConvTranspose1d(
                            channels * 2,
                            channels // 2,
                            upsample_kernel_sizes[i],
                            upsample_scales[i],
                            bias=bias,
                        ),
                    )
                ]
            # hidden_channel for MRF module
            for j in range(len(resblock_kernel_sizes)):
                self.upsamples_mrf += [
                    ResidualBlock(
                        kernel_size=resblock_kernel_sizes[j],
                        channels=channels // 2,
                        dilations=resblock_dilations[j],
                        bias=bias,
                        use_additional_convs=use_additional_convs,
                        nonlinear_activation=nonlinear_activation,
                        nonlinear_activation_params=nonlinear_activation_params,
                        use_causal_conv=use_causal_conv,
                    )
                ]
            channels = channels // 2
        if not use_causal_conv:
            self.output_conv = torch.nn.Sequential(
                # NOTE(kan-bayashi): follow official implementation but why
                # using different slope parameter here? (0.1 vs. 0.01)
                torch.nn.LeakyReLU(),
                torch.nn.Conv1d(
                    channels,
                    out_channels,
                    kernel_size,
                    bias=bias,
                    padding=(kernel_size - 1) // 2,
                ),
                torch.nn.Tanh(),
            )
        else:
            self.output_conv = torch.nn.Sequential(
                # NOTE(kan-bayashi): follow official implementation but why
                # using different slope parameter here? (0.1 vs. 0.01)
                torch.nn.LeakyReLU(),
                CausalConv1d(
                    channels,
                    out_channels,
                    kernel_size,
                    bias=bias,
                ),
                torch.nn.Tanh(),
            )

        # apply weight norm
        if use_weight_norm:
            self.apply_weight_norm()

        # reset parameters
        self.reset_parameters()

    def forward(self, c=None, f0=None, excitation=None):
        """Calculate forward propagation.

        Args:
            c (Tensor): Input tensor (B, in_channels, T).
            f0 (Tensor): Input tensor (B, 1, T). Currently unused in the body;
                kept for interface compatibility.
            excitation (Tensor): Input tensor (B, frame_len, T).

        Returns:
            Tensor: Output tensor (B, out_channels, T).
        """
        # Encoder: downsample the excitation, keeping each stage's output as a
        # skip connection for the decoder.
        residual_results = []
        hidden = self.input_conv(excitation)
        for i in range(len(self.downsamples)):
            cs = 0.0  # initialize
            # MRF: average the outputs of the parallel residual blocks.
            for j in range(self.num_blocks):
                tc = self.downsamples_mrf[i * self.num_blocks + j](hidden)
                cs += tc
            hidden = cs / self.num_blocks
            hidden = self.downsamples[i](hidden)
            residual_results.append(hidden)
        # Deepest encoder output pairs with the first decoder stage.
        residual_results.reverse()
        # Decoder: upsample the mel features, concatenating the matching
        # encoder output (U-Net skip) before each transposed conv.
        hidden_mel = self.hidden_conv(c)
        for i in range(len(self.upsamples)):
            hidden_mel = torch.cat((hidden_mel, residual_results[i]), dim=1)
            hidden_mel = self.upsamples[i](hidden_mel)
            cs = 0.0  # initialize
            for j in range(self.num_blocks):
                tc = self.upsamples_mrf[i * self.num_blocks + j](hidden_mel)
                cs += tc
            hidden_mel = cs / self.num_blocks
        mel = self.output_conv(hidden_mel)
        return mel

    def reset_parameters(self):
        """Reset parameters.

        This initialization follows the official implementation manner.
        https://github.com/jik876/hifi-gan/blob/master/models.py
        """

        def _reset_parameters(m):
            if isinstance(m, (torch.nn.Conv1d, torch.nn.ConvTranspose1d)):
                m.weight.data.normal_(0.0, 0.01)
                logging.debug(f"Reset parameters in {m}.")

        self.apply(_reset_parameters)

    def remove_weight_norm(self):
        """Remove weight normalization module from all of the layers."""

        def _remove_weight_norm(m):
            try:
                logging.debug(f"Weight norm is removed from {m}.")
                torch.nn.utils.remove_weight_norm(m)
            except ValueError:  # this module didn't have weight norm
                return

        self.apply(_remove_weight_norm)

    def apply_weight_norm(self):
        """Apply weight normalization module from all of the layers."""

        def _apply_weight_norm(m):
            if isinstance(m, torch.nn.Conv1d) or isinstance(
                m, torch.nn.ConvTranspose1d
            ):
                torch.nn.utils.weight_norm(m)
                logging.debug(f"Weight norm is applied to {m}.")

        self.apply(_apply_weight_norm)

    def register_stats(self, stats):
        """Register stats for de-normalization as buffer.

        Args:
            stats (str): Path of statistics file (".npy" or ".h5").
        """
        assert stats.endswith(".h5") or stats.endswith(".npy")
        if stats.endswith(".h5"):
            mean = read_hdf5(stats, "mean").reshape(-1)
            scale = read_hdf5(stats, "scale").reshape(-1)
        else:
            # .npy layout: row 0 is the mean vector, row 1 is the scale vector.
            mean = np.load(stats)[0].reshape(-1)
            scale = np.load(stats)[1].reshape(-1)
        self.register_buffer("mean", torch.from_numpy(mean).float())
        self.register_buffer("scale", torch.from_numpy(scale).float())
        logging.info("Successfully registered stats as buffer.")

    def inference(self, excitation=None, f0=None, c=None, normalize_before=False):
        """Perform inference.

        Args:
            excitation (Union[Tensor, ndarray]): Excitation tensor.
            f0 (Union[Tensor, ndarray]): F0 tensor.
            c (Union[Tensor, ndarray]): Input tensor (T, in_channels).
            normalize_before (bool): Whether to perform normalization.
                NOTE(review): currently unused in this body — no
                normalization is applied regardless of the value.

        Returns:
            Tensor: Output tensor (T ** prod(upsample_scales), out_channels).
        """
        # Promote any numpy inputs to tensors on the model's device.
        if c is not None and not isinstance(c, torch.Tensor):
            c = torch.tensor(c, dtype=torch.float).to(next(self.parameters()).device)
        if excitation is not None and not isinstance(excitation, torch.Tensor):
            excitation = torch.tensor(excitation, dtype=torch.float).to(
                next(self.parameters()).device
            )
        if f0 is not None and not isinstance(f0, torch.Tensor):
            f0 = torch.tensor(f0, dtype=torch.float).to(next(self.parameters()).device)
        # Add batch dims / reshape to the (B, C, T) layout forward() expects.
        c = self.forward(
            c.transpose(1, 0).unsqueeze(0),
            f0.unsqueeze(1).transpose(1, 0).unsqueeze(0),
            excitation.reshape(1, 1, -1),
        )
        return c.squeeze(0).transpose(1, 0)
| 14,674 | 36.822165 | 98 | py |
ParallelWaveGAN | ParallelWaveGAN-master/parallel_wavegan/bin/decode.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2019 Tomoki Hayashi
# MIT License (https://opensource.org/licenses/MIT)
"""Decode with trained Parallel WaveGAN Generator."""
import argparse
import logging
import os
import time
import numpy as np
import soundfile as sf
import torch
import yaml
from tqdm import tqdm
from parallel_wavegan.datasets import (
AudioDataset,
AudioSCPDataset,
MelDataset,
MelF0ExcitationDataset,
MelSCPDataset,
)
from parallel_wavegan.utils import load_model, read_hdf5
def main():
    """Run decoding process.

    Parses CLI arguments, loads the trained generator checkpoint and its
    config, then decodes either dumped features (--dumpdir) or a kaldi-style
    scp (--scp) to PCM-16 wav files in --outdir.

    Raises:
        ValueError: If neither or both of --scp / --dumpdir are given, or if
            the dump format is not hdf5/npy.
    """
    parser = argparse.ArgumentParser(
        description=(
            "Decode dumped features with trained Parallel WaveGAN Generator "
            "(See detail in parallel_wavegan/bin/decode.py)."
        )
    )
    parser.add_argument(
        "--scp",
        default=None,
        type=str,
        help=(
            "kaldi-style feats.scp file. "
            "you need to specify either feats-scp or dumpdir."
        ),
    )
    parser.add_argument(
        "--dumpdir",
        default=None,
        type=str,
        help=(
            "directory including feature files. "
            "you need to specify either feats-scp or dumpdir."
        ),
    )
    parser.add_argument(
        "--segments",
        default=None,
        type=str,
        help="kaldi-style segments file.",
    )
    parser.add_argument(
        "--outdir",
        type=str,
        required=True,
        help="directory to save generated speech.",
    )
    parser.add_argument(
        "--checkpoint",
        type=str,
        required=True,
        help="checkpoint file to be loaded.",
    )
    parser.add_argument(
        "--config",
        default=None,
        type=str,
        help=(
            "yaml format configuration file. if not explicitly provided, "
            "it will be searched in the checkpoint directory. (default=None)"
        ),
    )
    parser.add_argument(
        "--normalize-before",
        default=False,
        action="store_true",
        help=(
            "whether to perform feature normalization before input to the model. if"
            " true, it assumes that the feature is de-normalized. this is useful when"
            " text2mel model and vocoder use different feature statistics."
        ),
    )
    parser.add_argument(
        "--verbose",
        type=int,
        default=1,
        help="logging level. higher is more logging. (default=1)",
    )
    args = parser.parse_args()

    # set logger
    if args.verbose > 1:
        logging.basicConfig(
            level=logging.DEBUG,
            format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
        )
    elif args.verbose > 0:
        logging.basicConfig(
            level=logging.INFO,
            format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
        )
    else:
        logging.basicConfig(
            level=logging.WARN,
            format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
        )
        logging.warning("Skip DEBUG/INFO messages")

    # check directory existence
    if not os.path.exists(args.outdir):
        os.makedirs(args.outdir)

    # load config
    if args.config is None:
        dirname = os.path.dirname(args.checkpoint)
        args.config = os.path.join(dirname, "config.yml")
    with open(args.config) as f:
        config = yaml.load(f, Loader=yaml.Loader)
    config.update(vars(args))

    # check arguments
    if (args.scp is not None and args.dumpdir is not None) or (
        args.scp is None and args.dumpdir is None
    ):
        # BUGFIX: the flag is named --scp (see above), so refer to it by its
        # real name instead of the stale --feats-scp.
        raise ValueError("Please specify either --dumpdir or --scp.")

    # setup model
    if torch.cuda.is_available():
        device = torch.device("cuda")
    else:
        device = torch.device("cpu")
    model = load_model(args.checkpoint, config)
    logging.info(f"Loaded model parameters from {args.checkpoint}.")
    if args.normalize_before:
        assert hasattr(model, "mean"), "Feature stats are not registered."
        assert hasattr(model, "scale"), "Feature stats are not registered."
    model.remove_weight_norm()
    # BUGFIX: the original called .to(device) twice; once is enough.
    model = model.eval().to(device)

    # check model type
    generator_type = config.get("generator_type", "ParallelWaveGANGenerator")
    use_aux_input = "VQVAE" not in generator_type
    use_global_condition = config.get("use_global_condition", False)
    use_local_condition = config.get("use_local_condition", False)
    use_f0_and_excitation = generator_type == "UHiFiGANGenerator"

    if use_aux_input:
        ############################
        #      MEL2WAV CASE        #
        ############################
        # setup dataset
        if args.dumpdir is not None:
            if config["format"] == "hdf5":
                mel_query = "*.h5"
                mel_load_fn = lambda x: read_hdf5(x, "feats")  # NOQA
                if use_f0_and_excitation:
                    f0_query = "*.h5"
                    f0_load_fn = lambda x: read_hdf5(x, "f0")  # NOQA
                    excitation_query = "*.h5"
                    excitation_load_fn = lambda x: read_hdf5(x, "excitation")  # NOQA
            elif config["format"] == "npy":
                mel_query = "*-feats.npy"
                mel_load_fn = np.load
                if use_f0_and_excitation:
                    f0_query = "*-f0.npy"
                    f0_load_fn = np.load
                    excitation_query = "*-excitation.npy"
                    excitation_load_fn = np.load
            else:
                raise ValueError("Support only hdf5 or npy format.")
            if not use_f0_and_excitation:
                dataset = MelDataset(
                    args.dumpdir,
                    mel_query=mel_query,
                    mel_load_fn=mel_load_fn,
                    return_utt_id=True,
                )
            else:
                dataset = MelF0ExcitationDataset(
                    root_dir=args.dumpdir,
                    mel_query=mel_query,
                    f0_query=f0_query,
                    excitation_query=excitation_query,
                    mel_load_fn=mel_load_fn,
                    f0_load_fn=f0_load_fn,
                    excitation_load_fn=excitation_load_fn,
                    return_utt_id=True,
                )
        else:
            if use_f0_and_excitation:
                raise NotImplementedError(
                    "SCP format is not supported for f0 and excitation."
                )
            # BUGFIX: argparse stores --scp as args.scp; args.feats_scp does
            # not exist and raised AttributeError here.
            dataset = MelSCPDataset(
                feats_scp=args.scp,
                return_utt_id=True,
            )
        logging.info(f"The number of features to be decoded = {len(dataset)}.")

        # start generation
        total_rtf = 0.0
        with torch.no_grad(), tqdm(dataset, desc="[decode]") as pbar:
            for idx, items in enumerate(pbar, 1):
                if not use_f0_and_excitation:
                    utt_id, c = items
                    f0, excitation = None, None
                else:
                    utt_id, c, f0, excitation = items
                batch = dict(normalize_before=args.normalize_before)
                if c is not None:
                    c = torch.tensor(c, dtype=torch.float).to(device)
                    batch.update(c=c)
                if f0 is not None:
                    f0 = torch.tensor(f0, dtype=torch.float).to(device)
                    batch.update(f0=f0)
                if excitation is not None:
                    excitation = torch.tensor(excitation, dtype=torch.float).to(device)
                    batch.update(excitation=excitation)
                start = time.time()
                y = model.inference(**batch).view(-1)
                rtf = (time.time() - start) / (len(y) / config["sampling_rate"])
                pbar.set_postfix({"RTF": rtf})
                total_rtf += rtf

                # save as PCM 16 bit wav file
                sf.write(
                    os.path.join(config["outdir"], f"{utt_id}_gen.wav"),
                    y.cpu().numpy(),
                    config["sampling_rate"],
                    "PCM_16",
                )

        # report average RTF
        logging.info(
            f"Finished generation of {idx} utterances (RTF = {total_rtf / idx:.03f})."
        )
    else:
        ############################
        #      VQ-WAV2WAV CASE     #
        ############################
        # setup dataset
        if args.dumpdir is not None:
            local_query = None
            local_load_fn = None
            global_query = None
            global_load_fn = None
            if config["format"] == "hdf5":
                audio_query = "*.h5"
                audio_load_fn = lambda x: read_hdf5(x, "wave")  # NOQA
                if use_local_condition:
                    local_query = "*.h5"
                    local_load_fn = lambda x: read_hdf5(x, "local")  # NOQA
                if use_global_condition:
                    global_query = "*.h5"
                    global_load_fn = lambda x: read_hdf5(x, "global")  # NOQA
            elif config["format"] == "npy":
                audio_query = "*-wave.npy"
                audio_load_fn = np.load
                if use_local_condition:
                    local_query = "*-local.npy"
                    local_load_fn = np.load
                if use_global_condition:
                    global_query = "*-global.npy"
                    global_load_fn = np.load
            else:
                raise ValueError("support only hdf5 or npy format.")
            dataset = AudioDataset(
                args.dumpdir,
                audio_query=audio_query,
                audio_load_fn=audio_load_fn,
                local_query=local_query,
                local_load_fn=local_load_fn,
                global_query=global_query,
                global_load_fn=global_load_fn,
                return_utt_id=True,
            )
        else:
            if use_local_condition:
                raise NotImplementedError("Not supported.")
            if use_global_condition:
                raise NotImplementedError("Not supported.")
            dataset = AudioSCPDataset(
                args.scp,
                segments=args.segments,
                return_utt_id=True,
            )
        logging.info(f"The number of features to be decoded = {len(dataset)}.")

        # start generation
        total_rtf = 0.0
        text = os.path.join(config["outdir"], "text")
        with torch.no_grad(), open(text, "w") as f, tqdm(
            dataset, desc="[decode]"
        ) as pbar:
            for idx, items in enumerate(pbar, 1):
                # setup input
                if use_local_condition and use_global_condition:
                    utt_id, x, l_, g = items
                    l_ = (
                        torch.from_numpy(l_)
                        .float()
                        .unsqueeze(0)
                        .transpose(1, 2)
                        .to(device)
                    )
                    g = torch.from_numpy(g).long().view(1).to(device)
                elif use_local_condition:
                    utt_id, x, l_ = items
                    l_ = (
                        torch.from_numpy(l_)
                        .float()
                        .unsqueeze(0)
                        .transpose(1, 2)
                        .to(device)
                    )
                    g = None
                elif use_global_condition:
                    utt_id, x, g = items
                    g = torch.from_numpy(g).long().view(1).to(device)
                    l_ = None
                else:
                    utt_id, x = items
                    l_, g = None, None
                x = torch.from_numpy(x).float().view(1, 1, -1).to(device)

                # generate
                start = time.time()
                if config["generator_params"]["out_channels"] == 1:
                    z = model.encode(x)
                    y = model.decode(z, l_, g).view(-1).cpu().numpy()
                else:
                    # multi-band model: analyze/synthesize through PQMF
                    z = model.encode(model.pqmf.analysis(x))
                    y_ = model.decode(z, l_, g)
                    y = model.pqmf.synthesis(y_).view(-1).cpu().numpy()
                rtf = (time.time() - start) / (len(y) / config["sampling_rate"])
                pbar.set_postfix({"RTF": rtf})
                total_rtf += rtf

                # save as PCM 16 bit wav file
                sf.write(
                    os.path.join(config["outdir"], f"{utt_id}_gen.wav"),
                    y,
                    config["sampling_rate"],
                    "PCM_16",
                )

                # save encode discrete symbols
                symbols = " ".join([str(z) for z in z.view(-1).cpu().numpy()])
                f.write(f"{utt_id} {symbols}\n")

        # report average RTF
        logging.info(
            f"Finished generation of {idx} utterances (RTF = {total_rtf / idx:.03f})."
        )
# Execute decoding when this module is run as a script.
if __name__ == "__main__":
    main()
| 13,111 | 34.342318 | 87 | py |
ParallelWaveGAN | ParallelWaveGAN-master/parallel_wavegan/bin/train.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2019 Tomoki Hayashi
# MIT License (https://opensource.org/licenses/MIT)
"""Train Parallel WaveGAN."""
import argparse
import logging
import os
import sys
from collections import defaultdict
import matplotlib
import numpy as np
import soundfile as sf
import torch
import yaml
from tensorboardX import SummaryWriter
from torch.utils.data import DataLoader
from tqdm import tqdm
import parallel_wavegan
import parallel_wavegan.models
import parallel_wavegan.optimizers
from parallel_wavegan.datasets import (
AudioDataset,
AudioMelDataset,
AudioMelF0ExcitationDataset,
AudioMelSCPDataset,
AudioSCPDataset,
)
from parallel_wavegan.layers import PQMF
from parallel_wavegan.losses import (
DiscriminatorAdversarialLoss,
DurationPredictorLoss,
FeatureMatchLoss,
GeneratorAdversarialLoss,
MelSpectrogramLoss,
MultiResolutionSTFTLoss,
)
from parallel_wavegan.utils import read_hdf5
# set to avoid matplotlib error in CLI environment
matplotlib.use("Agg")
class Trainer(object):
"""Customized trainer module for Parallel WaveGAN training."""
def __init__(
self,
steps,
epochs,
data_loader,
sampler,
model,
criterion,
optimizer,
scheduler,
config,
device=torch.device("cpu"),
):
"""Initialize trainer.
Args:
steps (int): Initial global steps.
epochs (int): Initial global epochs.
data_loader (dict): Dict of data loaders. It must contrain "train" and "dev" loaders.
model (dict): Dict of models. It must contrain "generator" and "discriminator" models.
criterion (dict): Dict of criterions. It must contrain "stft" and "mse" criterions.
optimizer (dict): Dict of optimizers. It must contrain "generator" and "discriminator" optimizers.
scheduler (dict): Dict of schedulers. It must contrain "generator" and "discriminator" schedulers.
config (dict): Config dict loaded from yaml format configuration file.
device (torch.deive): Pytorch device instance.
"""
self.steps = steps
self.epochs = epochs
self.data_loader = data_loader
self.sampler = sampler
self.model = model
self.criterion = criterion
self.optimizer = optimizer
self.scheduler = scheduler
self.config = config
self.device = device
self.writer = SummaryWriter(config["outdir"])
self.finish_train = False
self.total_train_loss = defaultdict(float)
self.total_eval_loss = defaultdict(float)
self.is_vq = "VQVAE" in config.get("generator_type", "ParallelWaveGANGenerator")
self.use_duration_prediction = "Duration" in config.get(
"generator_type", "ParallelWaveGANGenerator"
)
def run(self):
"""Run training."""
self.tqdm = tqdm(
initial=self.steps, total=self.config["train_max_steps"], desc="[train]"
)
while True:
# train one epoch
self._train_epoch()
# check whether training is finished
if self.finish_train:
break
self.tqdm.close()
logging.info("Finished training.")
def save_checkpoint(self, checkpoint_path):
"""Save checkpoint.
Args:
checkpoint_path (str): Checkpoint path to be saved.
"""
state_dict = {
"optimizer": {
"generator": self.optimizer["generator"].state_dict(),
"discriminator": self.optimizer["discriminator"].state_dict(),
},
"scheduler": {
"generator": self.scheduler["generator"].state_dict(),
"discriminator": self.scheduler["discriminator"].state_dict(),
},
"steps": self.steps,
"epochs": self.epochs,
}
if self.config["distributed"]:
state_dict["model"] = {
"generator": self.model["generator"].module.state_dict(),
"discriminator": self.model["discriminator"].module.state_dict(),
}
else:
state_dict["model"] = {
"generator": self.model["generator"].state_dict(),
"discriminator": self.model["discriminator"].state_dict(),
}
if not os.path.exists(os.path.dirname(checkpoint_path)):
os.makedirs(os.path.dirname(checkpoint_path))
torch.save(state_dict, checkpoint_path)
def load_checkpoint(self, checkpoint_path, load_only_params=False):
"""Load checkpoint.
Args:
checkpoint_path (str): Checkpoint path to be loaded.
load_only_params (bool): Whether to load only model parameters.
"""
state_dict = torch.load(checkpoint_path, map_location="cpu")
if self.config["distributed"]:
self.model["generator"].module.load_state_dict(
state_dict["model"]["generator"],
)
self.model["discriminator"].module.load_state_dict(
state_dict["model"]["discriminator"],
strict=False,
)
else:
self.model["generator"].load_state_dict(
state_dict["model"]["generator"],
)
self.model["discriminator"].load_state_dict(
state_dict["model"]["discriminator"],
strict=False,
)
if not load_only_params:
self.steps = state_dict["steps"]
self.epochs = state_dict["epochs"]
self.optimizer["generator"].load_state_dict(
state_dict["optimizer"]["generator"]
)
self.optimizer["discriminator"].load_state_dict(
state_dict["optimizer"]["discriminator"]
)
self.scheduler["generator"].load_state_dict(
state_dict["scheduler"]["generator"]
)
self.scheduler["discriminator"].load_state_dict(
state_dict["scheduler"]["discriminator"]
)
def _train_step(self, batch):
"""Train model one step."""
# parse batch and send to device
if self.use_duration_prediction:
x, y, ds = self._parse_batch(batch)
else:
x, y = self._parse_batch(batch)
#######################
# Generator #
#######################
if self.steps > self.config.get("generator_train_start_steps", 0):
# initialize
gen_loss = 0.0
if self.is_vq:
# vq case
if self.config["generator_params"]["in_channels"] == 1:
y_, z_e, z_q = self.model["generator"](y, *x)
else:
y_mb = self.criterion["pqmf"].analysis(y)
y_, z_e, z_q = self.model["generator"](y_mb, *x)
quantize_loss = self.criterion["mse"](z_q, z_e.detach())
commit_loss = self.criterion["mse"](z_e, z_q.detach())
self.total_train_loss["train/quantization_loss"] += quantize_loss.item()
self.total_train_loss["train/commitment_loss"] += commit_loss.item()
gen_loss += quantize_loss + self.config["lambda_commit"] * commit_loss
elif self.use_duration_prediction:
assert ds is not None
y_, ds_ = self.model["generator"](x, ds)
duration_loss = self.criterion["duration"](ds_, ds)
self.total_train_loss["train/duration_loss"] += duration_loss.item()
gen_loss += duration_loss
else:
y_ = self.model["generator"](*x)
# reconstruct the signal from multi-band signal
if self.config["generator_params"]["out_channels"] > 1:
y_mb_ = y_
y_ = self.criterion["pqmf"].synthesis(y_mb_)
# multi-resolution sfft loss
if self.config["use_stft_loss"]:
sc_loss, mag_loss = self.criterion["stft"](y_, y)
gen_loss += sc_loss + mag_loss
self.total_train_loss[
"train/spectral_convergence_loss"
] += sc_loss.item()
self.total_train_loss[
"train/log_stft_magnitude_loss"
] += mag_loss.item()
# subband multi-resolution stft loss
if self.config["use_subband_stft_loss"]:
gen_loss *= 0.5 # for balancing with subband stft loss
if not self.is_vq:
y_mb = self.criterion["pqmf"].analysis(y)
sub_sc_loss, sub_mag_loss = self.criterion["sub_stft"](y_mb_, y_mb)
gen_loss += 0.5 * (sub_sc_loss + sub_mag_loss)
self.total_train_loss[
"train/sub_spectral_convergence_loss"
] += sub_sc_loss.item()
self.total_train_loss[
"train/sub_log_stft_magnitude_loss"
] += sub_mag_loss.item()
# mel spectrogram loss
if self.config["use_mel_loss"]:
mel_loss = self.criterion["mel"](y_, y)
gen_loss += mel_loss
self.total_train_loss["train/mel_loss"] += mel_loss.item()
# weighting aux loss
gen_loss *= self.config.get("lambda_aux", 1.0)
# adversarial loss
if self.steps > self.config["discriminator_train_start_steps"]:
p_ = self.model["discriminator"](y_)
adv_loss = self.criterion["gen_adv"](p_)
self.total_train_loss["train/adversarial_loss"] += adv_loss.item()
# feature matching loss
if self.config["use_feat_match_loss"]:
# no need to track gradients
with torch.no_grad():
p = self.model["discriminator"](y)
fm_loss = self.criterion["feat_match"](p_, p)
self.total_train_loss[
"train/feature_matching_loss"
] += fm_loss.item()
adv_loss += self.config["lambda_feat_match"] * fm_loss
# add adversarial loss to generator loss
gen_loss += self.config["lambda_adv"] * adv_loss
self.total_train_loss["train/generator_loss"] += gen_loss.item()
# update generator
self.optimizer["generator"].zero_grad()
gen_loss.backward()
if self.config["generator_grad_norm"] > 0:
torch.nn.utils.clip_grad_norm_(
self.model["generator"].parameters(),
self.config["generator_grad_norm"],
)
self.optimizer["generator"].step()
self.scheduler["generator"].step()
#######################
# Discriminator #
#######################
if self.steps > self.config["discriminator_train_start_steps"]:
if self.config.get("update_prediction_after_generator_update", True):
# re-compute y_ which leads better quality
with torch.no_grad():
if self.is_vq:
if self.config["generator_params"]["in_channels"] == 1:
y_, _, _ = self.model["generator"](y, *x)
else:
y_, _, _ = self.model["generator"](y_mb, *x)
elif self.use_duration_prediction:
assert ds is not None
y_, _ = self.model["generator"](x, ds)
else:
y_ = self.model["generator"](*x)
if self.config["generator_params"]["out_channels"] > 1:
y_ = self.criterion["pqmf"].synthesis(y_)
# discriminator loss
p = self.model["discriminator"](y)
p_ = self.model["discriminator"](y_.detach())
real_loss, fake_loss = self.criterion["dis_adv"](p_, p)
dis_loss = real_loss + fake_loss
self.total_train_loss["train/real_loss"] += real_loss.item()
self.total_train_loss["train/fake_loss"] += fake_loss.item()
self.total_train_loss["train/discriminator_loss"] += dis_loss.item()
# update discriminator
self.optimizer["discriminator"].zero_grad()
dis_loss.backward()
if self.config["discriminator_grad_norm"] > 0:
torch.nn.utils.clip_grad_norm_(
self.model["discriminator"].parameters(),
self.config["discriminator_grad_norm"],
)
self.optimizer["discriminator"].step()
self.scheduler["discriminator"].step()
# update counts
self.steps += 1
self.tqdm.update(1)
self._check_train_finish()
def _train_epoch(self):
"""Train model one epoch."""
for train_steps_per_epoch, batch in enumerate(self.data_loader["train"], 1):
# train one step
self._train_step(batch)
# check interval
if self.config["rank"] == 0:
self._check_log_interval()
self._check_eval_interval()
self._check_save_interval()
# check whether training is finished
if self.finish_train:
return
# update
self.epochs += 1
self.train_steps_per_epoch = train_steps_per_epoch
logging.info(
f"(Steps: {self.steps}) Finished {self.epochs} epoch training "
f"({self.train_steps_per_epoch} steps per epoch)."
)
# needed for shuffle in distributed training
if self.config["distributed"]:
self.sampler["train"].set_epoch(self.epochs)
    @torch.no_grad()
    def _eval_step(self, batch):
        """Evaluate model one step.

        Runs the generator and discriminator on a single dev batch without
        gradient tracking and accumulates every loss term into
        ``self.total_eval_loss``.  Mirrors the training-step loss
        computation but performs no parameter updates.
        """
        # parse batch and send to device
        if self.use_duration_prediction:
            x, y, ds = self._parse_batch(batch)
        else:
            x, y = self._parse_batch(batch)
        #######################
        #      Generator      #
        #######################
        if self.is_vq:
            # VQ-VAE variants also return encoder (z_e) and quantized (z_q)
            # latents, needed for the quantization / commitment losses
            if self.config["generator_params"]["in_channels"] == 1:
                y_, z_e, z_q = self.model["generator"](y, *x)
            else:
                y_mb = self.criterion["pqmf"].analysis(y)
                y_, z_e, z_q = self.model["generator"](y_mb, *x)
            quantize_loss = self.criterion["mse"](z_q, z_e.detach())
            commit_loss = self.criterion["mse"](z_e, z_q.detach())
        elif self.use_duration_prediction:
            assert ds is not None
            y_, ds_ = self.model["generator"](x, ds)
            # durations are compared in the log domain
            duration_loss = self.criterion["duration"](ds_, torch.log(ds))
        else:
            y_ = self.model["generator"](*x)
        if self.config["generator_params"]["out_channels"] > 1:
            # multi-band output: keep the sub-band signal for the sub-band
            # STFT loss and synthesize the full-band waveform via PQMF
            y_mb_ = y_
            y_ = self.criterion["pqmf"].synthesis(y_mb_)
        # initialize
        aux_loss = 0.0
        # multi-resolution stft loss
        if self.config["use_stft_loss"]:
            sc_loss, mag_loss = self.criterion["stft"](y_, y)
            aux_loss += sc_loss + mag_loss
            self.total_eval_loss["eval/spectral_convergence_loss"] += sc_loss.item()
            self.total_eval_loss["eval/log_stft_magnitude_loss"] += mag_loss.item()
        # subband multi-resolution stft loss
        if self.config.get("use_subband_stft_loss", False):
            aux_loss *= 0.5  # for balancing with subband stft loss
            if not self.is_vq:
                # in the VQ case y_mb was already computed above
                y_mb = self.criterion["pqmf"].analysis(y)
            sub_sc_loss, sub_mag_loss = self.criterion["sub_stft"](y_mb_, y_mb)
            self.total_eval_loss[
                "eval/sub_spectral_convergence_loss"
            ] += sub_sc_loss.item()
            self.total_eval_loss[
                "eval/sub_log_stft_magnitude_loss"
            ] += sub_mag_loss.item()
            aux_loss += 0.5 * (sub_sc_loss + sub_mag_loss)
        # mel spectrogram loss
        if self.config["use_mel_loss"]:
            mel_loss = self.criterion["mel"](y_, y)
            aux_loss += mel_loss
            self.total_eval_loss["eval/mel_loss"] += mel_loss.item()
        # weighting stft loss
        aux_loss *= self.config.get("lambda_aux", 1.0)
        # adversarial loss
        p_ = self.model["discriminator"](y_)
        adv_loss = self.criterion["gen_adv"](p_)
        gen_loss = aux_loss + self.config["lambda_adv"] * adv_loss
        # feature matching loss
        if self.config["use_feat_match_loss"]:
            p = self.model["discriminator"](y)
            fm_loss = self.criterion["feat_match"](p_, p)
            self.total_eval_loss["eval/feature_matching_loss"] += fm_loss.item()
            gen_loss += (
                self.config["lambda_adv"] * self.config["lambda_feat_match"] * fm_loss
            )
        #######################
        #    Discriminator    #
        #######################
        p = self.model["discriminator"](y)
        p_ = self.model["discriminator"](y_)
        # discriminator loss
        real_loss, fake_loss = self.criterion["dis_adv"](p_, p)
        dis_loss = real_loss + fake_loss
        # add to total eval loss
        self.total_eval_loss["eval/adversarial_loss"] += adv_loss.item()
        self.total_eval_loss["eval/generator_loss"] += gen_loss.item()
        self.total_eval_loss["eval/real_loss"] += real_loss.item()
        self.total_eval_loss["eval/fake_loss"] += fake_loss.item()
        self.total_eval_loss["eval/discriminator_loss"] += dis_loss.item()
        if self.is_vq:
            self.total_eval_loss["eval/quantization_loss"] += quantize_loss.item()
            self.total_eval_loss["eval/commitment_loss"] += commit_loss.item()
        if self.use_duration_prediction:
            self.total_eval_loss["eval/duration_loss"] += duration_loss.item()
def _eval_epoch(self):
"""Evaluate model one epoch."""
logging.info(f"(Steps: {self.steps}) Start evaluation.")
# change mode
for key in self.model.keys():
self.model[key].eval()
# calculate loss for each batch
for eval_steps_per_epoch, batch in enumerate(
tqdm(self.data_loader["dev"], desc="[eval]"), 1
):
# eval one step
self._eval_step(batch)
# save intermediate result
if eval_steps_per_epoch == 1:
self._genearete_and_save_intermediate_result(batch)
logging.info(
f"(Steps: {self.steps}) Finished evaluation "
f"({eval_steps_per_epoch} steps per epoch)."
)
# average loss
for key in self.total_eval_loss.keys():
self.total_eval_loss[key] /= eval_steps_per_epoch
logging.info(
f"(Steps: {self.steps}) {key} = {self.total_eval_loss[key]:.4f}."
)
# record
self._write_to_tensorboard(self.total_eval_loss)
# reset
self.total_eval_loss = defaultdict(float)
# restore mode
for key in self.model.keys():
self.model[key].train()
@torch.no_grad()
def _genearete_and_save_intermediate_result(self, batch):
"""Generate and save intermediate result."""
# delayed import to avoid error related backend error
import matplotlib.pyplot as plt
# parse batch and send to device
if self.use_duration_prediction:
x_batch, y_batch, _ = self._parse_batch(batch)
else:
x_batch, y_batch = self._parse_batch(batch)
# generate
if self.is_vq:
if self.config["generator_params"]["in_channels"] == 1:
y_batch_, _, _ = self.model["generator"](y_batch, *x_batch)
else:
y_batch_, _, _ = self.model["generator"](
self.criterion["pqmf"].analysis(y_batch), *x_batch
)
elif self.use_duration_prediction:
y_batch_, _ = self.model["generator"].synthesis(x_batch)
else:
y_batch_ = self.model["generator"](*x_batch)
if self.config["generator_params"]["out_channels"] > 1:
y_batch_ = self.criterion["pqmf"].synthesis(y_batch_)
# check directory
dirname = os.path.join(self.config["outdir"], f"predictions/{self.steps}steps")
if not os.path.exists(dirname):
os.makedirs(dirname)
for idx, (y, y_) in enumerate(zip(y_batch, y_batch_), 1):
# convert to ndarray
y, y_ = y.view(-1).cpu().numpy(), y_.view(-1).cpu().numpy()
# plot figure and save it
figname = os.path.join(dirname, f"{idx}.png")
plt.subplot(2, 1, 1)
plt.plot(y)
plt.title("groundtruth speech")
plt.subplot(2, 1, 2)
plt.plot(y_)
plt.title(f"generated speech @ {self.steps} steps")
plt.tight_layout()
plt.savefig(figname)
plt.close()
# save as wavfile
y = np.clip(y, -1, 1)
y_ = np.clip(y_, -1, 1)
sf.write(
figname.replace(".png", "_ref.wav"),
y,
self.config["sampling_rate"],
"PCM_16",
)
sf.write(
figname.replace(".png", "_gen.wav"),
y_,
self.config["sampling_rate"],
"PCM_16",
)
if idx >= self.config["num_save_intermediate_results"]:
break
def _parse_batch(self, batch):
"""Parse batch and send to the device."""
# parse batch
if self.use_duration_prediction:
inputs, targets, durations = batch
else:
inputs, targets = batch
# send inputs to device
if isinstance(inputs, torch.Tensor):
x = inputs.to(self.device)
elif isinstance(inputs, (tuple, list)):
x = [None if x is None else x.to(self.device) for x in inputs]
else:
raise ValueError(f"Not supported type ({type(inputs)}).")
# send targets to device
if isinstance(targets, torch.Tensor):
y = targets.to(self.device)
elif isinstance(targets, (tuple, list)):
y = [None if y is None else y.to(self.device) for y in targets]
else:
raise ValueError(f"Not supported type ({type(targets)}).")
if self.use_duration_prediction:
# send durations to device (for model with duration prediction only)
if isinstance(durations, torch.Tensor):
ds = durations.to(self.device)
elif isinstance(durations, (tuple, list)):
ds = [None if d is None else d.to(self.device) for d in durations]
else:
raise ValueError(f"Not supported type ({type(durations)}).")
return x, y, ds
return x, y
def _write_to_tensorboard(self, loss):
"""Write to tensorboard."""
for key, value in loss.items():
self.writer.add_scalar(key, value, self.steps)
def _check_save_interval(self):
if self.steps % self.config["save_interval_steps"] == 0:
self.save_checkpoint(
os.path.join(self.config["outdir"], f"checkpoint-{self.steps}steps.pkl")
)
logging.info(f"Successfully saved checkpoint @ {self.steps} steps.")
def _check_eval_interval(self):
if self.steps % self.config["eval_interval_steps"] == 0:
self._eval_epoch()
def _check_log_interval(self):
if self.steps % self.config["log_interval_steps"] == 0:
for key in self.total_train_loss.keys():
self.total_train_loss[key] /= self.config["log_interval_steps"]
logging.info(
f"(Steps: {self.steps}) {key} = {self.total_train_loss[key]:.4f}."
)
self._write_to_tensorboard(self.total_train_loss)
# reset
self.total_train_loss = defaultdict(float)
def _check_train_finish(self):
if self.steps >= self.config["train_max_steps"]:
self.finish_train = True
class Collater(object):
    """Customized collater for Pytorch DataLoader in training.

    Supports three collation modes depending on the flags:
    mel-to-wave (auxiliary features, optionally with noise / f0+excitation
    inputs), mel-to-wave with duration prediction, and wave-to-wave for
    VQ-VAE models (with optional local and/or global conditioning).
    In every mode a fixed-length segment of ``batch_max_steps`` samples is
    randomly cut from each utterance.
    """

    def __init__(
        self,
        batch_max_steps=20480,
        hop_size=256,
        aux_context_window=2,
        use_noise_input=False,
        use_f0_and_excitation=False,
        use_aux_input=True,
        use_duration=False,
        use_global_condition=False,
        use_local_condition=False,
        pad_value=0,
    ):
        """Initialize customized collater for PyTorch DataLoader.

        Args:
            batch_max_steps (int): The maximum length of input signal in batch.
            hop_size (int): Hop size of auxiliary features.
            aux_context_window (int): Context window size for auxiliary feature conv.
            use_noise_input (bool): Whether to use noise input.
            use_f0_and_excitation (bool): Whether to use f0 and ext. input.
            use_aux_input (bool): Whether to use auxiliary input.
            use_duration (bool): Whether to use duration for duration prediction.
            use_global_condition (bool): Whether to use global conditioning.
            use_local_condition (bool): Whether to use local conditioning.
            pad_value (int): Padding value for discrete symbol sequences when
                duration prediction is enabled.

        """
        if hop_size is not None:
            # round batch_max_steps down to a multiple of hop_size so that
            # audio segments stay aligned with feature frames
            if batch_max_steps % hop_size != 0:
                batch_max_steps += -(batch_max_steps % hop_size)
            assert batch_max_steps % hop_size == 0
            self.hop_size = hop_size
            self.batch_max_frames = batch_max_steps // hop_size
        self.batch_max_steps = batch_max_steps
        self.aux_context_window = aux_context_window
        self.use_noise_input = use_noise_input
        self.use_f0_and_excitation = use_f0_and_excitation
        self.use_aux_input = use_aux_input
        self.use_duration = use_duration
        self.use_global_condition = use_global_condition
        self.use_local_condition = use_local_condition
        self.pad_value = pad_value

        # sanity checks: these input modes are mutually exclusive
        if not self.use_aux_input:
            assert not self.use_noise_input, "Not supported."
            assert not self.use_duration, "Not supported."
        if self.use_noise_input:
            assert not self.use_duration, "Not supported."
        if self.use_local_condition:
            assert not self.use_aux_input and not self.use_duration, "Not supported."
        if self.use_global_condition:
            assert not self.use_aux_input and not self.use_duration, "Not supported."

        # set useful values in random cutting
        if self.use_aux_input or self.use_local_condition:
            # offsets / thresholds are expressed in feature frames
            self.start_offset = aux_context_window
            self.end_offset = -(self.batch_max_frames + aux_context_window)
            self.mel_threshold = self.batch_max_frames + 2 * aux_context_window
        else:
            # offsets / thresholds are expressed directly in audio samples
            self.start_offset = 0
            self.end_offset = -self.batch_max_steps
            self.audio_threshold = self.batch_max_steps

    def __call__(self, batch):
        """Convert into batch tensors.

        Args:
            batch (list): list of tuple of the pair of audio and features.

        Returns:
            Tuple: Tuple of Gaussian noise batch (B, 1, T) and auxiliary feature
                batch (B, C, T'), where T = (T' - 2 * aux_context_window) * hop_size.
                If use_noise_input = False, Gaussian noise batch is not included.
                If use_aux_input = False, auxiliary feature batch is not included.
                If both use_noise_input and use_aux_input to False, this tuple is
                not returned.
            Tensor: Target signal batch (B, 1, T).

        """
        if self.use_aux_input:
            #################################
            #          MEL2WAV CASE         #
            #################################
            # check length (drop utterances too short to cut a full segment)
            batch = [
                self._adjust_length(*b) for b in batch if len(b[1]) > self.mel_threshold
            ]
            xs, cs = [b[0] for b in batch], [b[1] for b in batch]
            if self.use_f0_and_excitation:
                fs, es = [b[2] for b in batch], [b[3] for b in batch]
            # make batch with random cut
            c_lengths = [len(c) for c in cs]
            start_frames = np.array(
                [
                    np.random.randint(self.start_offset, cl + self.end_offset)
                    for cl in c_lengths
                ]
            )
            x_starts = start_frames * self.hop_size
            x_ends = x_starts + self.batch_max_steps
            # feature segments include the extra context window on both sides
            c_starts = start_frames - self.aux_context_window
            c_ends = start_frames + self.batch_max_frames + self.aux_context_window
            y_batch = [x[start:end] for x, start, end in zip(xs, x_starts, x_ends)]
            c_batch = [c[start:end] for c, start, end in zip(cs, c_starts, c_ends)]
            # convert each batch to tensor, assume that each item in batch has the same length
            y_batch, c_batch = np.array(y_batch), np.array(c_batch)
            y_batch = torch.tensor(y_batch, dtype=torch.float).unsqueeze(1)  # (B, 1, T)
            if self.use_f0_and_excitation:
                f_batch = [f[start:end] for f, start, end in zip(fs, c_starts, c_ends)]
                e_batch = [e[start:end] for e, start, end in zip(es, c_starts, c_ends)]
                f_batch, e_batch = np.array(f_batch), np.array(e_batch)
                f_batch = torch.tensor(f_batch, dtype=torch.float).unsqueeze(
                    1
                )  # (B, 1, T')
                e_batch = torch.tensor(e_batch, dtype=torch.float)  # (B, 1, T', C')
                e_batch = e_batch.reshape(e_batch.shape[0], 1, -1)  # (B, 1, T' * C')
            # duration calculation and return with duration information
            if self.use_duration:
                updated_c_batch, d_batch = [], []
                for c in c_batch:
                    # NOTE(jiatong): assume 0 is the discrete symbol
                    # (refer to cvss_c/local/preprocess_hubert.py)
                    code, d = torch.unique_consecutive(
                        torch.tensor(c, dtype=torch.long), return_counts=True, dim=0
                    )
                    updated_c_batch.append(code)
                    d_batch.append(d)
                c_batch = self._pad_list(updated_c_batch, self.pad_value).transpose(
                    2, 1
                )  # (B, C, T')
                d_batch = self._pad_list(d_batch, 0)
                return c_batch, y_batch, d_batch
            # process data without duration prediction
            c_batch = torch.tensor(c_batch, dtype=torch.float).transpose(
                2, 1
            )  # (B, C, T')
            input_items = (c_batch,)
            if self.use_noise_input:
                # make input noise signal batch tensor
                z_batch = torch.randn(y_batch.size())  # (B, 1, T)
                input_items = (z_batch,) + input_items
            if self.use_f0_and_excitation:
                input_items = input_items + (f_batch, e_batch)
            return input_items, y_batch
        else:
            #################################
            #        VQ-WAV2WAV CASE        #
            #################################
            if self.use_local_condition:
                # check length
                batch_idx = [
                    idx
                    for idx, b in enumerate(batch)
                    if len(b[1]) >= self.mel_threshold
                ]
                # fix length
                batch_ = [
                    self._adjust_length(batch[idx][0], batch[idx][1])
                    for idx in batch_idx
                ]
                # decide random index
                l_lengths = [len(b[1]) for b in batch_]
                l_starts = np.array(
                    [
                        np.random.randint(self.start_offset, ll + self.end_offset)
                        for ll in l_lengths
                    ]
                )
                l_ends = l_starts + self.batch_max_frames
                y_starts = l_starts * self.hop_size
                y_ends = y_starts + self.batch_max_steps
                # make random batch
                y_batch = [
                    b[0][start:end] for b, start, end in zip(batch_, y_starts, y_ends)
                ]
                l_batch = [
                    b[1][start:end] for b, start, end in zip(batch_, l_starts, l_ends)
                ]
                if self.use_global_condition:
                    g_batch = [batch[idx][2].reshape(1) for idx in batch_idx]
            else:
                # check length
                if self.use_global_condition:
                    batch = [b for b in batch if len(b[0]) >= self.audio_threshold]
                else:
                    # wrap bare arrays in a tuple for uniform b[0] indexing below
                    batch = [(b,) for b in batch if len(b) >= self.audio_threshold]
                # decide random index
                y_lengths = [len(b[0]) for b in batch]
                y_starts = np.array(
                    [
                        np.random.randint(self.start_offset, yl + self.end_offset)
                        for yl in y_lengths
                    ]
                )
                y_ends = y_starts + self.batch_max_steps
                # make random batch
                y_batch = [
                    b[0][start:end] for b, start, end in zip(batch, y_starts, y_ends)
                ]
                if self.use_global_condition:
                    g_batch = [b[1].reshape(1) for b in batch]
            # convert each batch to tensor, assume that each item in batch has the same length
            y_batch = torch.tensor(y_batch, dtype=torch.float).unsqueeze(1)  # (B, 1, T)
            if self.use_local_condition:
                l_batch = torch.tensor(l_batch, dtype=torch.float).transpose(
                    2, 1
                )  # (B, C' T')
            else:
                l_batch = None
            if self.use_global_condition:
                g_batch = torch.tensor(g_batch, dtype=torch.long).view(-1)  # (B,)
            else:
                g_batch = None
            # NOTE(kan-bayashi): Always return "l" and "g" since VQ-VAE can accept None
            return (l_batch, g_batch), y_batch

    def _adjust_length(self, x, c, f0=None, excitation=None):
        """Adjust the audio and feature lengths.

        Note:
            Basically we assume that the length of x and c are adjusted
            through preprocessing stage, but if we use other library processed
            features, this process will be needed.

        """
        if len(x) < len(c) * self.hop_size:
            # pad the tail of the audio by repeating the edge sample
            x = np.pad(x, (0, len(c) * self.hop_size - len(x)), mode="edge")
        # check the length is valid
        assert len(x) == len(c) * self.hop_size
        if f0 is not None and excitation is not None:
            return x, c, f0, excitation
        else:
            return x, c

    def _pad_list(self, xs, pad_value):
        """Perform padding for the list of tensors.

        Args:
            xs (List): List of Tensors [(T_1, `*`), (T_2, `*`), ..., (T_B, `*`)].
            pad_value (float): Value for padding.

        Returns:
            Tensor: Padded tensor (B, Tmax, `*`).

        Examples:
            >>> x = [torch.ones(4), torch.ones(2), torch.ones(1)]
            >>> x
            [tensor([1., 1., 1., 1.]), tensor([1., 1.]), tensor([1.])]
            >>> pad_list(x, 0)
            tensor([[1., 1., 1., 1.],
                    [1., 1., 0., 0.],
                    [1., 0., 0., 0.]])

        """
        n_batch = len(xs)
        max_len = max(x.size(0) for x in xs)
        # allocate the output on the same device/dtype as the inputs
        pad = xs[0].new(n_batch, max_len, *xs[0].size()[1:]).fill_(pad_value)
        for i in range(n_batch):
            pad[i, : xs[i].size(0)] = xs[i]
        return pad
def main():
    """Run training process.

    Parses command-line arguments, builds datasets, models, criterions,
    optimizers, and schedulers from the YAML config, then launches the
    training loop (optionally loading pretrained params or resuming from
    a checkpoint).
    """
    parser = argparse.ArgumentParser(
        description=(
            "Train Parallel WaveGAN (See detail in parallel_wavegan/bin/train.py)."
        )
    )
    parser.add_argument(
        "--train-wav-scp",
        default=None,
        type=str,
        help=(
            "kaldi-style wav.scp file for training. "
            "you need to specify either train-*-scp or train-dumpdir."
        ),
    )
    parser.add_argument(
        "--train-feats-scp",
        default=None,
        type=str,
        help=(
            "kaldi-style feats.scp file for training. "
            "you need to specify either train-*-scp or train-dumpdir."
        ),
    )
    parser.add_argument(
        "--train-segments",
        default=None,
        type=str,
        help="kaldi-style segments file for training.",
    )
    parser.add_argument(
        "--train-dumpdir",
        default=None,
        type=str,
        help=(
            "directory including training data. "
            "you need to specify either train-*-scp or train-dumpdir."
        ),
    )
    parser.add_argument(
        "--dev-wav-scp",
        default=None,
        type=str,
        help=(
            "kaldi-style wav.scp file for validation. "
            "you need to specify either dev-*-scp or dev-dumpdir."
        ),
    )
    parser.add_argument(
        "--dev-feats-scp",
        default=None,
        type=str,
        help=(
            "kaldi-style feats.scp file for vaidation. "
            "you need to specify either dev-*-scp or dev-dumpdir."
        ),
    )
    parser.add_argument(
        "--dev-segments",
        default=None,
        type=str,
        help="kaldi-style segments file for validation.",
    )
    parser.add_argument(
        "--dev-dumpdir",
        default=None,
        type=str,
        help=(
            "directory including development data. "
            "you need to specify either dev-*-scp or dev-dumpdir."
        ),
    )
    parser.add_argument(
        "--outdir",
        type=str,
        required=True,
        help="directory to save checkpoints.",
    )
    parser.add_argument(
        "--config",
        type=str,
        required=True,
        help="yaml format configuration file.",
    )
    parser.add_argument(
        "--pretrain",
        default="",
        type=str,
        nargs="?",
        help='checkpoint file path to load pretrained params. (default="")',
    )
    parser.add_argument(
        "--resume",
        default="",
        type=str,
        nargs="?",
        help='checkpoint file path to resume training. (default="")',
    )
    parser.add_argument(
        "--verbose",
        type=int,
        default=1,
        help="logging level. higher is more logging. (default=1)",
    )
    parser.add_argument(
        # "--local_rank" kept as an alias for torch.distributed launchers
        "--rank",
        "--local_rank",
        default=0,
        type=int,
        help="rank for distributed training. no need to explictly specify.",
    )
    args = parser.parse_args()

    args.distributed = False
    if not torch.cuda.is_available():
        device = torch.device("cpu")
    else:
        device = torch.device("cuda")
        # effective when using fixed size inputs
        # see https://discuss.pytorch.org/t/what-does-torch-backends-cudnn-benchmark-do/5936
        torch.backends.cudnn.benchmark = True
        torch.cuda.set_device(args.rank)
        # setup for distributed training
        # see example: https://github.com/NVIDIA/apex/tree/master/examples/simple/distributed
        if "WORLD_SIZE" in os.environ:
            args.world_size = int(os.environ["WORLD_SIZE"])
            args.distributed = args.world_size > 1
        if args.distributed:
            torch.distributed.init_process_group(backend="nccl", init_method="env://")

    # suppress logging for distributed training
    if args.rank != 0:
        sys.stdout = open(os.devnull, "w")

    # set logger
    if args.verbose > 1:
        logging.basicConfig(
            level=logging.DEBUG,
            stream=sys.stdout,
            format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
        )
    elif args.verbose > 0:
        logging.basicConfig(
            level=logging.INFO,
            stream=sys.stdout,
            format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
        )
    else:
        logging.basicConfig(
            level=logging.WARN,
            stream=sys.stdout,
            format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
        )
        logging.warning("Skip DEBUG/INFO messages")

    # check directory existence
    if not os.path.exists(args.outdir):
        os.makedirs(args.outdir)

    # check arguments (exactly one of dumpdir / scp must be given per split)
    if (args.train_feats_scp is not None and args.train_dumpdir is not None) or (
        args.train_feats_scp is None and args.train_dumpdir is None
    ):
        raise ValueError("Please specify either --train-dumpdir or --train-*-scp.")
    if (args.dev_feats_scp is not None and args.dev_dumpdir is not None) or (
        args.dev_feats_scp is None and args.dev_dumpdir is None
    ):
        raise ValueError("Please specify either --dev-dumpdir or --dev-*-scp.")

    # load and save config
    # NOTE(review): yaml.Loader can construct arbitrary Python objects;
    # only load trusted configuration files here.
    with open(args.config) as f:
        config = yaml.load(f, Loader=yaml.Loader)
    config.update(vars(args))
    config["version"] = parallel_wavegan.__version__  # add version info
    with open(os.path.join(args.outdir, "config.yml"), "w") as f:
        yaml.dump(config, f, Dumper=yaml.Dumper)
    for key, value in config.items():
        logging.info(f"{key} = {value}")

    # get configuration (mode flags are derived from the generator type name)
    generator_type = config.get("generator_type", "ParallelWaveGANGenerator")
    use_aux_input = "VQVAE" not in generator_type
    use_noise_input = (
        "ParallelWaveGAN" in generator_type and "VQVAE" not in generator_type
    )
    use_duration = "Duration" in generator_type
    use_local_condition = config.get("use_local_condition", False)
    use_global_condition = config.get("use_global_condition", False)
    use_f0_and_excitation = generator_type == "UHiFiGANGenerator"

    # setup query and load function
    if args.train_wav_scp is None or args.dev_wav_scp is None:
        local_query = None
        local_load_fn = None
        global_query = None
        global_load_fn = None
        if config["format"] == "hdf5":
            audio_query, mel_query = "*.h5", "*.h5"
            audio_load_fn = lambda x: read_hdf5(x, "wave")  # NOQA
            mel_load_fn = lambda x: read_hdf5(x, "feats")  # NOQA
            if use_f0_and_excitation:
                f0_query, excitation_query = "*.h5", "*.h5"
                f0_load_fn = lambda x: read_hdf5(x, "f0")  # NOQA
                excitation_load_fn = lambda x: read_hdf5(x, "excitation")  # NOQA
            if use_local_condition:
                local_query = "*.h5"
                local_load_fn = lambda x: read_hdf5(x, "local")  # NOQA
            if use_global_condition:
                global_query = "*.h5"
                global_load_fn = lambda x: read_hdf5(x, "global")  # NOQA
        elif config["format"] == "npy":
            audio_query, mel_query = "*-wave.npy", "*-feats.npy"
            audio_load_fn = np.load
            mel_load_fn = np.load
            if use_f0_and_excitation:
                f0_query, excitation_query = "*-f0.npy", "*-excitation.npy"
                f0_load_fn = np.load
                excitation_load_fn = np.load
            if use_local_condition:
                local_query = "*-local.npy"
                local_load_fn = np.load
            if use_global_condition:
                global_query = "*-global.npy"
                global_load_fn = np.load
        else:
            raise ValueError("support only hdf5 or npy format.")

    # setup length threshold
    if config["remove_short_samples"]:
        audio_length_threshold = config["batch_max_steps"]
        mel_length_threshold = config["batch_max_steps"] // config[
            "hop_size"
        ] + 2 * config["generator_params"].get("aux_context_window", 0)
    else:
        mel_length_threshold = None
        audio_length_threshold = None

    # define dataset for training data
    if args.train_dumpdir is not None:
        if not use_f0_and_excitation:
            if use_aux_input:
                train_dataset = AudioMelDataset(
                    root_dir=args.train_dumpdir,
                    audio_query=audio_query,
                    audio_load_fn=audio_load_fn,
                    mel_query=mel_query,
                    mel_load_fn=mel_load_fn,
                    local_query=local_query,
                    local_load_fn=local_load_fn,
                    global_query=global_query,
                    global_load_fn=global_load_fn,
                    mel_length_threshold=mel_length_threshold,
                    allow_cache=config.get("allow_cache", False),  # keep compatibility
                )
            else:
                train_dataset = AudioDataset(
                    root_dir=args.train_dumpdir,
                    audio_query=audio_query,
                    audio_load_fn=audio_load_fn,
                    local_query=local_query,
                    local_load_fn=local_load_fn,
                    global_query=global_query,
                    global_load_fn=global_load_fn,
                    audio_length_threshold=audio_length_threshold,
                    allow_cache=config.get("allow_cache", False),  # keep compatibility
                )
        else:
            train_dataset = AudioMelF0ExcitationDataset(
                root_dir=args.train_dumpdir,
                audio_query=audio_query,
                mel_query=mel_query,
                f0_query=f0_query,
                excitation_query=excitation_query,
                audio_load_fn=audio_load_fn,
                mel_load_fn=mel_load_fn,
                f0_load_fn=f0_load_fn,
                excitation_load_fn=excitation_load_fn,
                mel_length_threshold=mel_length_threshold,
                allow_cache=config.get("allow_cache", False),  # keep compatibility
            )
    else:
        if use_f0_and_excitation:
            raise NotImplementedError(
                "SCP format is not supported for f0 and excitation."
            )
        if use_local_condition:
            raise NotImplementedError("Not supported.")
        if use_global_condition:
            raise NotImplementedError("Not supported.")
        if use_aux_input:
            train_dataset = AudioMelSCPDataset(
                wav_scp=args.train_wav_scp,
                feats_scp=args.train_feats_scp,
                segments=args.train_segments,
                mel_length_threshold=mel_length_threshold,
                allow_cache=config.get("allow_cache", False),  # keep compatibility
            )
        else:
            train_dataset = AudioSCPDataset(
                wav_scp=args.train_wav_scp,
                segments=args.train_segments,
                audio_length_threshold=audio_length_threshold,
                allow_cache=config.get("allow_cache", False),  # keep compatibility
            )

    # define dataset for validation
    if args.dev_dumpdir is not None:
        if not use_f0_and_excitation:
            if use_aux_input:
                dev_dataset = AudioMelDataset(
                    root_dir=args.dev_dumpdir,
                    audio_query=audio_query,
                    audio_load_fn=audio_load_fn,
                    mel_query=mel_query,
                    mel_load_fn=mel_load_fn,
                    local_query=local_query,
                    local_load_fn=local_load_fn,
                    global_query=global_query,
                    global_load_fn=global_load_fn,
                    mel_length_threshold=mel_length_threshold,
                    allow_cache=config.get("allow_cache", False),  # keep compatibility
                )
            else:
                dev_dataset = AudioDataset(
                    root_dir=args.dev_dumpdir,
                    audio_query=audio_query,
                    audio_load_fn=audio_load_fn,
                    local_query=local_query,
                    local_load_fn=local_load_fn,
                    global_query=global_query,
                    global_load_fn=global_load_fn,
                    audio_length_threshold=audio_length_threshold,
                    allow_cache=config.get("allow_cache", False),  # keep compatibility
                )
        else:
            dev_dataset = AudioMelF0ExcitationDataset(
                root_dir=args.dev_dumpdir,
                audio_query=audio_query,
                mel_query=mel_query,
                f0_query=f0_query,
                excitation_query=excitation_query,
                audio_load_fn=audio_load_fn,
                mel_load_fn=mel_load_fn,
                f0_load_fn=f0_load_fn,
                excitation_load_fn=excitation_load_fn,
                mel_length_threshold=mel_length_threshold,
                allow_cache=config.get("allow_cache", False),  # keep compatibility
            )
    else:
        if use_f0_and_excitation:
            raise NotImplementedError(
                "SCP format is not supported for f0 and excitation."
            )
        if use_local_condition:
            raise NotImplementedError("Not supported.")
        if use_global_condition:
            raise NotImplementedError("Not supported.")
        if use_aux_input:
            dev_dataset = AudioMelSCPDataset(
                wav_scp=args.dev_wav_scp,
                feats_scp=args.dev_feats_scp,
                segments=args.dev_segments,
                mel_length_threshold=mel_length_threshold,
                allow_cache=config.get("allow_cache", False),  # keep compatibility
            )
        else:
            dev_dataset = AudioSCPDataset(
                wav_scp=args.dev_wav_scp,
                segments=args.dev_segments,
                audio_length_threshold=audio_length_threshold,
                allow_cache=config.get("allow_cache", False),  # keep compatibility
            )

    # store into dataset dict
    dataset = {
        "train": train_dataset,
        "dev": dev_dataset,
    }
    logging.info(f"The number of training files = {len(train_dataset)}.")
    logging.info(f"The number of development files = {len(dev_dataset)}.")

    # get data loader
    collater = Collater(
        batch_max_steps=config["batch_max_steps"],
        hop_size=config.get("hop_size", None),
        aux_context_window=config["generator_params"].get("aux_context_window", 0),
        use_f0_and_excitation=use_f0_and_excitation,
        use_noise_input=use_noise_input,
        use_aux_input=use_aux_input,
        use_duration=use_duration,
        use_global_condition=use_global_condition,
        use_local_condition=use_local_condition,
        pad_value=config["generator_params"].get(
            "num_embs", 0
        ),  # assume 0-based discrete symbol
    )
    sampler = {"train": None, "dev": None}
    if args.distributed:
        # setup sampler for distributed training
        from torch.utils.data.distributed import DistributedSampler

        sampler["train"] = DistributedSampler(
            dataset=dataset["train"],
            num_replicas=args.world_size,
            rank=args.rank,
            shuffle=True,
        )
        sampler["dev"] = DistributedSampler(
            dataset=dataset["dev"],
            num_replicas=args.world_size,
            rank=args.rank,
            shuffle=False,
        )
    data_loader = {
        "train": DataLoader(
            dataset=dataset["train"],
            shuffle=False if args.distributed else True,
            collate_fn=collater,
            batch_size=config["batch_size"],
            num_workers=config["num_workers"],
            sampler=sampler["train"],
            pin_memory=config["pin_memory"],
        ),
        "dev": DataLoader(
            dataset=dataset["dev"],
            shuffle=False if args.distributed else True,
            collate_fn=collater,
            batch_size=config["batch_size"],
            num_workers=config["num_workers"],
            sampler=sampler["dev"],
            pin_memory=config["pin_memory"],
        ),
    }

    # define models (classes are resolved by name from parallel_wavegan.models)
    generator_class = getattr(
        parallel_wavegan.models,
        # keep compatibility
        config.get("generator_type", "ParallelWaveGANGenerator"),
    )
    discriminator_class = getattr(
        parallel_wavegan.models,
        # keep compatibility
        config.get("discriminator_type", "ParallelWaveGANDiscriminator"),
    )
    model = {
        "generator": generator_class(
            **config["generator_params"],
        ).to(device),
        "discriminator": discriminator_class(
            **config["discriminator_params"],
        ).to(device),
    }

    # define criterions
    criterion = {
        "gen_adv": GeneratorAdversarialLoss(
            # keep compatibility
            **config.get("generator_adv_loss_params", {})
        ).to(device),
        "dis_adv": DiscriminatorAdversarialLoss(
            # keep compatibility
            **config.get("discriminator_adv_loss_params", {})
        ).to(device),
        "mse": torch.nn.MSELoss().to(device),
    }
    if config.get("use_stft_loss", True):  # keep compatibility
        config["use_stft_loss"] = True
        criterion["stft"] = MultiResolutionSTFTLoss(
            **config["stft_loss_params"],
        ).to(device)
    if config.get("use_subband_stft_loss", False):  # keep compatibility
        assert config["generator_params"]["out_channels"] > 1
        criterion["sub_stft"] = MultiResolutionSTFTLoss(
            **config["subband_stft_loss_params"],
        ).to(device)
    else:
        config["use_subband_stft_loss"] = False
    if config.get("use_feat_match_loss", False):  # keep compatibility
        criterion["feat_match"] = FeatureMatchLoss(
            # keep compatibility
            **config.get("feat_match_loss_params", {}),
        ).to(device)
    else:
        config["use_feat_match_loss"] = False
    if config.get("use_mel_loss", False):  # keep compatibility
        if config.get("mel_loss_params", None) is None:
            # fall back to the top-level feature-extraction settings
            criterion["mel"] = MelSpectrogramLoss(
                fs=config["sampling_rate"],
                fft_size=config["fft_size"],
                hop_size=config["hop_size"],
                win_length=config["win_length"],
                window=config["window"],
                num_mels=config["num_mels"],
                fmin=config["fmin"],
                fmax=config["fmax"],
            ).to(device)
        else:
            criterion["mel"] = MelSpectrogramLoss(
                **config["mel_loss_params"],
            ).to(device)
    else:
        config["use_mel_loss"] = False
    if config.get("use_duration_loss", False):  # keep compatibility
        if config.get("duration_loss_params", None) is None:
            criterion["duration"] = DurationPredictorLoss(
                offset=config["offset"],
                reduction=config["reduction"],
            ).to(device)
        else:
            criterion["duration"] = DurationPredictorLoss(
                **config["duration_loss_params"],
            ).to(device)
    else:
        config["use_duration_loss"] = False

    # define special module for subband processing
    if config["generator_params"]["out_channels"] > 1:
        criterion["pqmf"] = PQMF(
            subbands=config["generator_params"]["out_channels"],
            # keep compatibility
            **config.get("pqmf_params", {}),
        ).to(device)

    # define optimizers and schedulers
    generator_optimizer_class = getattr(
        parallel_wavegan.optimizers,
        # keep compatibility
        config.get("generator_optimizer_type", "RAdam"),
    )
    discriminator_optimizer_class = getattr(
        parallel_wavegan.optimizers,
        # keep compatibility
        config.get("discriminator_optimizer_type", "RAdam"),
    )
    optimizer = {
        "generator": generator_optimizer_class(
            model["generator"].parameters(),
            **config["generator_optimizer_params"],
        ),
        "discriminator": discriminator_optimizer_class(
            model["discriminator"].parameters(),
            **config["discriminator_optimizer_params"],
        ),
    }
    generator_scheduler_class = getattr(
        torch.optim.lr_scheduler,
        # keep compatibility
        config.get("generator_scheduler_type", "StepLR"),
    )
    discriminator_scheduler_class = getattr(
        torch.optim.lr_scheduler,
        # keep compatibility
        config.get("discriminator_scheduler_type", "StepLR"),
    )
    scheduler = {
        "generator": generator_scheduler_class(
            optimizer=optimizer["generator"],
            **config["generator_scheduler_params"],
        ),
        "discriminator": discriminator_scheduler_class(
            optimizer=optimizer["discriminator"],
            **config["discriminator_scheduler_params"],
        ),
    }
    if args.distributed:
        # wrap model for distributed training
        try:
            from apex.parallel import DistributedDataParallel
        except ImportError:
            raise ImportError(
                "apex is not installed. please check https://github.com/NVIDIA/apex."
            )
        model["generator"] = DistributedDataParallel(model["generator"])
        model["discriminator"] = DistributedDataParallel(model["discriminator"])

    # show settings
    logging.info(model["generator"])
    logging.info(model["discriminator"])
    logging.info(optimizer["generator"])
    logging.info(optimizer["discriminator"])
    logging.info(scheduler["generator"])
    logging.info(scheduler["discriminator"])
    for criterion_ in criterion.values():
        logging.info(criterion_)

    # define trainer
    trainer = Trainer(
        steps=0,
        epochs=0,
        data_loader=data_loader,
        sampler=sampler,
        model=model,
        criterion=criterion,
        optimizer=optimizer,
        scheduler=scheduler,
        config=config,
        device=device,
    )

    # load pretrained parameters from checkpoint
    if len(args.pretrain) != 0:
        trainer.load_checkpoint(args.pretrain, load_only_params=True)
        logging.info(f"Successfully load parameters from {args.pretrain}.")

    # resume from checkpoint
    if len(args.resume) != 0:
        trainer.load_checkpoint(args.resume)
        logging.info(f"Successfully resumed from {args.resume}.")

    # run training loop; always save a final checkpoint, even on failure
    try:
        trainer.run()
    finally:
        trainer.save_checkpoint(
            os.path.join(config["outdir"], f"checkpoint-{trainer.steps}steps.pkl")
        )
        logging.info(f"Successfully saved checkpoint @ {trainer.steps}steps.")
# CLI entry point.
if __name__ == "__main__":
    main()
| 59,276 | 37.218569 | 110 | py |
ParallelWaveGAN | ParallelWaveGAN-master/parallel_wavegan/bin/preprocess.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2019 Tomoki Hayashi
# MIT License (https://opensource.org/licenses/MIT)
"""Perform preprocessing and raw feature extraction."""
import argparse
import logging
import os
import librosa
import numpy as np
import soundfile as sf
import torch
import yaml
from scipy.interpolate import interp1d
from tqdm import tqdm
from parallel_wavegan.datasets import AudioDataset, AudioSCPDataset
from parallel_wavegan.utils import write_hdf5
def logmelfilterbank(
    audio,
    sampling_rate,
    fft_size=1024,
    hop_size=256,
    win_length=None,
    window="hann",
    num_mels=80,
    fmin=None,
    fmax=None,
    eps=1e-10,
    log_base=10.0,
):
    """Compute log-Mel filterbank feature.

    Args:
        audio (ndarray): Audio signal (T,).
        sampling_rate (int): Sampling rate.
        fft_size (int): FFT size.
        hop_size (int): Hop size.
        win_length (int): Window length. If set to None, it will be the same as fft_size.
        window (str): Window function type.
        num_mels (int): Number of mel basis.
        fmin (int): Minimum frequency in mel basis calculation.
        fmax (int): Maximum frequency in mel basis calculation.
        eps (float): Epsilon value to avoid inf in log calculation.
        log_base (float): Log base. If set to None, use np.log.

    Returns:
        ndarray: Log Mel filterbank feature (#frames, num_mels).

    Raises:
        ValueError: If log_base is not a valid logarithm base (<= 0 or == 1).

    """
    # get amplitude spectrogram
    x_stft = librosa.stft(
        audio,
        n_fft=fft_size,
        hop_length=hop_size,
        win_length=win_length,
        window=window,
        pad_mode="reflect",
    )
    spc = np.abs(x_stft).T  # (#frames, #bins)
    # get mel basis
    fmin = 0 if fmin is None else fmin
    fmax = sampling_rate / 2 if fmax is None else fmax
    mel_basis = librosa.filters.mel(
        sr=sampling_rate,
        n_fft=fft_size,
        n_mels=num_mels,
        fmin=fmin,
        fmax=fmax,
    )
    # clamp with eps to avoid -inf in the log below
    mel = np.maximum(eps, np.dot(spc, mel_basis.T))
    # compress to log domain; keep the exact numpy fast paths for common bases
    if log_base is None:
        return np.log(mel)
    elif log_base == 10.0:
        return np.log10(mel)
    elif log_base == 2.0:
        return np.log2(mel)
    elif log_base <= 0.0 or log_base == 1.0:
        raise ValueError(f"{log_base} is not supported.")
    else:
        # generalization: any other positive base via the change-of-base formula
        return np.log(mel) / np.log(log_base)
def f0_torchyin(
    audio,
    sampling_rate,
    hop_size=256,
    frame_length=None,
    pitch_min=40,
    pitch_max=10000,
):
    """Compute F0 with Yin.

    Args:
        audio (ndarray): Audio signal (T,).
        sampling_rate (int): Sampling rate.
        hop_size (int): Hop size.
        frame_length (int): Analysis frame length; if given, it overrides
            pitch_min with the lowest pitch trackable by that frame size.
        pitch_min (int): Minimum pitch in pitch extraction.
        pitch_max (int): Maximum pitch in pitch extraction.

    Returns:
        ndarray: f0 feature (#frames, ), log-scaled on voiced frames.

    Note:
        Unvoiced frame has value = 0.

    """
    waveform = torch.from_numpy(audio).float()
    # derive the lowest trackable pitch from the frame length when provided
    if frame_length is not None:
        pitch_min = sampling_rate / (frame_length / 2)
    # delayed import to keep torchyin optional
    import torchyin

    f0 = (
        torchyin.estimate(
            waveform,
            sample_rate=sampling_rate,
            pitch_min=pitch_min,
            pitch_max=pitch_max,
            frame_stride=hop_size / sampling_rate,
        )
        .cpu()
        .numpy()
    )
    # log-compress voiced frames only; unvoiced frames stay exactly 0
    voiced_idxs = np.nonzero(f0)[0]
    f0[voiced_idxs] = np.log(f0[voiced_idxs])
    return f0
def logf0_and_vuv_pyreaper(audio, fs, hop_size=64, f0min=40.0, f0max=500.0):
    """Extract continuous log f0 and uv sequences.

    Args:
        audio (ndarray): Audio sequence in float (-1, 1).
        fs (ndarray): Sampling rate.
        hop_size (int): Hop size in point.
        f0min (float): Minimum f0 value.
        f0max (float): Maximum f0 value.

    Returns:
        ndarray: Continuous log f0 sequence (#frames, 1).
        ndarray: Voiced (=1) / unvoiced (=0) sequence (#frames, 1).
        Returns None when every frame is unvoiced.

    """
    # delayed import
    import pyreaper

    # convert to 16 bit integer and extract f0
    # NOTE: vectorized conversion; np.round uses round-half-to-even,
    # matching the semantics of the builtin round() used previously.
    audio = np.round(audio * np.iinfo(np.int16).max).astype(np.int16)
    # NOTE(review): f0min/f0max are currently not forwarded to reaper —
    # kept for interface compatibility; confirm whether they should be.
    _, _, f0_times, f0, _ = pyreaper.reaper(audio, fs, frame_period=hop_size / fs)
    # get vuv (reaper marks unvoiced frames with f0 == -1)
    vuv = np.float32(f0 != -1)
    if vuv.sum() == 0:
        logging.warning("All of the frames are unvoiced.")
        return None
    # get start and end of f0
    start_f0 = f0[f0 != -1][0]
    end_f0 = f0[f0 != -1][-1]
    # padding start and end of f0 sequence so interpolation covers all frames
    start_idx = np.where(f0 == start_f0)[0][0]
    end_idx = np.where(f0 == end_f0)[0][-1]
    f0[:start_idx] = start_f0
    f0[end_idx:] = end_f0
    # get non-zero frame index
    voiced_frame_idxs = np.where(f0 != -1)[0]
    # perform linear interpolation over unvoiced gaps
    f = interp1d(f0_times[voiced_frame_idxs], f0[voiced_frame_idxs])
    f0 = f(f0_times)
    # convert to log domain
    lf0 = np.log(f0)
    return lf0.reshape(-1, 1), vuv.reshape(-1, 1)
def main():
    """Run preprocessing process.

    Parses CLI arguments, loads the yaml configuration, and iterates over the
    dataset to extract and dump waveforms and features (mel, f0 + excitation
    for UHiFiGAN, continuous log-f0 + vuv, and global speaker ids) in hdf5 or
    npy format.
    """
    parser = argparse.ArgumentParser(
        description=(
            "Preprocess audio and then extract features (See detail in"
            " parallel_wavegan/bin/preprocess.py)."
        )
    )
    parser.add_argument(
        "--wav-scp",
        "--scp",
        default=None,
        type=str,
        help="kaldi-style wav.scp file. you need to specify either scp or rootdir.",
    )
    parser.add_argument(
        "--segments",
        default=None,
        type=str,
        help=(
            "kaldi-style segments file. if use, you must to specify both scp and"
            " segments."
        ),
    )
    parser.add_argument(
        "--rootdir",
        default=None,
        type=str,
        help=(
            "directory including wav files. you need to specify either scp or rootdir."
        ),
    )
    parser.add_argument(
        "--dumpdir",
        type=str,
        required=True,
        help="directory to dump feature files.",
    )
    parser.add_argument(
        "--config",
        type=str,
        required=True,
        help="yaml format configuration file.",
    )
    parser.add_argument(
        "--utt2spk",
        default=None,
        type=str,
        help=(
            "kaldi-style utt2spk file. If you want to add global conditionning with "
            "speaker id, you need to specify this argument."
        ),
    )
    parser.add_argument(
        "--spk2idx",
        default=None,
        type=str,
        help=(
            "kaldi-style spk2idx file. If you want to add global conditionning with "
            "speaker id, you need to specify this argument."
        ),
    )
    parser.add_argument(
        "--skip-mel-ext",
        default=False,
        action="store_true",
        help="whether to skip the extraction of mel features.",
    )
    parser.add_argument(
        "--extract-f0",
        default=False,
        action="store_true",
        help="whether to extract f0 sequence.",
    )
    parser.add_argument(
        "--allow-different-sampling-rate",
        default=False,
        action="store_true",
        help="whether to allow different sampling rate in config.",
    )
    parser.add_argument(
        "--verbose",
        type=int,
        default=1,
        help="logging level. higher is more logging. (default=1)",
    )
    args = parser.parse_args()
    # set logger
    if args.verbose > 1:
        logging.basicConfig(
            level=logging.DEBUG,
            format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
        )
    elif args.verbose > 0:
        logging.basicConfig(
            level=logging.INFO,
            format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
        )
    else:
        logging.basicConfig(
            level=logging.WARN,
            format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
        )
        logging.warning("Skip DEBUG/INFO messages")
    # load config
    with open(args.config) as f:
        config = yaml.load(f, Loader=yaml.Loader)
    config.update(vars(args))
    # check model architecture
    generator_type = config.get("generator_type", "ParallelWaveGANGenerator")
    use_f0_and_excitation = generator_type == "UHiFiGANGenerator"
    # NOTE: f0/excitation extraction needs the mel frame count and hop size
    # computed in the mel branch, so it cannot be combined with --skip-mel-ext
    # (this combination previously crashed with a NameError).
    if args.skip_mel_ext and use_f0_and_excitation:
        raise ValueError(
            "--skip-mel-ext is not supported when generator_type is "
            "UHiFiGANGenerator (f0/excitation extraction requires mel features)."
        )
    # check arguments
    if (args.wav_scp is not None and args.rootdir is not None) or (
        args.wav_scp is None and args.rootdir is None
    ):
        raise ValueError("Please specify either --rootdir or --wav-scp.")
    # get dataset
    if args.rootdir is not None:
        dataset = AudioDataset(
            args.rootdir,
            "*.wav",
            audio_load_fn=sf.read,
            return_utt_id=True,
        )
    else:
        dataset = AudioSCPDataset(
            args.wav_scp,
            segments=args.segments,
            return_utt_id=True,
            return_sampling_rate=True,
        )
    # check directly existence
    if not os.path.exists(args.dumpdir):
        os.makedirs(args.dumpdir, exist_ok=True)
    if "sampling_rate_for_feats" not in config:
        sampling_rate = config["sampling_rate"]
    else:
        sampling_rate = config["sampling_rate_for_feats"]
    if use_f0_and_excitation:
        from parallel_wavegan.layers import SineGen

        ExcitationExtractor = SineGen(samp_rate=sampling_rate)
    # load spk2utt file
    if args.utt2spk is not None:
        with open(args.utt2spk) as f:
            lines = [line.replace("\n", "") for line in f.readlines()]
        utt2spk = {line.split()[0]: line.split()[1] for line in lines}
        with open(args.spk2idx) as f:
            lines = [line.replace("\n", "") for line in f.readlines()]
        spk2idx = {line.split()[0]: int(line.split()[1]) for line in lines}
    # process each data
    for utt_id, (audio, fs) in tqdm(dataset):
        # check
        assert len(audio.shape) == 1, f"{utt_id} seems to be multi-channel signal."
        assert (
            np.abs(audio).max() <= 1.0
        ), f"{utt_id} seems to be different from 16 bit PCM."
        # NOTE(review): --allow-different-sampling-rate is accepted but not
        # honored here; the assert below always enforces the config rate.
        assert (
            fs == config["sampling_rate"]
        ), f"{utt_id} seems to have a different sampling rate."
        # trim silence
        if config["trim_silence"]:
            audio, _ = librosa.effects.trim(
                audio,
                top_db=config["trim_threshold_in_db"],
                frame_length=config["trim_frame_size"],
                hop_length=config["trim_hop_size"],
            )
        if not args.skip_mel_ext:
            if "sampling_rate_for_feats" not in config:
                x = audio
                sampling_rate = config["sampling_rate"]
                hop_size = config["hop_size"]
            else:
                # NOTE(kan-bayashi): this procedure enables to train the model with different
                #   sampling rate for feature and audio, e.g., training with mel extracted
                #   using 16 kHz audio and 24 kHz audio as a target waveform
                x = librosa.resample(
                    audio, orig_sr=fs, target_sr=config["sampling_rate_for_feats"]
                )
                sampling_rate = config["sampling_rate_for_feats"]
                assert (
                    config["hop_size"] * config["sampling_rate_for_feats"] % fs == 0
                ), (
                    "hop_size must be int value. please check sampling_rate_for_feats"
                    " is correct."
                )
                hop_size = config["hop_size"] * config["sampling_rate_for_feats"] // fs
            # extract feature
            mel = logmelfilterbank(
                x,
                sampling_rate=sampling_rate,
                hop_size=hop_size,
                fft_size=config["fft_size"],
                win_length=config["win_length"],
                window=config["window"],
                num_mels=config["num_mels"],
                fmin=config["fmin"],
                fmax=config["fmax"],
            )
            # make sure the audio length and feature length are matched
            audio = np.pad(audio, (0, config["fft_size"]), mode="edge")
            audio = audio[: len(mel) * config["hop_size"]]
            assert len(mel) * config["hop_size"] == len(audio)
        # extract f0 sequence
        if args.extract_f0:
            l_ = logf0_and_vuv_pyreaper(audio, fs, config["hop_size"])
            # skip utterances with no voiced frames at all
            if l_ is None:
                continue
            l_ = np.concatenate(l_, axis=-1)
            # force audio length to match the f0/vuv frame count
            if len(audio) > len(l_) * config["hop_size"]:
                audio = audio[: len(l_) * config["hop_size"]]
            if len(audio) < len(l_) * config["hop_size"]:
                audio = np.pad(
                    audio, (0, len(l_) * config["hop_size"] - len(audio)), mode="edge"
                )
        if use_f0_and_excitation:
            f0 = f0_torchyin(
                audio,
                sampling_rate=sampling_rate,
                hop_size=hop_size,
                frame_length=config["win_length"],
            ).reshape(-1, 1)
            if len(f0) > len(mel):
                f0 = f0[: len(mel)]
            else:
                # pad only along the time axis; f0 is (#frames, 1) here, so a
                # scalar pad width would wrongly pad the channel axis as well
                f0 = np.pad(f0, ((0, len(mel) - len(f0)), (0, 0)), mode="edge")
            # upsample f0 to sample resolution for the sine-wave excitation
            extended_f0 = (
                torch.from_numpy(f0)
                .reshape(1, 1, -1)
                .repeat(1, config["hop_size"], 1)
                .reshape(1, -1, 1)
            )
            sine_waves, _, _ = ExcitationExtractor(extended_f0)
            excitation = sine_waves.squeeze(0).squeeze(-1).cpu().numpy()
            excitation = excitation[: len(mel) * config["hop_size"]]
            excitation = excitation.reshape(-1, config["hop_size"])
            f0 = np.squeeze(f0)  # (#frames,)
            excitation = np.squeeze(excitation)  # (#frames, hop_size)
        # apply global gain
        if config["global_gain_scale"] > 0.0:
            audio *= config["global_gain_scale"]
        if np.abs(audio).max() >= 1.0:
            logging.warning(
                f"{utt_id} causes clipping. "
                "it is better to re-consider global gain scale."
            )
            continue
        # save
        if config["format"] == "hdf5":
            write_hdf5(
                os.path.join(args.dumpdir, f"{utt_id}.h5"),
                "wave",
                audio.astype(np.float32),
            )
            if not args.skip_mel_ext:
                write_hdf5(
                    os.path.join(args.dumpdir, f"{utt_id}.h5"),
                    "feats",
                    mel.astype(np.float32),
                )
            if use_f0_and_excitation:
                write_hdf5(
                    os.path.join(args.dumpdir, f"{utt_id}.h5"),
                    "f0",
                    f0.astype(np.float32),
                )
                write_hdf5(
                    os.path.join(args.dumpdir, f"{utt_id}.h5"),
                    "excitation",
                    excitation.astype(np.float32),
                )
            if args.extract_f0:
                write_hdf5(
                    os.path.join(args.dumpdir, f"{utt_id}.h5"),
                    "local",
                    l_.astype(np.float32),
                )
        elif config["format"] == "npy":
            np.save(
                os.path.join(args.dumpdir, f"{utt_id}-wave.npy"),
                audio.astype(np.float32),
                allow_pickle=False,
            )
            if not args.skip_mel_ext:
                np.save(
                    os.path.join(args.dumpdir, f"{utt_id}-feats.npy"),
                    mel.astype(np.float32),
                    allow_pickle=False,
                )
            if use_f0_and_excitation:
                np.save(
                    os.path.join(args.dumpdir, f"{utt_id}-f0.npy"),
                    f0.astype(np.float32),
                    allow_pickle=False,
                )
                np.save(
                    os.path.join(args.dumpdir, f"{utt_id}-excitation.npy"),
                    excitation.astype(np.float32),
                )
            if args.extract_f0:
                np.save(
                    os.path.join(args.dumpdir, f"{utt_id}-local.npy"),
                    l_.astype(np.float32),
                    allow_pickle=False,
                )
        else:
            raise ValueError("support only hdf5 or npy format.")
        # save global embedding
        if config.get("use_global_condition", False):
            spk = utt2spk[utt_id]
            idx = spk2idx[spk]
            if config["format"] == "hdf5":
                write_hdf5(
                    os.path.join(args.dumpdir, f"{utt_id}.h5"), "global", int(idx)
                )
            elif config["format"] == "npy":
                np.save(
                    os.path.join(args.dumpdir, f"{utt_id}-global.npy"),
                    int(idx),
                    allow_pickle=False,
                )
# CLI entry point.
if __name__ == "__main__":
    main()
| 16,741 | 30.410882 | 93 | py |
ParallelWaveGAN | ParallelWaveGAN-master/parallel_wavegan/distributed/launch.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Distributed process launcher.
This code is modified from https://github.com/pytorch/pytorch/blob/v1.3.0/torch/distributed/launch.py.
"""
import os
import subprocess
import sys
from argparse import REMAINDER, ArgumentParser
def parse_args():
    """Build the launcher argument parser and parse ``sys.argv``.

    Returns:
        argparse.Namespace: Parsed launcher options plus the training script
            path and its remaining arguments.

    """
    arg_parser = ArgumentParser(
        description=(
            "PyTorch distributed training launch helper utilty that will "
            "spawn up multiple distributed processes"
        )
    )
    # optional arguments controlling the process topology
    arg_parser.add_argument(
        "--nnodes",
        type=int,
        default=1,
        help="The number of nodes to use for distributed training",
    )
    arg_parser.add_argument(
        "--node_rank",
        type=int,
        default=0,
        help="The rank of the node for multi-node distributed training",
    )
    arg_parser.add_argument(
        "--nproc_per_node",
        type=int,
        default=1,
        help=(
            "The number of processes to launch on each node, for GPU "
            "training, this is recommended to be set to the number of GPUs "
            "in your system so that each process can be bound to a single GPU."
        ),
    )
    # rendezvous endpoint of the rank-0 node
    arg_parser.add_argument(
        "--master_addr",
        default="127.0.0.1",
        type=str,
        help=(
            "Master node (rank 0)'s address, should be either the IP "
            "address or the hostname of node 0, for single node multi-proc "
            "training, the --master_addr can simply be 127.0.0.1"
        ),
    )
    arg_parser.add_argument(
        "--master_port",
        default=29500,
        type=int,
        help=(
            "Master node (rank 0)'s free port that needs to be used for "
            "communciation during distributed training"
        ),
    )
    # how the local rank is communicated to each worker
    arg_parser.add_argument(
        "--use_env",
        default=False,
        action="store_true",
        help=(
            "Use environment variable to pass 'local rank'. For legacy "
            "reasons, the default value is False. If set to True, the script "
            "will not pass --local_rank as argument, and will instead set "
            "LOCAL_RANK."
        ),
    )
    # how the training script itself is interpreted
    arg_parser.add_argument(
        "-m",
        "--module",
        default=False,
        action="store_true",
        help=(
            "Changes each process to interpret the launch script as a python "
            "module, executing with the same behavior as'python -m'."
        ),
    )
    arg_parser.add_argument(
        "-c",
        "--command",
        default=False,
        action="store_true",
        help="Changes each process to interpret the launch script as a command.",
    )
    # positional: the script/module/command to launch
    arg_parser.add_argument(
        "training_script",
        type=str,
        help=(
            "The full path to the single GPU training program/script/command "
            "to be launched in parallel, followed by all the arguments for "
            "the training script"
        ),
    )
    # everything after the script is forwarded verbatim to it
    arg_parser.add_argument("training_script_args", nargs=REMAINDER)
    return arg_parser.parse_args()
def main():
    """Launch distributed processes.

    Spawns ``nproc_per_node`` worker processes with the torch.distributed
    environment variables (MASTER_ADDR/MASTER_PORT/WORLD_SIZE/RANK/LOCAL_RANK)
    set, then waits for all of them to finish.

    Raises:
        subprocess.CalledProcessError: If any worker exits with a non-zero code.
    """
    args = parse_args()
    # world size in terms of number of processes
    dist_world_size = args.nproc_per_node * args.nnodes
    # set PyTorch distributed related environmental variables
    current_env = os.environ.copy()
    current_env["MASTER_ADDR"] = args.master_addr
    current_env["MASTER_PORT"] = str(args.master_port)
    current_env["WORLD_SIZE"] = str(dist_world_size)
    # list of (process, cmd) so that a failure can report the right command
    processes = []
    if "OMP_NUM_THREADS" not in os.environ and args.nproc_per_node > 1:
        current_env["OMP_NUM_THREADS"] = str(1)
        print(
            "*****************************************\n"
            "Setting OMP_NUM_THREADS environment variable for each process "
            "to be {} in default, to avoid your system being overloaded, "
            "please further tune the variable for optimal performance in "
            "your application as needed. \n"
            "*****************************************".format(
                current_env["OMP_NUM_THREADS"]
            )
        )
    for local_rank in range(0, args.nproc_per_node):
        # each process's rank
        dist_rank = args.nproc_per_node * args.node_rank + local_rank
        current_env["RANK"] = str(dist_rank)
        current_env["LOCAL_RANK"] = str(local_rank)
        # spawn the processes
        if args.command:
            cmd = [args.training_script]
        else:
            cmd = [sys.executable, "-u"]
            if args.module:
                cmd.append("-m")
            cmd.append(args.training_script)
            if not args.use_env:
                cmd.append("--local_rank={}".format(local_rank))
        cmd.extend(args.training_script_args)
        process = subprocess.Popen(cmd, env=current_env)
        processes.append((process, cmd))
    for process, cmd in processes:
        process.wait()
        if process.returncode != 0:
            # BUGFIX: report the failing process's own command, not the
            # command of the last process spawned in the loop above
            raise subprocess.CalledProcessError(returncode=process.returncode, cmd=cmd)
# CLI entry point.
if __name__ == "__main__":
    main()
| 5,262 | 28.903409 | 102 | py |
ParallelWaveGAN | ParallelWaveGAN-master/parallel_wavegan/datasets/audio_mel_dataset.py | # -*- coding: utf-8 -*-
# Copyright 2019 Tomoki Hayashi
# MIT License (https://opensource.org/licenses/MIT)
"""Dataset modules."""
import logging
import os
from multiprocessing import Manager
import numpy as np
from torch.utils.data import Dataset
from parallel_wavegan.utils import find_files, read_hdf5
class AudioMelDataset(Dataset):
    """PyTorch compatible audio and mel (+global conditioning feature) dataset."""

    def __init__(
        self,
        root_dir,
        audio_query="*.h5",
        audio_load_fn=lambda x: read_hdf5(x, "wave"),
        mel_query="*.h5",
        mel_load_fn=lambda x: read_hdf5(x, "feats"),
        local_query=None,
        local_load_fn=None,
        global_query=None,
        global_load_fn=None,
        audio_length_threshold=None,
        mel_length_threshold=None,
        return_utt_id=False,
        allow_cache=False,
    ):
        """Initialize dataset.

        Args:
            root_dir (str): Root directory including dumped files.
            audio_query (str): Query to find audio files in root_dir.
            audio_load_fn (func): Function to load audio file.
            mel_query (str): Query to find feature files in root_dir.
            mel_load_fn (func): Function to load feature file.
            local_query (str): Query to find local conditioning feature files in root_dir.
            local_load_fn (func): Function to load local conditioning feature file.
            global_query (str): Query to find global conditioning feature files in root_dir.
            global_load_fn (func): Function to load global conditioning feature file.
            audio_length_threshold (int): Threshold to remove short audio files.
            mel_length_threshold (int): Threshold to remove short feature files.
            return_utt_id (bool): Whether to return the utterance id with arrays.
            allow_cache (bool): Whether to allow cache of the loaded files.

        """
        # find all of audio and mel files
        # NOTE: sorting keeps audio/mel/local/global lists index-aligned,
        # assuming matching filenames across queries
        audio_files = sorted(find_files(root_dir, audio_query))
        mel_files = sorted(find_files(root_dir, mel_query))
        self.use_local = local_query is not None
        if self.use_local:
            local_files = sorted(find_files(root_dir, local_query))
        self.use_global = global_query is not None
        if self.use_global:
            global_files = sorted(find_files(root_dir, global_query))
        # filter by threshold
        # NOTE: all parallel file lists must be filtered with the SAME idxs
        # to stay aligned; do not reorder these statements
        if audio_length_threshold is not None:
            audio_lengths = [audio_load_fn(f).shape[0] for f in audio_files]
            idxs = [
                idx
                for idx in range(len(audio_files))
                if audio_lengths[idx] > audio_length_threshold
            ]
            if len(audio_files) != len(idxs):
                logging.warning(
                    "Some files are filtered by audio length threshold "
                    f"({len(audio_files)} -> {len(idxs)})."
                )
            audio_files = [audio_files[idx] for idx in idxs]
            mel_files = [mel_files[idx] for idx in idxs]
            if self.use_local:
                local_files = [local_files[idx] for idx in idxs]
            if self.use_global:
                global_files = [global_files[idx] for idx in idxs]
        if mel_length_threshold is not None:
            mel_lengths = [mel_load_fn(f).shape[0] for f in mel_files]
            idxs = [
                idx
                for idx in range(len(mel_files))
                if mel_lengths[idx] > mel_length_threshold
            ]
            if len(mel_files) != len(idxs):
                logging.warning(
                    "Some files are filtered by mel length threshold "
                    f"({len(mel_files)} -> {len(idxs)})."
                )
            audio_files = [audio_files[idx] for idx in idxs]
            mel_files = [mel_files[idx] for idx in idxs]
            if self.use_local:
                local_files = [local_files[idx] for idx in idxs]
            if self.use_global:
                global_files = [global_files[idx] for idx in idxs]
        # assert the number of files
        # NOTE(review): the "$" in the message below looks like a shell-style
        # leftover inside an f-string; it renders literally
        assert len(audio_files) != 0, f"Not found any audio files in ${root_dir}."
        assert len(audio_files) == len(mel_files), (
            f"Number of audio and mel files are different ({len(audio_files)} vs"
            f" {len(mel_files)})."
        )
        if self.use_local:
            assert len(audio_files) == len(local_files), (
                f"Number of audio and local files are different ({len(audio_files)} vs"
                f" {len(local_files)})."
            )
        if self.use_global:
            assert len(audio_files) == len(global_files), (
                f"Number of audio and global files are different ({len(audio_files)} vs"
                f" {len(global_files)})."
            )
        self.audio_files = audio_files
        self.audio_load_fn = audio_load_fn
        self.mel_files = mel_files
        self.mel_load_fn = mel_load_fn
        if self.use_local:
            self.local_files = local_files
            self.local_load_fn = local_load_fn
        if self.use_global:
            self.global_files = global_files
            self.global_load_fn = global_load_fn
        # derive utterance ids from the file names (strip npy suffix or extension)
        if ".npy" in audio_query:
            self.utt_ids = [
                os.path.basename(f).replace("-wave.npy", "") for f in audio_files
            ]
        else:
            self.utt_ids = [
                os.path.splitext(os.path.basename(f))[0] for f in audio_files
            ]
        self.return_utt_id = return_utt_id
        self.allow_cache = allow_cache
        if allow_cache:
            # NOTE(kan-bayashi): Manager is need to share memory in dataloader with num_workers > 0
            self.manager = Manager()
            self.caches = self.manager.list()
            self.caches += [() for _ in range(len(audio_files))]

    def __getitem__(self, idx):
        """Get specified idx items.

        Args:
            idx (int): Index of the item.

        Returns:
            str: Utterance id (only in return_utt_id = True).
            ndarray: Audio signal (T,).
            ndarray: Feature (T', C).
            ndarray: Local feature (T' C').
            ndarray: Global feature (1,).

        """
        # serve from cache when enabled and already populated (empty tuple = miss)
        if self.allow_cache and len(self.caches[idx]) != 0:
            return self.caches[idx]
        utt_id = self.utt_ids[idx]
        audio = self.audio_load_fn(self.audio_files[idx])
        mel = self.mel_load_fn(self.mel_files[idx])
        items = (audio, mel)
        if self.use_local:
            l_ = self.local_load_fn(self.local_files[idx])
            items = items + (l_,)
        if self.use_global:
            g = self.global_load_fn(self.global_files[idx]).reshape(-1)
            items = items + (g,)
        if self.return_utt_id:
            items = (utt_id,) + items
        if self.allow_cache:
            self.caches[idx] = items
        return items

    def __len__(self):
        """Return dataset length.

        Returns:
            int: The length of dataset.

        """
        return len(self.audio_files)
class AudioMelF0ExcitationDataset(Dataset):
    """PyTorch compatible audio, mel, f0, and excitation dataset."""

    def __init__(
        self,
        root_dir,
        audio_query="*.h5",
        mel_query="*.h5",
        f0_query="*.h5",
        excitation_query="*.h5",
        audio_load_fn=lambda x: read_hdf5(x, "wave"),
        mel_load_fn=lambda x: read_hdf5(x, "feats"),
        f0_load_fn=lambda x: read_hdf5(x, "f0"),
        excitation_load_fn=lambda x: read_hdf5(x, "excitation"),
        audio_length_threshold=None,
        mel_length_threshold=None,
        return_utt_id=False,
        allow_cache=False,
    ):
        """Initialize dataset.

        Args:
            root_dir (str): Root directory including dumped files.
            audio_query (str): Query to find audio files in root_dir.
            mel_query (str): Query to find mel feature files in root_dir.
            f0_query (str): Query to find f0 feature files in root_dir.
            excitation_query (str): Query to find excitation feature files in root_dir.
            audio_load_fn (func): Function to load audio file.
            mel_load_fn (func): Function to load mel feature file.
            f0_load_fn (func): Function to load f0 feature file.
            excitation_load_fn (func): Function to load excitation feature file.
            audio_length_threshold (int): Threshold to remove short audio files.
            mel_length_threshold (int): Threshold to remove short feature files.
            return_utt_id (bool): Whether to return the utterance id with arrays.
            allow_cache (bool): Whether to allow cache of the loaded files.

        """
        # find all of audio and mel files
        # NOTE: sorting keeps the four parallel file lists index-aligned,
        # assuming matching filenames across queries
        audio_files = sorted(find_files(root_dir, audio_query))
        mel_files = sorted(find_files(root_dir, mel_query))
        f0_files = sorted(find_files(root_dir, f0_query))
        excitation_files = sorted(find_files(root_dir, excitation_query))
        # filter by threshold
        # NOTE: every parallel list must be filtered with the SAME idxs to
        # stay aligned; do not reorder these statements
        if audio_length_threshold is not None:
            audio_lengths = [audio_load_fn(f).shape[0] for f in audio_files]
            idxs = [
                idx
                for idx in range(len(audio_files))
                if audio_lengths[idx] > audio_length_threshold
            ]
            if len(audio_files) != len(idxs):
                logging.warning(
                    "Some files are filtered by audio length threshold "
                    f"({len(audio_files)} -> {len(idxs)})."
                )
            audio_files = [audio_files[idx] for idx in idxs]
            mel_files = [mel_files[idx] for idx in idxs]
            f0_files = [f0_files[idx] for idx in idxs]
            excitation_files = [excitation_files[idx] for idx in idxs]
        if mel_length_threshold is not None:
            mel_lengths = [mel_load_fn(f).shape[0] for f in mel_files]
            idxs = [
                idx
                for idx in range(len(mel_files))
                if mel_lengths[idx] > mel_length_threshold
            ]
            if len(mel_files) != len(idxs):
                logging.warning(
                    "Some files are filtered by mel length threshold "
                    f"({len(mel_files)} -> {len(idxs)})."
                )
            audio_files = [audio_files[idx] for idx in idxs]
            mel_files = [mel_files[idx] for idx in idxs]
            f0_files = [f0_files[idx] for idx in idxs]
            excitation_files = [excitation_files[idx] for idx in idxs]
        # assert the number of files
        # NOTE(review): the "$" in the message below looks like a shell-style
        # leftover inside an f-string; it renders literally
        assert len(audio_files) != 0, f"Not found any audio files in ${root_dir}."
        assert len(audio_files) == len(mel_files), (
            f"Number of audio and mel files are different ({len(audio_files)} vs"
            f" {len(mel_files)})."
        )
        assert len(audio_files) == len(f0_files), (
            f"Number of audio and f0 files are different ({len(audio_files)} vs"
            f" {len(f0_files)})."
        )
        assert len(audio_files) == len(excitation_files), (
            f"Number of audio and excitation files are different ({len(audio_files)} vs"
            f" {len(excitation_files)})."
        )
        self.audio_files = audio_files
        self.audio_load_fn = audio_load_fn
        self.mel_files = mel_files
        self.mel_load_fn = mel_load_fn
        self.f0_files = f0_files
        self.f0_load_fn = f0_load_fn
        self.excitation_files = excitation_files
        self.excitation_load_fn = excitation_load_fn
        # derive utterance ids from the file names (strip npy suffix or extension)
        if ".npy" in audio_query:
            self.utt_ids = [
                os.path.basename(f).replace("-wave.npy", "") for f in audio_files
            ]
        else:
            self.utt_ids = [
                os.path.splitext(os.path.basename(f))[0] for f in audio_files
            ]
        self.return_utt_id = return_utt_id
        self.allow_cache = allow_cache
        if allow_cache:
            # NOTE(kan-bayashi): Manager is need to share memory in dataloader with num_workers > 0
            self.manager = Manager()
            self.caches = self.manager.list()
            self.caches += [() for _ in range(len(audio_files))]

    def __getitem__(self, idx):
        """Get specified idx items.

        Args:
            idx (int): Index of the item.

        Returns:
            str: Utterance id (only in return_utt_id = True).
            ndarray: Audio signal (T,).
            ndarray: Feature (T', C).
            ndarray: Feature (T', ).
            ndarray: Feature (T', C').

        """
        # serve from cache when enabled and already populated (empty tuple = miss)
        if self.allow_cache and len(self.caches[idx]) != 0:
            return self.caches[idx]
        utt_id = self.utt_ids[idx]
        audio = self.audio_load_fn(self.audio_files[idx])
        mel = self.mel_load_fn(self.mel_files[idx])
        f0 = self.f0_load_fn(self.f0_files[idx])
        excitation = self.excitation_load_fn(self.excitation_files[idx])
        if self.return_utt_id:
            items = utt_id, audio, mel, f0, excitation
        else:
            items = audio, mel, f0, excitation
        if self.allow_cache:
            self.caches[idx] = items
        return items

    def __len__(self):
        """Return dataset length.

        Returns:
            int: The length of dataset.

        """
        return len(self.audio_files)
class AudioDataset(Dataset):
    """PyTorch compatible audio dataset."""

    def __init__(
        self,
        root_dir,
        audio_query="*-wave.npy",
        audio_length_threshold=None,
        audio_load_fn=np.load,
        local_query=None,
        local_load_fn=None,
        global_query=None,
        global_load_fn=None,
        return_utt_id=False,
        allow_cache=False,
    ):
        """Initialize dataset.

        Args:
            root_dir (str): Root directory including dumped files.
            audio_query (str): Query to find audio files in root_dir.
            audio_load_fn (func): Function to load audio file.
            audio_length_threshold (int): Threshold to remove short audio files.
            local_query (str): Query to find local conditioning feature files in root_dir.
            local_load_fn (func): Function to load local conditioning feature file.
            global_query (str): Query to find global conditioning feature files in root_dir.
            global_load_fn (func): Function to load global conditioning feature file.
            return_utt_id (bool): Whether to return the utterance id with arrays.
            allow_cache (bool): Whether to allow cache of the loaded files.

        """
        # find all of audio and mel files
        # NOTE: sorting keeps the parallel file lists index-aligned,
        # assuming matching filenames across queries
        audio_files = sorted(find_files(root_dir, audio_query))
        self.use_local = local_query is not None
        self.use_global = global_query is not None
        if self.use_local:
            local_files = sorted(find_files(root_dir, local_query))
        if self.use_global:
            global_files = sorted(find_files(root_dir, global_query))
        # filter by threshold
        # NOTE: parallel lists must be filtered with the SAME idxs to stay aligned
        if audio_length_threshold is not None:
            audio_lengths = [audio_load_fn(f).shape[0] for f in audio_files]
            idxs = [
                idx
                for idx in range(len(audio_files))
                if audio_lengths[idx] > audio_length_threshold
            ]
            if len(audio_files) != len(idxs):
                logging.warning(
                    "some files are filtered by audio length threshold "
                    f"({len(audio_files)} -> {len(idxs)})."
                )
            audio_files = [audio_files[idx] for idx in idxs]
            if self.use_local:
                local_files = [local_files[idx] for idx in idxs]
            if self.use_global:
                global_files = [global_files[idx] for idx in idxs]
        # assert the number of files
        # NOTE(review): the "$" in the message below looks like a shell-style
        # leftover inside an f-string; it renders literally
        assert len(audio_files) != 0, f"Not found any audio files in ${root_dir}."
        if self.use_local:
            assert len(audio_files) == len(local_files), (
                f"Number of audio and local files are different ({len(audio_files)} vs"
                f" {len(local_files)})."
            )
        if self.use_global:
            assert len(audio_files) == len(global_files), (
                f"Number of audio and global files are different ({len(audio_files)} vs"
                f" {len(global_files)})."
            )
        self.audio_files = audio_files
        self.audio_load_fn = audio_load_fn
        if self.use_local:
            self.local_files = local_files
            self.local_load_fn = local_load_fn
        if self.use_global:
            self.global_files = global_files
            self.global_load_fn = global_load_fn
        # derive utterance ids from the file names (strip npy suffix or extension)
        if ".npy" in audio_query:
            self.utt_ids = [
                os.path.basename(f).replace("-wave.npy", "") for f in audio_files
            ]
        else:
            self.utt_ids = [
                os.path.splitext(os.path.basename(f))[0] for f in audio_files
            ]
        self.return_utt_id = return_utt_id
        self.allow_cache = allow_cache
        if allow_cache:
            # NOTE(kan-bayashi): Manager is need to share memory in dataloader with num_workers > 0
            self.manager = Manager()
            self.caches = self.manager.list()
            self.caches += [() for _ in range(len(audio_files))]

    def __getitem__(self, idx):
        """Get specified idx items.

        Args:
            idx (int): Index of the item.

        Returns:
            str: Utterance id (only in return_utt_id = True).
            ndarray: Audio (T,).
            ndarray: Feature (1,).

        """
        # serve from cache when enabled and already populated (empty tuple = miss)
        if self.allow_cache and len(self.caches[idx]) != 0:
            return self.caches[idx]
        utt_id = self.utt_ids[idx]
        audio = self.audio_load_fn(self.audio_files[idx])
        items = (audio,)
        if self.use_local:
            l_ = self.local_load_fn(self.local_files[idx])
            items = items + (l_,)
        if self.use_global:
            g = self.global_load_fn(self.global_files[idx]).reshape(-1)
            items = items + (g,)
        if self.return_utt_id:
            items = (utt_id,) + items
        # NOTE(kan-bayashi): if the return item is one, do not return as tuple
        if len(items) == 1:
            items = items[0]
        if self.allow_cache:
            self.caches[idx] = items
        return items

    def __len__(self):
        """Return dataset length.

        Returns:
            int: The length of dataset.

        """
        return len(self.audio_files)
class MelDataset(Dataset):
    """PyTorch compatible mel (+global conditioning feature) dataset."""

    def __init__(
        self,
        root_dir,
        mel_query="*.h5",
        mel_load_fn=lambda x: read_hdf5(x, "feats"),
        local_query=None,
        local_load_fn=None,
        global_query=None,
        global_load_fn=None,
        mel_length_threshold=None,
        return_utt_id=False,
        allow_cache=False,
    ):
        """Initialize dataset.

        Args:
            root_dir (str): Root directory including dumped files.
            mel_query (str): Query to find feature files in root_dir.
            mel_load_fn (func): Function to load feature file.
            local_query (str): Query to find local conditioning feature files in root_dir.
            local_load_fn (func): Function to load local conditioning feature file.
            global_query (str): Query to find global conditioning feature files in root_dir.
            global_load_fn (func): Function to load global conditioning feature file.
            mel_length_threshold (int): Threshold to remove short feature files.
            return_utt_id (bool): Whether to return the utterance id with arrays.
            allow_cache (bool): Whether to allow cache of the loaded files.

        """
        # find all of audio and mel files
        mel_files = sorted(find_files(root_dir, mel_query))
        self.use_local = local_query is not None
        self.use_global = global_query is not None
        if self.use_local:
            local_files = sorted(find_files(root_dir, local_query))
        if self.use_global:
            global_files = sorted(find_files(root_dir, global_query))

        # filter by threshold
        if mel_length_threshold is not None:
            mel_lengths = [mel_load_fn(f).shape[0] for f in mel_files]
            idxs = [
                idx
                for idx in range(len(mel_files))
                if mel_lengths[idx] > mel_length_threshold
            ]
            if len(mel_files) != len(idxs):
                logging.warning(
                    "Some files are filtered by mel length threshold "
                    f"({len(mel_files)} -> {len(idxs)})."
                )
            mel_files = [mel_files[idx] for idx in idxs]
            if self.use_local:
                local_files = [local_files[idx] for idx in idxs]
            if self.use_global:
                global_files = [global_files[idx] for idx in idxs]

        # assert the number of files
        # (fail fast with a clear message instead of a confusing error later,
        # matching the behavior of the other dataset classes in this module)
        assert len(mel_files) != 0, f"Not found any mel files in {root_dir}."
        if self.use_local:
            assert len(mel_files) == len(local_files), (
                f"Number of audio and local files are different ({len(mel_files)} vs"
                f" {len(local_files)})."
            )
        if self.use_global:
            assert len(mel_files) == len(global_files), (
                f"Number of audio and global files are different ({len(mel_files)} vs"
                f" {len(global_files)})."
            )

        self.mel_files = mel_files
        self.mel_load_fn = mel_load_fn
        if self.use_local:
            self.local_files = local_files
            self.local_load_fn = local_load_fn
        if self.use_global:
            self.global_files = global_files
            self.global_load_fn = global_load_fn
        # derive utterance ids from file names
        if ".npy" in mel_query:
            self.utt_ids = [
                os.path.basename(f).replace("-feats.npy", "") for f in mel_files
            ]
        else:
            self.utt_ids = [os.path.splitext(os.path.basename(f))[0] for f in mel_files]
        self.return_utt_id = return_utt_id
        self.allow_cache = allow_cache
        if allow_cache:
            # NOTE(kan-bayashi): Manager is need to share memory in dataloader with num_workers > 0
            self.manager = Manager()
            self.caches = self.manager.list()
            self.caches += [() for _ in range(len(mel_files))]

    def __getitem__(self, idx):
        """Get specified idx items.

        Args:
            idx (int): Index of the item.

        Returns:
            str: Utterance id (only in return_utt_id = True).
            ndarray: Feature (T', C).
            ndarray: Feature (1,).

        """
        if self.allow_cache and len(self.caches[idx]) != 0:
            return self.caches[idx]

        utt_id = self.utt_ids[idx]
        mel = self.mel_load_fn(self.mel_files[idx])
        items = (mel,)
        if self.use_local:
            l_ = self.local_load_fn(self.local_files[idx])
            items = items + (l_,)
        if self.use_global:
            # global conditioning is flattened to a 1D vector
            g = self.global_load_fn(self.global_files[idx]).reshape(-1)
            items = items + (g,)
        if self.return_utt_id:
            items = (utt_id,) + items

        # NOTE(kan-bayashi): if the return item is one, do not return as tuple
        if len(items) == 1:
            items = items[0]

        if self.allow_cache:
            self.caches[idx] = items
        return items

    def __len__(self):
        """Return dataset length.

        Returns:
            int: The length of dataset.

        """
        return len(self.mel_files)
class MelF0ExcitationDataset(Dataset):
    """PyTorch compatible mel dataset."""

    def __init__(
        self,
        root_dir,
        mel_query="*-feats.npy",
        f0_query="*-f0.npy",
        excitation_query="*-excitation.npy",
        mel_length_threshold=None,
        mel_load_fn=np.load,
        f0_load_fn=np.load,
        excitation_load_fn=np.load,
        return_utt_id=False,
        allow_cache=False,
    ):
        """Initialize dataset.

        Args:
            root_dir (str): Root directory including dumped files.
            mel_query (str): Query to find mel feature files in root_dir.
            f0_query (str): Query to find f0 feature files in root_dir.
            excitation_query (str): Query to find excitation feature files in root_dir.
            mel_length_threshold (int): Threshold to remove short feature files.
            mel_load_fn (func): Function to load mel feature file.
            f0_load_fn (func): Function to load f0 feature file.
            excitation_load_fn (func): Function to load excitation feature file.
            return_utt_id (bool): Whether to return the utterance id with arrays.
            allow_cache (bool): Whether to allow cache of the loaded files.

        """
        # find all of the mel/f0/excitation files
        mel_files = sorted(find_files(root_dir, mel_query))
        f0_files = sorted(find_files(root_dir, f0_query))
        excitation_files = sorted(find_files(root_dir, excitation_query))

        # filter by threshold
        if mel_length_threshold is not None:
            mel_lengths = [mel_load_fn(f).shape[0] for f in mel_files]
            idxs = [
                idx
                for idx in range(len(mel_files))
                if mel_lengths[idx] > mel_length_threshold
            ]
            if len(mel_files) != len(idxs):
                logging.warning(
                    "Some files are filtered by mel length threshold "
                    f"({len(mel_files)} -> {len(idxs)})."
                )
            mel_files = [mel_files[idx] for idx in idxs]
            f0_files = [f0_files[idx] for idx in idxs]
            excitation_files = [excitation_files[idx] for idx in idxs]

        # assert the number of files
        # NOTE: the old messages used "${root_dir}", which rendered a stray "$"
        # before the path in the error text.
        assert len(mel_files) != 0, f"Not found any mel files in {root_dir}."
        assert len(f0_files) != 0, f"Not found any f0 files in {root_dir}."
        assert (
            len(excitation_files) != 0
        ), f"Not found any excitation files in {root_dir}."

        self.mel_files = mel_files
        self.mel_load_fn = mel_load_fn
        self.f0_files = f0_files
        self.f0_load_fn = f0_load_fn
        self.excitation_files = excitation_files
        self.excitation_load_fn = excitation_load_fn
        # derive utterance ids from mel file names
        if ".npy" in mel_query:
            self.utt_ids = [
                os.path.basename(f).replace("-feats.npy", "") for f in mel_files
            ]
        else:
            self.utt_ids = [os.path.splitext(os.path.basename(f))[0] for f in mel_files]
        self.return_utt_id = return_utt_id
        self.allow_cache = allow_cache
        if allow_cache:
            # NOTE(kan-bayashi): Manager is need to share memory in dataloader with num_workers > 0
            self.manager = Manager()
            self.caches = self.manager.list()
            self.caches += [() for _ in range(len(mel_files))]

    def __getitem__(self, idx):
        """Get specified idx items.

        Args:
            idx (int): Index of the item.

        Returns:
            str: Utterance id (only in return_utt_id = True).
            ndarray: Feature (T', C).

        """
        if self.allow_cache and len(self.caches[idx]) != 0:
            return self.caches[idx]

        utt_id = self.utt_ids[idx]
        mel = self.mel_load_fn(self.mel_files[idx])
        f0 = self.f0_load_fn(self.f0_files[idx])
        excitation = self.excitation_load_fn(self.excitation_files[idx])

        if self.return_utt_id:
            items = utt_id, mel, f0, excitation
        else:
            items = mel, f0, excitation

        if self.allow_cache:
            self.caches[idx] = items
        return items

    def __len__(self):
        """Return dataset length.

        Returns:
            int: The length of dataset.

        """
        return len(self.mel_files)
| 27,965 | 35.894459 | 99 | py |
ParallelWaveGAN | ParallelWaveGAN-master/parallel_wavegan/datasets/scp_dataset.py | # -*- coding: utf-8 -*-
# Copyright 2019 Tomoki Hayashi
# MIT License (https://opensource.org/licenses/MIT)
"""Dataset modules based on kaldi-style scp files."""
import logging
from multiprocessing import Manager
import kaldiio
import numpy as np
from torch.utils.data import Dataset
from parallel_wavegan.utils import HDF5ScpLoader, NpyScpLoader
def _get_feats_scp_loader(feats_scp):
# read the first line of feats.scp file
with open(feats_scp) as f:
key, value = f.readlines()[0].replace("\n", "").split()
# check scp type
if ":" in value:
value_1, value_2 = value.split(":")
if value_1.endswith(".ark"):
# kaldi-ark case: utt_id_1 /path/to/utt_id_1.ark:index
return kaldiio.load_scp(feats_scp)
elif value_1.endswith(".h5"):
# hdf5 case with path in hdf5: utt_id_1 /path/to/utt_id_1.h5:feats
return HDF5ScpLoader(feats_scp)
else:
raise ValueError("Not supported feats.scp type.")
else:
if value.endswith(".h5"):
# hdf5 case without path in hdf5: utt_id_1 /path/to/utt_id_1.h5
return HDF5ScpLoader(feats_scp)
elif value.endswith(".npy"):
# npy case: utt_id_1 /path/to/utt_id_1.npy
return NpyScpLoader(feats_scp)
else:
raise ValueError("Not supported feats.scp type.")
class AudioMelSCPDataset(Dataset):
    """PyTorch compatible audio and mel dataset based on kaldi-style scp files."""

    def __init__(
        self,
        wav_scp,
        feats_scp,
        segments=None,
        audio_length_threshold=None,
        mel_length_threshold=None,
        return_utt_id=False,
        return_sampling_rate=False,
        allow_cache=False,
    ):
        """Initialize dataset.

        Args:
            wav_scp (str): Kaldi-style wav.scp file.
            feats_scp (str): Kaldi-style feats.scp file.
            segments (str): Kaldi-style segments file.
            audio_length_threshold (int): Threshold to remove short audio files.
            mel_length_threshold (int): Threshold to remove short feature files.
            return_utt_id (bool): Whether to return utterance id.
            return_sampling_rate (bool): Whether to return sampling rate.
            allow_cache (bool): Whether to allow cache of the loaded files.

        """
        # load scp as lazy dict
        audio_loader = kaldiio.load_scp(wav_scp, segments=segments)
        mel_loader = _get_feats_scp_loader(feats_scp)
        audio_keys = list(audio_loader.keys())
        mel_keys = list(mel_loader.keys())

        # filter by threshold
        # NOTE(review): filtering loads every utterance once to measure its
        # length, which can be slow for large corpora.
        # NOTE(review): indices computed over audio_keys are also applied to
        # mel_keys (and vice versa) — this assumes both scp files list the
        # same utterances in the same order; TODO confirm.
        if audio_length_threshold is not None:
            audio_lengths = [audio.shape[0] for _, audio in audio_loader.values()]
            idxs = [
                idx
                for idx in range(len(audio_keys))
                if audio_lengths[idx] > audio_length_threshold
            ]
            if len(audio_keys) != len(idxs):
                logging.warning(
                    "Some files are filtered by audio length threshold "
                    f"({len(audio_keys)} -> {len(idxs)})."
                )
            audio_keys = [audio_keys[idx] for idx in idxs]
            mel_keys = [mel_keys[idx] for idx in idxs]
        if mel_length_threshold is not None:
            mel_lengths = [mel.shape[0] for mel in mel_loader.values()]
            idxs = [
                idx
                for idx in range(len(mel_keys))
                if mel_lengths[idx] > mel_length_threshold
            ]
            if len(mel_keys) != len(idxs):
                logging.warning(
                    "Some files are filtered by mel length threshold "
                    f"({len(mel_keys)} -> {len(idxs)})."
                )
            audio_keys = [audio_keys[idx] for idx in idxs]
            mel_keys = [mel_keys[idx] for idx in idxs]

        # assert the number of files
        assert len(audio_keys) == len(mel_keys), (
            f"Number of audio and mel files are different ({len(audio_keys)} vs"
            f" {len(mel_keys)})."
        )

        self.audio_loader = audio_loader
        self.mel_loader = mel_loader
        self.utt_ids = audio_keys
        self.return_utt_id = return_utt_id
        self.return_sampling_rate = return_sampling_rate
        self.allow_cache = allow_cache
        if allow_cache:
            # NOTE(kan-bayashi): Manager is need to share memory in dataloader with num_workers > 0
            self.manager = Manager()
            self.caches = self.manager.list()
            self.caches += [() for _ in range(len(self.utt_ids))]

    def __getitem__(self, idx):
        """Get specified idx items.

        Args:
            idx (int): Index of the item.

        Returns:
            str: Utterance id (only in return_utt_id = True).
            ndarray or tuple: Audio signal (T,) or (w/ sampling rate if return_sampling_rate = True).
            ndarray: Feature (T', C).

        """
        # serve from the shared cache when it has been populated
        if self.allow_cache and len(self.caches[idx]) != 0:
            return self.caches[idx]

        utt_id = self.utt_ids[idx]
        fs, audio = self.audio_loader[utt_id]
        mel = self.mel_loader[utt_id]

        # normalize audio signal to be [-1, 1]
        audio = audio.astype(np.float32)
        audio /= 1 << (16 - 1)  # assume that wav is PCM 16 bit

        if self.return_sampling_rate:
            audio = (audio, fs)

        if self.return_utt_id:
            items = utt_id, audio, mel
        else:
            items = audio, mel

        if self.allow_cache:
            # cache the already-normalized arrays for subsequent epochs
            self.caches[idx] = items

        return items

    def __len__(self):
        """Return dataset length.

        Returns:
            int: The length of dataset.

        """
        return len(self.utt_ids)
class AudioSCPDataset(Dataset):
    """PyTorch compatible audio dataset based on kaldi-style scp files."""

    def __init__(
        self,
        wav_scp,
        segments=None,
        audio_length_threshold=None,
        return_utt_id=False,
        return_sampling_rate=False,
        allow_cache=False,
    ):
        """Initialize dataset.

        Args:
            wav_scp (str): Kaldi-style wav.scp file.
            segments (str): Kaldi-style segments file.
            audio_length_threshold (int): Threshold to remove short audio files.
            return_utt_id (bool): Whether to return utterance id.
            return_sampling_rate (bool): Whether to return sampling rate.
            allow_cache (bool): Whether to allow cache of the loaded files.

        """
        # load scp as lazy dict
        audio_loader = kaldiio.load_scp(wav_scp, segments=segments)
        audio_keys = list(audio_loader.keys())

        # filter by threshold
        # NOTE(review): filtering loads every utterance once to measure its
        # length, which can be slow for large corpora.
        if audio_length_threshold is not None:
            audio_lengths = [audio.shape[0] for _, audio in audio_loader.values()]
            idxs = [
                idx
                for idx in range(len(audio_keys))
                if audio_lengths[idx] > audio_length_threshold
            ]
            if len(audio_keys) != len(idxs):
                logging.warning(
                    "Some files are filtered by audio length threshold "
                    f"({len(audio_keys)} -> {len(idxs)})."
                )
            audio_keys = [audio_keys[idx] for idx in idxs]

        self.audio_loader = audio_loader
        self.utt_ids = audio_keys
        self.return_utt_id = return_utt_id
        self.return_sampling_rate = return_sampling_rate
        self.allow_cache = allow_cache
        if allow_cache:
            # NOTE(kan-bayashi): Manager is need to share memory in dataloader with num_workers > 0
            self.manager = Manager()
            self.caches = self.manager.list()
            self.caches += [() for _ in range(len(self.utt_ids))]

    def __getitem__(self, idx):
        """Get specified idx items.

        Args:
            idx (int): Index of the item.

        Returns:
            str: Utterance id (only in return_utt_id = True).
            ndarray or tuple: Audio signal (T,) or (w/ sampling rate if return_sampling_rate = True).

        """
        if self.allow_cache and len(self.caches[idx]) != 0:
            return self.caches[idx]

        utt_id = self.utt_ids[idx]
        fs, audio = self.audio_loader[utt_id]

        # normalize audio signal to be [-1, 1]
        audio = audio.astype(np.float32)
        audio /= 1 << (16 - 1)  # assume that wav is PCM 16 bit

        if self.return_sampling_rate:
            audio = (audio, fs)

        if self.return_utt_id:
            items = utt_id, audio
        else:
            items = audio

        if self.allow_cache:
            self.caches[idx] = items

        return items

    def __len__(self):
        """Return dataset length.

        Returns:
            int: The length of dataset.

        """
        return len(self.utt_ids)
class MelSCPDataset(Dataset):
    """PyTorch compatible mel dataset based on kaldi-style scp files."""

    def __init__(
        self,
        feats_scp,
        mel_length_threshold=None,
        return_utt_id=False,
        allow_cache=False,
    ):
        """Initialize dataset.

        Args:
            feats_scp (str): Kaldi-style feats.scp file.
            mel_length_threshold (int): Threshold to remove short feature files.
            return_utt_id (bool): Whether to return utterance id.
            allow_cache (bool): Whether to allow cache of the loaded files.

        """
        # load scp as lazy dict
        mel_loader = _get_feats_scp_loader(feats_scp)
        mel_keys = list(mel_loader.keys())

        # filter by threshold
        # NOTE(review): filtering loads every feature once to measure its
        # length, which can be slow for large corpora.
        if mel_length_threshold is not None:
            mel_lengths = [mel.shape[0] for mel in mel_loader.values()]
            idxs = [
                idx
                for idx in range(len(mel_keys))
                if mel_lengths[idx] > mel_length_threshold
            ]
            if len(mel_keys) != len(idxs):
                logging.warning(
                    "Some files are filtered by mel length threshold "
                    f"({len(mel_keys)} -> {len(idxs)})."
                )
            mel_keys = [mel_keys[idx] for idx in idxs]

        self.mel_loader = mel_loader
        self.utt_ids = mel_keys
        self.return_utt_id = return_utt_id
        self.allow_cache = allow_cache
        if allow_cache:
            # NOTE(kan-bayashi): Manager is need to share memory in dataloader with num_workers > 0
            self.manager = Manager()
            self.caches = self.manager.list()
            self.caches += [() for _ in range(len(self.utt_ids))]

    def __getitem__(self, idx):
        """Get specified idx items.

        Args:
            idx (int): Index of the item.

        Returns:
            str: Utterance id (only in return_utt_id = True).
            ndarray: Feature (T', C).

        """
        if self.allow_cache and len(self.caches[idx]) != 0:
            return self.caches[idx]

        utt_id = self.utt_ids[idx]
        mel = self.mel_loader[utt_id]

        if self.return_utt_id:
            items = utt_id, mel
        else:
            items = mel

        if self.allow_cache:
            self.caches[idx] = items

        return items

    def __len__(self):
        """Return dataset length.

        Returns:
            int: The length of dataset.

        """
        return len(self.utt_ids)
| 11,431 | 31.202817 | 101 | py |
ParallelWaveGAN | ParallelWaveGAN-master/parallel_wavegan/layers/residual_stack.py | # -*- coding: utf-8 -*-
# Copyright 2020 Tomoki Hayashi
# MIT License (https://opensource.org/licenses/MIT)
"""Residual stack module in MelGAN."""
import torch
from parallel_wavegan.layers import CausalConv1d
class ResidualStack(torch.nn.Module):
    """Residual stack module introduced in MelGAN."""

    def __init__(
        self,
        kernel_size=3,
        channels=32,
        dilation=1,
        bias=True,
        nonlinear_activation="LeakyReLU",
        nonlinear_activation_params={"negative_slope": 0.2},
        pad="ReflectionPad1d",
        pad_params={},
        use_causal_conv=False,
    ):
        """Initialize ResidualStack module.

        Args:
            kernel_size (int): Kernel size of dilation convolution layer.
            channels (int): Number of channels of convolution layers.
            dilation (int): Dilation factor.
            bias (bool): Whether to add bias parameter in convolution layers.
            nonlinear_activation (str): Activation function module name.
            nonlinear_activation_params (dict): Hyperparameters for activation function.
            pad (str): Padding function module name before dilated convolution layer.
            pad_params (dict): Hyperparameters for padding function.
            use_causal_conv (bool): Whether to use causal convolution.

        """
        super(ResidualStack, self).__init__()

        # define residual stack part
        if not use_causal_conv:
            # symmetric padding requires an odd kernel size
            assert (kernel_size - 1) % 2 == 0, "Not support even number kernel size."
            self.stack = torch.nn.Sequential(
                getattr(torch.nn, nonlinear_activation)(**nonlinear_activation_params),
                getattr(torch.nn, pad)((kernel_size - 1) // 2 * dilation, **pad_params),
                torch.nn.Conv1d(
                    channels, channels, kernel_size, dilation=dilation, bias=bias
                ),
                getattr(torch.nn, nonlinear_activation)(**nonlinear_activation_params),
                torch.nn.Conv1d(channels, channels, 1, bias=bias),
            )
        else:
            # causal variant: padding is handled inside CausalConv1d
            self.stack = torch.nn.Sequential(
                getattr(torch.nn, nonlinear_activation)(**nonlinear_activation_params),
                CausalConv1d(
                    channels,
                    channels,
                    kernel_size,
                    dilation=dilation,
                    bias=bias,
                    pad=pad,
                    pad_params=pad_params,
                ),
                getattr(torch.nn, nonlinear_activation)(**nonlinear_activation_params),
                torch.nn.Conv1d(channels, channels, 1, bias=bias),
            )

        # define extra layer for skip connection
        # NOTE(review): despite the name, this is a 1x1 conv applied to the
        # block input and added to the stack output in forward().
        self.skip_layer = torch.nn.Conv1d(channels, channels, 1, bias=bias)

    def forward(self, c):
        """Calculate forward propagation.

        Args:
            c (Tensor): Input tensor (B, channels, T).

        Returns:
            Tensor: Output tensor (B, channels, T).

        """
        return self.stack(c) + self.skip_layer(c)
| 3,073 | 34.744186 | 88 | py |
ParallelWaveGAN | ParallelWaveGAN-master/parallel_wavegan/layers/pqmf.py | # -*- coding: utf-8 -*-
# Copyright 2020 Tomoki Hayashi
# MIT License (https://opensource.org/licenses/MIT)
"""Pseudo QMF modules."""
import numpy as np
import torch
import torch.nn.functional as F
from scipy.signal import kaiser
def design_prototype_filter(taps=62, cutoff_ratio=0.142, beta=9.0):
    """Design prototype filter for PQMF.

    This method is based on `A Kaiser window approach for the design of prototype
    filters of cosine modulated filterbanks`_.

    Args:
        taps (int): The number of filter taps.
        cutoff_ratio (float): Cut-off frequency ratio.
        beta (float): Beta coefficient for kaiser window.

    Returns:
        ndarray: Impulse response of prototype filter (taps + 1,).

    .. _`A Kaiser window approach for the design of prototype filters of cosine modulated filterbanks`:
        https://ieeexplore.ieee.org/abstract/document/681427

    """
    # check the arguments are valid
    assert taps % 2 == 0, "The number of taps must be even number."
    assert 0.0 < cutoff_ratio < 1.0, "Cutoff ratio must be > 0.0 and < 1.0."

    # NOTE: scipy.signal.kaiser was removed in SciPy 1.13; import from the
    # scipy.signal.windows namespace with a fallback for old SciPy versions.
    try:
        from scipy.signal.windows import kaiser
    except ImportError:
        from scipy.signal import kaiser

    # make initial filter (ideal low-pass as a sinc impulse response)
    omega_c = np.pi * cutoff_ratio
    with np.errstate(invalid="ignore"):
        h_i = np.sin(omega_c * (np.arange(taps + 1) - 0.5 * taps)) / (
            np.pi * (np.arange(taps + 1) - 0.5 * taps)
        )
    h_i[taps // 2] = np.cos(0) * cutoff_ratio  # fix nan due to indeterminate form

    # apply kaiser window
    w = kaiser(taps + 1, beta)
    h = h_i * w

    return h
class PQMF(torch.nn.Module):
    """PQMF module.

    This module is based on `Near-perfect-reconstruction pseudo-QMF banks`_.

    .. _`Near-perfect-reconstruction pseudo-QMF banks`:
        https://ieeexplore.ieee.org/document/258122

    """

    def __init__(self, subbands=4, taps=62, cutoff_ratio=0.142, beta=9.0):
        """Initialize PQMF module.

        The cutoff_ratio and beta parameters are optimized for #subbands = 4.
        See discussion in https://github.com/kan-bayashi/ParallelWaveGAN/issues/195.

        Args:
            subbands (int): The number of subbands.
            taps (int): The number of filter taps.
            cutoff_ratio (float): Cut-off frequency ratio.
            beta (float): Beta coefficient for kaiser window.

        """
        super(PQMF, self).__init__()

        # build analysis & synthesis filter coefficients by cosine-modulating
        # the shared prototype low-pass filter
        h_proto = design_prototype_filter(taps, cutoff_ratio, beta)
        h_analysis = np.zeros((subbands, len(h_proto)))
        h_synthesis = np.zeros((subbands, len(h_proto)))
        for k in range(subbands):
            h_analysis[k] = (
                2
                * h_proto
                * np.cos(
                    (2 * k + 1)
                    * (np.pi / (2 * subbands))
                    * (np.arange(taps + 1) - (taps / 2))
                    + (-1) ** k * np.pi / 4
                )
            )
            h_synthesis[k] = (
                2
                * h_proto
                * np.cos(
                    (2 * k + 1)
                    * (np.pi / (2 * subbands))
                    * (np.arange(taps + 1) - (taps / 2))
                    - (-1) ** k * np.pi / 4
                )
            )

        # convert to tensor
        analysis_filter = torch.from_numpy(h_analysis).float().unsqueeze(1)
        synthesis_filter = torch.from_numpy(h_synthesis).float().unsqueeze(0)

        # register coefficients as buffer (non-trainable, moves with .to())
        self.register_buffer("analysis_filter", analysis_filter)
        self.register_buffer("synthesis_filter", synthesis_filter)

        # filter for downsampling & upsampling (identity per subband)
        updown_filter = torch.zeros((subbands, subbands, subbands)).float()
        for k in range(subbands):
            updown_filter[k, k, 0] = 1.0
        self.register_buffer("updown_filter", updown_filter)
        self.subbands = subbands

        # keep padding info
        self.pad_fn = torch.nn.ConstantPad1d(taps // 2, 0.0)

    def analysis(self, x):
        """Analysis with PQMF.

        Args:
            x (Tensor): Input tensor (B, 1, T).

        Returns:
            Tensor: Output tensor (B, subbands, T // subbands).

        """
        x = F.conv1d(self.pad_fn(x), self.analysis_filter)
        return F.conv1d(x, self.updown_filter, stride=self.subbands)

    def synthesis(self, x):
        """Synthesis with PQMF.

        Args:
            x (Tensor): Input tensor (B, subbands, T // subbands).

        Returns:
            Tensor: Output tensor (B, 1, T).

        """
        # NOTE(kan-bayashi): Power will be decreased so here multiply by # subbands.
        #   Not sure this is the correct way, it is better to check again.
        # TODO(kan-bayashi): Understand the reconstruction procedure
        x = F.conv_transpose1d(
            x, self.updown_filter * self.subbands, stride=self.subbands
        )
        return F.conv1d(self.pad_fn(x), self.synthesis_filter)
| 4,907 | 31.72 | 103 | py |
ParallelWaveGAN | ParallelWaveGAN-master/parallel_wavegan/layers/tf_layers.py | # -*- coding: utf-8 -*-
# Copyright 2020 MINH ANH (@dathudeptrai)
# MIT License (https://opensource.org/licenses/MIT)
"""Tensorflow Layer modules complatible with pytorch."""
import tensorflow as tf
class TFReflectionPad1d(tf.keras.layers.Layer):
    """Tensorflow ReflectionPad1d module."""

    def __init__(self, padding_size):
        """Initialize TFReflectionPad1d module.

        Args:
            padding_size (int): Padding size.

        """
        super(TFReflectionPad1d, self).__init__()
        self.padding_size = padding_size

    @tf.function
    def call(self, x):
        """Pad the time axis of the input by reflection.

        Args:
            x (Tensor): Input tensor (B, T, 1, C).

        Returns:
            Tensor: Padded tensor (B, T + 2 * padding_size, 1, C).

        """
        # only the time axis (axis 1) is padded
        pad_width = [
            [0, 0],
            [self.padding_size, self.padding_size],
            [0, 0],
            [0, 0],
        ]
        return tf.pad(x, pad_width, "REFLECT")
class TFConvTranspose1d(tf.keras.layers.Layer):
    """Tensorflow ConvTranspose1d module."""

    def __init__(self, channels, kernel_size, stride, padding):
        """Initialize TFConvTranspose1d module.

        Args:
            channels (int): Number of channels.
            kernel_size (int): Kernel size.
            stride (int): Stride width.
            padding (str): Padding type ("same" or "valid").

        """
        super(TFConvTranspose1d, self).__init__()
        # 1D transposed convolution is emulated with a 2D one whose second
        # spatial dimension is kept at size 1.
        self.conv1d_transpose = tf.keras.layers.Conv2DTranspose(
            filters=channels,
            kernel_size=(kernel_size, 1),
            strides=(stride, 1),
            padding=padding,
        )

    @tf.function
    def call(self, x):
        """Calculate forward propagation.

        Args:
            x (Tensor): Input tensor (B, T, 1, C).

        Returns:
            Tensor: Output tensor (B, T', 1, C').

        """
        return self.conv1d_transpose(x)
class TFResidualStack(tf.keras.layers.Layer):
    """Tensorflow ResidualStack module."""

    def __init__(
        self,
        kernel_size,
        channels,
        dilation,
        bias,
        nonlinear_activation,
        nonlinear_activation_params,
        padding,
    ):
        """Initialize TFResidualStack module.

        Args:
            kernel_size (int): Kernel size.
            channels (int): Number of channels.
            dilation (int): Dilation size.
            bias (bool): Whether to add bias parameter in convolution layers.
            nonlinear_activation (str): Activation function module name.
            nonlinear_activation_params (dict): Hyperparameters for activation function.
            padding (str): Padding type ("same" or "valid").

        """
        super(TFResidualStack, self).__init__()
        # residual branch: activation -> reflection pad -> dilated conv
        # -> activation -> 1x1 conv
        self.block = [
            getattr(tf.keras.layers, nonlinear_activation)(
                **nonlinear_activation_params
            ),
            TFReflectionPad1d(dilation),
            tf.keras.layers.Conv2D(
                filters=channels,
                kernel_size=(kernel_size, 1),
                dilation_rate=(dilation, 1),
                use_bias=bias,
                padding="valid",
            ),
            getattr(tf.keras.layers, nonlinear_activation)(
                **nonlinear_activation_params
            ),
            tf.keras.layers.Conv2D(filters=channels, kernel_size=1, use_bias=bias),
        ]
        # 1x1 conv applied to the input for the shortcut connection
        self.shortcut = tf.keras.layers.Conv2D(
            filters=channels, kernel_size=1, use_bias=bias
        )

    @tf.function
    def call(self, x):
        """Calculate forward propagation.

        Args:
            x (Tensor): Input tensor (B, T, 1, C).

        Returns:
            Tensor: Output tensor (B, T, 1, C).

        """
        _x = tf.identity(x)
        # apply the residual branch sequentially (index was unused before)
        for layer in self.block:
            _x = layer(_x)
        shortcut = self.shortcut(x)
        return shortcut + _x
| 3,916 | 26.780142 | 88 | py |
ParallelWaveGAN | ParallelWaveGAN-master/parallel_wavegan/layers/duration_predictor.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2019 Tomoki Hayashi
# 2023 Jiatong Shi
# Adapted from ESPnet fastspeech duration predictor
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""Duration predictor related modules."""
import torch
from parallel_wavegan.layers.layer_norm import LayerNorm
class DurationPredictor(torch.nn.Module):
    """Duration predictor module.

    This is a module of duration predictor described
    in `FastSpeech: Fast, Robust and Controllable Text to Speech`_.
    The duration predictor predicts a duration of each frame in log domain
    from the hidden embeddings of encoder.

    .. _`FastSpeech: Fast, Robust and Controllable Text to Speech`:
        https://arxiv.org/pdf/1905.09263.pdf

    Note:
        The calculation domain of outputs is different
        between in `forward` and in `inference`. In `forward`,
        the outputs are calculated in log domain but in `inference`,
        those are calculated in linear domain.

    """

    def __init__(
        self, idim, n_layers=2, n_chans=384, kernel_size=3, dropout_rate=0.1, offset=1.0
    ):
        """Initialize duration predictor module.

        Args:
            idim (int): Input dimension.
            n_layers (int, optional): Number of convolutional layers.
            n_chans (int, optional): Number of channels of convolutional layers.
            kernel_size (int, optional): Kernel size of convolutional layers.
            dropout_rate (float, optional): Dropout rate.
            offset (float, optional): Offset value to avoid nan in log domain.

        """
        super(DurationPredictor, self).__init__()
        self.offset = offset
        self.conv = torch.nn.ModuleList()
        for idx in range(n_layers):
            # first layer maps from idim; subsequent layers are n_chans -> n_chans
            in_chans = idim if idx == 0 else n_chans
            self.conv += [
                torch.nn.Sequential(
                    torch.nn.Conv1d(
                        in_chans,
                        n_chans,
                        kernel_size,
                        stride=1,
                        padding=(kernel_size - 1) // 2,
                    ),
                    torch.nn.ReLU(),
                    LayerNorm(n_chans, dim=1),
                    torch.nn.Dropout(dropout_rate),
                )
            ]
        self.linear = torch.nn.Linear(n_chans, 1)

    def _forward(self, xs, x_masks=None, is_inference=False):
        # shared body for forward() and inference()
        xs = xs.transpose(1, -1)  # (B, idim, Tmax)
        for f in self.conv:
            xs = f(xs)  # (B, C, Tmax)

        # NOTE: calculate in log domain
        xs = self.linear(xs.transpose(1, -1)).squeeze(-1)  # (B, Tmax)

        if is_inference:
            # NOTE: calculate in linear domain
            xs = torch.clamp(
                torch.round(xs.exp() - self.offset), min=0
            ).long()  # avoid negative value

        if x_masks is not None:
            # zero out predictions on padded positions
            xs = xs.masked_fill(x_masks, 0.0)

        return xs

    def forward(self, xs, x_masks=None):
        """Calculate forward propagation.

        Args:
            xs (Tensor): Batch of input sequences (B, Tmax, idim).
            x_masks (ByteTensor, optional):
                Batch of masks indicating padded part (B, Tmax).

        Returns:
            Tensor: Batch of predicted durations in log domain (B, Tmax).

        """
        return self._forward(xs, x_masks, False)

    def inference(self, xs, x_masks=None):
        """Inference duration.

        Args:
            xs (Tensor): Batch of input sequences (B, Tmax, idim).
            x_masks (ByteTensor, optional):
                Batch of masks indicating padded part (B, Tmax).

        Returns:
            LongTensor: Batch of predicted durations in linear domain (B, Tmax).

        """
        return self._forward(xs, x_masks, True)
| 3,820 | 31.65812 | 88 | py |
ParallelWaveGAN | ParallelWaveGAN-master/parallel_wavegan/layers/tade_res_block.py | # Copyright 2021 Tomoki Hayashi
# MIT License (https://opensource.org/licenses/MIT)
"""StyleMelGAN's TADEResBlock Modules."""
from functools import partial
import torch
class TADELayer(torch.nn.Module):
    """TADE Layer module."""

    def __init__(
        self,
        in_channels=64,
        aux_channels=80,
        kernel_size=9,
        bias=True,
        upsample_factor=2,
        upsample_mode="nearest",
    ):
        """Initialize TADE layer.

        Args:
            in_channels (int): Number of input channels.
            aux_channels (int): Number of auxiliary (conditioning) channels.
            kernel_size (int): Kernel size of the 1D convolutions.
            bias (bool): Whether to add bias parameter in convolution layers.
            upsample_factor (int): Upsampling factor applied in forward().
            upsample_mode (str): Interpolation mode for torch.nn.Upsample.

        """
        super().__init__()
        self.norm = torch.nn.InstanceNorm1d(in_channels)
        # projects the auxiliary features to in_channels
        self.aux_conv = torch.nn.Sequential(
            torch.nn.Conv1d(
                aux_channels,
                in_channels,
                kernel_size,
                1,
                bias=bias,
                padding=(kernel_size - 1) // 2,
            ),
            # NOTE(kan-bayashi): Use non-linear activation?
        )
        # produces the scale/shift pair used for modulation
        self.gated_conv = torch.nn.Sequential(
            torch.nn.Conv1d(
                in_channels,
                in_channels * 2,
                kernel_size,
                1,
                bias=bias,
                padding=(kernel_size - 1) // 2,
            ),
            # NOTE(kan-bayashi): Use non-linear activation?
        )
        self.upsample = torch.nn.Upsample(
            scale_factor=upsample_factor, mode=upsample_mode
        )

    def forward(self, x, c):
        """Calculate forward propagation.

        Args:
            x (Tensor): Input tensor (B, in_channels, T).
            c (Tensor): Auxiliary input tensor (B, aux_channels, T').

        Returns:
            Tensor: Output tensor (B, in_channels, T * in_upsample_factor).
            Tensor: Upsampled aux tensor (B, in_channels, T * aux_upsample_factor).

        """
        x = self.norm(x)
        c = self.upsample(c)
        c = self.aux_conv(c)
        cg = self.gated_conv(c)
        # split into modulation scale (cg1) and shift (cg2)
        cg1, cg2 = cg.split(cg.size(1) // 2, dim=1)
        # NOTE(kan-bayashi): Use upsample for noise input here?
        y = cg1 * self.upsample(x) + cg2
        # NOTE(kan-bayashi): Return upsampled aux here?
        return y, c
class TADEResBlock(torch.nn.Module):
    """TADEResBlock module."""

    def __init__(
        self,
        in_channels=64,
        aux_channels=80,
        kernel_size=9,
        dilation=2,
        bias=True,
        upsample_factor=2,
        upsample_mode="nearest",
        gated_function="softmax",
    ):
        """Initialize TADEResBlock module.

        Args:
            in_channels (int): Number of input channels.
            aux_channels (int): Number of auxiliary (conditioning) channels.
            kernel_size (int): Kernel size of the 1D convolutions.
            dilation (int): Dilation factor of the second gated convolution.
            bias (bool): Whether to add bias parameter in convolution layers.
            upsample_factor (int): Upsampling factor.
            upsample_mode (str): Interpolation mode for torch.nn.Upsample.
            gated_function (str): Gating function type ("softmax" or "sigmoid").

        """
        super().__init__()
        self.tade1 = TADELayer(
            in_channels=in_channels,
            aux_channels=aux_channels,
            kernel_size=kernel_size,
            bias=bias,
            # NOTE(kan-bayashi): Use upsample in the first TADE layer?
            upsample_factor=1,
            upsample_mode=upsample_mode,
        )
        self.gated_conv1 = torch.nn.Conv1d(
            in_channels,
            in_channels * 2,
            kernel_size,
            1,
            bias=bias,
            padding=(kernel_size - 1) // 2,
        )
        # the second TADE layer conditions on the (projected) aux features
        # returned by the first one, hence aux_channels=in_channels
        self.tade2 = TADELayer(
            in_channels=in_channels,
            aux_channels=in_channels,
            kernel_size=kernel_size,
            bias=bias,
            upsample_factor=upsample_factor,
            upsample_mode=upsample_mode,
        )
        self.gated_conv2 = torch.nn.Conv1d(
            in_channels,
            in_channels * 2,
            kernel_size,
            1,
            bias=bias,
            dilation=dilation,
            padding=(kernel_size - 1) // 2 * dilation,
        )
        self.upsample = torch.nn.Upsample(
            scale_factor=upsample_factor, mode=upsample_mode
        )
        if gated_function == "softmax":
            self.gated_function = partial(torch.softmax, dim=1)
        elif gated_function == "sigmoid":
            self.gated_function = torch.sigmoid
        else:
            raise ValueError(f"{gated_function} is not supported.")

    def forward(self, x, c):
        """Calculate forward propagation.

        Args:
            x (Tensor): Input tensor (B, in_channels, T).
            c (Tensor): Auxiliary input tensor (B, aux_channels, T').

        Returns:
            Tensor: Output tensor (B, in_channels, T * in_upsample_factor).
            Tensor: Upsampled auxiliary tensor (B, in_channels, T * in_upsample_factor).

        """
        residual = x

        x, c = self.tade1(x, c)
        x = self.gated_conv1(x)
        # gated activation: gate(xa) * tanh(xb)
        xa, xb = x.split(x.size(1) // 2, dim=1)
        x = self.gated_function(xa) * torch.tanh(xb)

        x, c = self.tade2(x, c)
        x = self.gated_conv2(x)
        xa, xb = x.split(x.size(1) // 2, dim=1)
        x = self.gated_function(xa) * torch.tanh(xb)

        # NOTE(kan-bayashi): Return upsampled aux here?
        return self.upsample(residual) + x, c
| 4,805 | 28.850932 | 88 | py |
ParallelWaveGAN | ParallelWaveGAN-master/parallel_wavegan/layers/sine.py | """Sine generator."""
import numpy as np
import torch
class SineGen(torch.nn.Module):
    """Definition of sine generator.

    Generates sine waveforms (fundamental tone plus harmonic overtones) from a
    sample-level F0 contour, together with a voiced/unvoiced mask and noise.
    """
    def __init__(
        self,
        samp_rate,
        harmonic_num=0,
        sine_amp=0.1,
        noise_std=0.003,
        voiced_threshold=0,
        flag_for_pulse=False,
    ):
        """Initialize sine generator.
        Args:
            samp_rate (int): sampling rate in Hz.
            harmonic_num (int): number of harmonic overtones (default 0).
            sine_amp (float): amplitude of sine-waveform (default 0.1).
            noise_std (float): std of Gaussian noise (default 0.003).
            voiced_threshold (int): F0 threshold for U/V classification (default 0).
            flag_for_pulse (bool): Whether this SineGen module is used inside PulseGen (default False).
        Note: when flag_for_pulse is True, the first time step of a voiced
            segment is always sin(np.pi) or cos(0)
        """
        super(SineGen, self).__init__()
        self.sine_amp = sine_amp
        self.noise_std = noise_std
        self.harmonic_num = harmonic_num
        # number of generated sinusoids: fundamental + harmonic overtones
        self.dim = self.harmonic_num + 1
        self.sampling_rate = samp_rate
        self.voiced_threshold = voiced_threshold
        self.flag_for_pulse = flag_for_pulse
    def _f02uv(self, f0):
        # generate uv signal: 1 where f0 is above the voiced threshold, else 0
        uv = torch.ones_like(f0)
        uv = uv * (f0 > self.voiced_threshold)
        return uv
    def _f02sine(self, f0_values):
        """Convert f0 to sine.
        Args:
            f0_values: (B, length, dim) where dim indicates fundamental tone and overtones.
        """
        # convert to F0 in rad. The integer part n can be ignored
        # because 2 * np.pi * n doesn't affect phase
        rad_values = (f0_values / self.sampling_rate) % 1
        # initial phase noise (no noise for fundamental component)
        rand_ini = torch.rand(
            f0_values.shape[0], f0_values.shape[2], device=f0_values.device
        )
        rand_ini[:, 0] = 0
        rad_values[:, 0, :] = rad_values[:, 0, :] + rand_ini
        # instantaneous phase sine[t] = sin(2*pi \sum_i=1 ^{t} rad)
        if not self.flag_for_pulse:
            # for normal case
            # To prevent torch.cumsum numerical overflow,
            # it is necessary to add -1 whenever \sum_k=1^n rad_value_k > 1.
            # Buffer tmp_over_one_idx indicates the time step to add -1.
            # This will not change F0 of sine because (x-1) * 2*pi = x * 2*pi - 2*pi
            tmp_over_one = torch.cumsum(rad_values, 1) % 1
            tmp_over_one_idx = (tmp_over_one[:, 1:, :] - tmp_over_one[:, :-1, :]) < 0
            cumsum_shift = torch.zeros_like(rad_values)
            cumsum_shift[:, 1:, :] = tmp_over_one_idx * -1.0
            sines = torch.sin(
                torch.cumsum(rad_values + cumsum_shift, dim=1) * 2 * np.pi
            )
        else:
            # If necessary, make sure that the first time step of every
            # voiced segments is sin(pi) or cos(0)
            # This is used for pulse-train generation
            # identify the last time step in unvoiced segments
            uv = self._f02uv(f0_values)
            uv_1 = torch.roll(uv, shifts=-1, dims=1)
            uv_1[:, -1, :] = 1
            u_loc = (uv < 1) * (uv_1 > 0)
            # get the instantaneous phase
            tmp_cumsum = torch.cumsum(rad_values, dim=1)
            # different batch needs to be processed differently
            for idx in range(f0_values.shape[0]):
                temp_sum = tmp_cumsum[idx, u_loc[idx, :, 0], :]
                temp_sum[1:, :] = temp_sum[1:, :] - temp_sum[0:-1, :]
                # stores the accumulation of i.phase within
                # each voiced segments
                tmp_cumsum[idx, :, :] = 0
                tmp_cumsum[idx, u_loc[idx, :, 0], :] = temp_sum
            # rad_values - tmp_cumsum: remove the accumulation of i.phase
            # within the previous voiced segment.
            i_phase = torch.cumsum(rad_values - tmp_cumsum, dim=1)
            # get the sines (cosine so each voiced segment starts at cos(0))
            sines = torch.cos(i_phase * 2 * np.pi)
        return sines
    def forward(self, f0):
        """Calculate forward propagation.
        Args:
            f0 (torch.Tensor): F0 tensor (B, T, 1), f0 for unvoiced steps should be 0.
        Returns:
            torch.Tensor: Sine tensor (B, T, 1).
            torch.Tensor: UV tensor (B, T, 1).
            torch.Tensor: Noise tensor (B, T, 1).
        """
        # no gradient is needed for the deterministic waveform construction
        with torch.no_grad():
            f0_buf = torch.zeros(f0.shape[0], f0.shape[1], self.dim, device=f0.device)
            # fundamental component
            f0_buf[:, :, 0] = f0[:, :, 0]
            for idx in np.arange(self.harmonic_num):
                # idx + 2: the (idx+1)-th overtone, (idx+2)-th harmonic
                f0_buf[:, :, idx + 1] = f0_buf[:, :, 0] * (idx + 2)
            # generate sine waveforms
            sine_waves = self._f02sine(f0_buf) * self.sine_amp
            # generate uv signal
            uv = self._f02uv(f0)
            # noise: for unvoiced should be similar to sine_amp
            # std = self.sine_amp/3 -> max value ~ self.sine_amp
            # for voiced regions is self.noise_std
            noise_amp = uv * self.noise_std + (1 - uv) * self.sine_amp / 3
            noise = noise_amp * torch.randn_like(sine_waves)
            # first: set the unvoiced part to 0 by uv
            # then: additive noise
            sine_waves = sine_waves * uv + noise
        return sine_waves, uv, noise
| 5,554 | 36.789116 | 103 | py |
ParallelWaveGAN | ParallelWaveGAN-master/parallel_wavegan/layers/residual_block.py | # -*- coding: utf-8 -*-
"""Residual block modules.
References:
- https://github.com/r9y9/wavenet_vocoder
- https://github.com/jik876/hifi-gan
"""
import math
import torch
import torch.nn.functional as F
from parallel_wavegan.layers.causal_conv import CausalConv1d
class Conv1d(torch.nn.Conv1d):
    """1D convolution layer with customized initialization.

    The weight is initialized with Kaiming (He) normal initialization and the
    bias, when present, is initialized to zero.
    """

    def __init__(self, *args, **kwargs):
        """Initialize Conv1d module (same signature as ``torch.nn.Conv1d``)."""
        super().__init__(*args, **kwargs)

    def reset_parameters(self):
        """Apply Kaiming-normal init to the weight and zero the bias."""
        torch.nn.init.kaiming_normal_(self.weight, nonlinearity="relu")
        if self.bias is None:
            return
        torch.nn.init.constant_(self.bias, 0.0)
class Conv1d1x1(Conv1d):
    """Pointwise (kernel size 1) variant of the customized Conv1d."""

    def __init__(self, in_channels, out_channels, bias):
        """Initialize 1x1 Conv1d module.

        Args:
            in_channels (int): Number of input channels.
            out_channels (int): Number of output channels.
            bias (bool): Whether to add a (zero-initialized) bias term.

        """
        super().__init__(
            in_channels,
            out_channels,
            kernel_size=1,
            padding=0,
            dilation=1,
            bias=bias,
        )
class WaveNetResidualBlock(torch.nn.Module):
    """Residual block module in WaveNet."""
    def __init__(
        self,
        kernel_size=3,
        residual_channels=64,
        gate_channels=128,
        skip_channels=64,
        aux_channels=80,
        dropout=0.0,
        dilation=1,
        bias=True,
        use_causal_conv=False,
    ):
        """Initialize WaveNetResidualBlock module.
        Args:
            kernel_size (int): Kernel size of dilation convolution layer.
            residual_channels (int): Number of channels for residual connection.
            gate_channels (int): Number of channels of the gated activation
                (split in half for the tanh / sigmoid branches).
            skip_channels (int): Number of channels for skip connection.
            aux_channels (int): Local conditioning channels i.e. auxiliary input dimension.
            dropout (float): Dropout probability.
            dilation (int): Dilation factor.
            bias (bool): Whether to add bias parameter in convolution layers.
            use_causal_conv (bool): Whether to use use_causal_conv or non-use_causal_conv convolution.
        """
        super().__init__()
        self.dropout = dropout
        # no future time stamps available
        if use_causal_conv:
            padding = (kernel_size - 1) * dilation
        else:
            assert (kernel_size - 1) % 2 == 0, "Not support even number kernel size."
            padding = (kernel_size - 1) // 2 * dilation
        self.use_causal_conv = use_causal_conv
        # dilation conv
        self.conv = Conv1d(
            residual_channels,
            gate_channels,
            kernel_size,
            padding=padding,
            dilation=dilation,
            bias=bias,
        )
        # local conditioning
        if aux_channels > 0:
            self.conv1x1_aux = Conv1d1x1(aux_channels, gate_channels, bias=False)
        else:
            self.conv1x1_aux = None
        # conv output is split into two groups
        gate_out_channels = gate_channels // 2
        self.conv1x1_out = Conv1d1x1(gate_out_channels, residual_channels, bias=bias)
        self.conv1x1_skip = Conv1d1x1(gate_out_channels, skip_channels, bias=bias)
    def forward(self, x, c):
        """Calculate forward propagation.
        Args:
            x (Tensor): Input tensor (B, residual_channels, T).
            c (Tensor): Local conditioning auxiliary tensor (B, aux_channels, T).
        Returns:
            Tensor: Output tensor for residual connection (B, residual_channels, T).
            Tensor: Output tensor for skip connection (B, skip_channels, T).
        """
        residual = x
        x = F.dropout(x, p=self.dropout, training=self.training)
        x = self.conv(x)
        # remove future time steps if use_causal_conv conv
        x = x[:, :, : residual.size(-1)] if self.use_causal_conv else x
        # split into two part for gated activation
        splitdim = 1
        xa, xb = x.split(x.size(splitdim) // 2, dim=splitdim)
        # local conditioning: add the projected aux features to both branches
        if c is not None:
            assert self.conv1x1_aux is not None
            c = self.conv1x1_aux(c)
            ca, cb = c.split(c.size(splitdim) // 2, dim=splitdim)
            xa, xb = xa + ca, xb + cb
        # gated activation unit: tanh branch modulated by a sigmoid gate
        x = torch.tanh(xa) * torch.sigmoid(xb)
        # for skip connection
        s = self.conv1x1_skip(x)
        # for residual connection; sqrt(0.5) keeps the output variance stable
        x = (self.conv1x1_out(x) + residual) * math.sqrt(0.5)
        return x, s
class HiFiGANResidualBlock(torch.nn.Module):
    """Residual block module in HiFiGAN."""
    def __init__(
        self,
        kernel_size=3,
        channels=512,
        dilations=(1, 3, 5),
        bias=True,
        use_additional_convs=True,
        nonlinear_activation="LeakyReLU",
        nonlinear_activation_params={"negative_slope": 0.1},
        use_causal_conv=False,
    ):
        """Initialize HiFiGANResidualBlock module.
        Args:
            kernel_size (int): Kernel size of dilation convolution layer.
            channels (int): Number of channels for convolution layer.
            dilations (List[int]): List of dilation factors.
            use_additional_convs (bool): Whether to use additional convolution layers.
            bias (bool): Whether to add bias parameter in convolution layers.
            nonlinear_activation (str): Activation function module name.
            nonlinear_activation_params (dict): Hyperparameters for activation function.
            use_causal_conv (bool): Whether to use causal structure.
        """
        super().__init__()
        self.use_additional_convs = use_additional_convs
        # convs1 holds one dilated conv per dilation factor; convs2 (optional)
        # holds a matching non-dilated conv applied after it
        self.convs1 = torch.nn.ModuleList()
        if use_additional_convs:
            self.convs2 = torch.nn.ModuleList()
        self.use_causal_conv = use_causal_conv
        assert kernel_size % 2 == 1, "Kernel size must be odd number."
        for dilation in dilations:
            # non-causal: symmetric "same" padding; causal: CausalConv1d pads left only
            if not use_causal_conv:
                self.convs1 += [
                    torch.nn.Sequential(
                        getattr(torch.nn, nonlinear_activation)(
                            **nonlinear_activation_params
                        ),
                        torch.nn.Conv1d(
                            channels,
                            channels,
                            kernel_size,
                            1,
                            dilation=dilation,
                            bias=bias,
                            padding=(kernel_size - 1) // 2 * dilation,
                        ),
                    )
                ]
            else:
                self.convs1 += [
                    torch.nn.Sequential(
                        getattr(torch.nn, nonlinear_activation)(
                            **nonlinear_activation_params
                        ),
                        CausalConv1d(
                            channels,
                            channels,
                            kernel_size,
                            dilation=dilation,
                            bias=bias,
                        ),
                    )
                ]
            if use_additional_convs:
                if not use_causal_conv:
                    self.convs2 += [
                        torch.nn.Sequential(
                            getattr(torch.nn, nonlinear_activation)(
                                **nonlinear_activation_params
                            ),
                            torch.nn.Conv1d(
                                channels,
                                channels,
                                kernel_size,
                                dilation=1,
                                bias=bias,
                                padding=(kernel_size - 1) // 2,
                            ),
                        )
                    ]
                else:
                    self.convs2 += [
                        torch.nn.Sequential(
                            getattr(torch.nn, nonlinear_activation)(
                                **nonlinear_activation_params
                            ),
                            CausalConv1d(
                                channels,
                                channels,
                                kernel_size,
                                dilation=1,
                                bias=bias,
                            ),
                        ),
                    ]
    def forward(self, x):
        """Calculate forward propagation.
        Args:
            x (Tensor): Input tensor (B, channels, T).
        Returns:
            Tensor: Output tensor (B, channels, T).
        """
        # each iteration applies (activation + dilated conv) [+ activation + conv]
        # with a residual connection around the pair
        for idx in range(len(self.convs1)):
            xt = self.convs1[idx](x)
            if self.use_additional_convs:
                xt = self.convs2[idx](xt)
            x = xt + x
        return x
| 8,832 | 33.104247 | 102 | py |
ParallelWaveGAN | ParallelWaveGAN-master/parallel_wavegan/layers/variance_predictor.py | #!/usr/bin/env python3
# Copyright 2020 Tomoki Hayashi
# 2023 Jiatong Shi
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""Variance predictor related modules."""
import torch
from typeguard import check_argument_types
from parallel_wavegan.layers.layer_norm import LayerNorm
class VariancePredictor(torch.nn.Module):
    """Variance predictor module.

    This is a module of the variance predictor described in `FastSpeech 2:
    Fast and High-Quality End-to-End Text to Speech`_.

    .. _`FastSpeech 2: Fast and High-Quality End-to-End Text to Speech`:
        https://arxiv.org/abs/2006.04558

    """

    def __init__(
        self,
        idim: int,
        n_layers: int = 2,
        n_chans: int = 384,
        kernel_size: int = 3,
        bias: bool = True,
        dropout_rate: float = 0.5,
    ):
        """Initilize duration predictor module.

        Args:
            idim (int): Input dimension.
            n_layers (int): Number of convolutional layers.
            n_chans (int): Number of channels of convolutional layers.
            kernel_size (int): Kernel size of convolutional layers.
            bias (bool): Whether to use bias in the convolutions.
            dropout_rate (float): Dropout rate.

        """
        assert check_argument_types()
        super().__init__()
        # stack of Conv1d -> ReLU -> LayerNorm -> Dropout blocks; the first
        # layer maps idim -> n_chans, the rest keep n_chans channels
        self.conv = torch.nn.ModuleList(
            [
                torch.nn.Sequential(
                    torch.nn.Conv1d(
                        idim if layer_idx == 0 else n_chans,
                        n_chans,
                        kernel_size,
                        stride=1,
                        padding=(kernel_size - 1) // 2,
                        bias=bias,
                    ),
                    torch.nn.ReLU(),
                    LayerNorm(n_chans, dim=1),
                    torch.nn.Dropout(dropout_rate),
                )
                for layer_idx in range(n_layers)
            ]
        )
        # final projection to a scalar per frame
        self.linear = torch.nn.Linear(n_chans, 1)

    def forward(self, xs: torch.Tensor, x_masks: torch.Tensor = None) -> torch.Tensor:
        """Calculate forward propagation.

        Args:
            xs (Tensor): Batch of input sequences (B, Tmax, idim).
            x_masks (ByteTensor): Batch of masks indicating padded part (B, Tmax).

        Returns:
            Tensor: Batch of predicted sequences (B, Tmax, 1).

        """
        hs = xs.transpose(1, -1)  # (B, idim, Tmax)
        for layer in self.conv:
            hs = layer(hs)  # (B, C, Tmax)
        out = self.linear(hs.transpose(1, 2))  # (B, Tmax, 1)
        if x_masks is None:
            return out
        # zero out the padded positions
        return out.masked_fill(x_masks, 0.0)
| 2,637 | 28.977273 | 86 | py |
ParallelWaveGAN | ParallelWaveGAN-master/parallel_wavegan/layers/causal_conv.py | # -*- coding: utf-8 -*-
# Copyright 2020 Tomoki Hayashi
# MIT License (https://opensource.org/licenses/MIT)
"""Causal convolusion layer modules."""
import torch
class CausalConv1d(torch.nn.Module):
    """Causal 1D convolution: the output at time t never sees inputs after t."""

    def __init__(
        self,
        in_channels,
        out_channels,
        kernel_size,
        dilation=1,
        bias=True,
        pad="ConstantPad1d",
        pad_params={"value": 0.0},
    ):
        """Initialize CausalConv1d module.

        Args:
            in_channels (int): Number of input channels.
            out_channels (int): Number of output channels.
            kernel_size (int): Kernel size of the convolution.
            dilation (int): Dilation factor.
            bias (bool): Whether to use bias in the convolution.
            pad (str): Name of a ``torch.nn`` padding module.
            pad_params (dict): Extra arguments for the padding module.

        """
        super().__init__()
        # pad by the receptive-field extent; combined with the slicing in
        # forward this makes every output depend only on past inputs
        receptive = (kernel_size - 1) * dilation
        self.pad = getattr(torch.nn, pad)(receptive, **pad_params)
        self.conv = torch.nn.Conv1d(
            in_channels, out_channels, kernel_size, dilation=dilation, bias=bias
        )

    def forward(self, x):
        """Calculate forward propagation.

        Args:
            x (Tensor): Input tensor (B, in_channels, T).

        Returns:
            Tensor: Output tensor (B, out_channels, T).

        """
        out = self.conv(self.pad(x))
        # trim to the input length, dropping frames computed from right padding
        return out[:, :, : x.size(2)]
class CausalConvTranspose1d(torch.nn.Module):
    """Causal transposed 1D convolution (deconvolution) for upsampling."""

    def __init__(
        self,
        in_channels,
        out_channels,
        kernel_size,
        stride,
        bias=True,
        pad="ReplicationPad1d",
        pad_params={},
    ):
        """Initialize CausalConvTranspose1d module.

        Args:
            in_channels (int): Number of input channels.
            out_channels (int): Number of output channels.
            kernel_size (int): Kernel size of the transposed convolution.
            stride (int): Stride (upsampling factor).
            bias (bool): Whether to use bias in the transposed convolution.
            pad (str): Name of a ``torch.nn`` padding module.
            pad_params (dict): Extra arguments for the padding module.

        """
        super().__init__()
        # NOTE (yoneyama): This padding is to match the number of inputs
        # used to calculate the first output sample with the others.
        self.pad = getattr(torch.nn, pad)((1, 0), **pad_params)
        self.deconv = torch.nn.ConvTranspose1d(
            in_channels, out_channels, kernel_size, stride, bias=bias
        )
        self.stride = stride

    def forward(self, x):
        """Calculate forward propagation.

        Args:
            x (Tensor): Input tensor (B, in_channels, T_in).

        Returns:
            Tensor: Output tensor (B, out_channels, T_out).

        """
        padded = self.pad(x)
        upsampled = self.deconv(padded)
        # drop `stride` frames on both ends (left padding and deconv tail)
        return upsampled[:, :, self.stride : -self.stride]
| 2,162 | 26.379747 | 85 | py |
ParallelWaveGAN | ParallelWaveGAN-master/parallel_wavegan/layers/layer_norm.py | """LayerNorm for specific dimensions.
Adapted from ESPnet Transformer LayerNorm.
"""
import torch
class LayerNorm(torch.nn.LayerNorm):
    """Layer normalization over an arbitrary dimension.

    Args:
        nout (int): Output dim size.
        dim (int): Dimension to be normalized.

    """

    def __init__(self, nout, dim=-1):
        """Construct a LayerNorm object."""
        super().__init__(nout, eps=1e-12)
        self.dim = dim

    def forward(self, x):
        """Apply layer normalization.

        Args:
            x (torch.Tensor): Input tensor.

        Returns:
            torch.Tensor: Normalized tensor.

        """
        if self.dim != -1:
            # move the target dim to the last axis, normalize, then move it back
            x = x.transpose(self.dim, -1)
            x = super().forward(x)
            return x.transpose(self.dim, -1)
        return super().forward(x)
| 870 | 20.243902 | 56 | py |
ParallelWaveGAN | ParallelWaveGAN-master/parallel_wavegan/layers/vector_quantize_codebook.py | # -*- coding: utf-8 -*-
# Copyright 2020 Tomoki Hayashi
# MIT License (https://opensource.org/licenses/MIT)
"""Vector quantize codebook modules.
This code is modified from https://github.com/ritheshkumar95/pytorch-vqvae.
"""
import torch
from parallel_wavegan.functions import vector_quantize, vector_quantize_straight_through
class VQCodebook(torch.nn.Module):
    """Vector quantize codebook module."""

    def __init__(self, num_embeds, embed_dim):
        """Initialize VQCodebook module.

        Args:
            num_embeds (int): Number of embeddings.
            embed_dim (int): Dimension of each embedding.

        """
        super().__init__()
        self.embedding = torch.nn.Embedding(num_embeds, embed_dim)
        # uniform init in [-1/num_embeds, 1/num_embeds]
        bound = 1.0 / num_embeds
        self.embedding.weight.data.uniform_(-bound, bound)

    def forward(self, z_e):
        """Look up the nearest codebook indices.

        Args:
            z_e (Tensor): Input tensor (B, embed_dim, T).

        Returns:
            LongTensor: Codebook indices (B, T).

        """
        # (B, embed_dim, T) -> (B, T, embed_dim) for the nearest-neighbor lookup
        latents = z_e.transpose(2, 1).contiguous()
        return vector_quantize(latents, self.embedding.weight)

    def straight_through(self, z_e):
        """Quantize with the straight-through gradient estimator.

        Args:
            z_e (Tensor): Input tensor (B, embed_dim, T).

        Returns:
            Tensor: Codebook embeddings for the decoder inputs (B, embed_dim, T).
            Tensor: Codebook embeddings for the quantization loss (B, embed_dim, T).

        """
        latents = z_e.transpose(2, 1).contiguous()
        # decoder path: codebook weights are detached so gradients pass
        # straight through the quantization
        quantized, indices = vector_quantize_straight_through(
            latents, self.embedding.weight.detach()
        )
        z_q = quantized.transpose(2, 1).contiguous()
        # codebook path: the same embeddings selected without detaching,
        # used for the quantization loss
        selected = torch.index_select(self.embedding.weight, dim=0, index=indices)
        z_q_bar = selected.view_as(latents).transpose(1, 2).contiguous()
        return z_q, z_q_bar
| 2,132 | 28.219178 | 88 | py |
ParallelWaveGAN | ParallelWaveGAN-master/parallel_wavegan/layers/upsample.py | # -*- coding: utf-8 -*-
"""Upsampling module.
This code is modified from https://github.com/r9y9/wavenet_vocoder.
"""
import numpy as np
import torch
import torch.nn.functional as F
from parallel_wavegan.layers import Conv1d
class Stretch2d(torch.nn.Module):
    """2D stretching (interpolation-based upsampling) module."""

    def __init__(self, x_scale, y_scale, mode="nearest"):
        """Initialize Stretch2d module.

        Args:
            x_scale (int): X scaling factor (Time axis in spectrogram).
            y_scale (int): Y scaling factor (Frequency axis in spectrogram).
            mode (str): Interpolation mode.

        """
        super().__init__()
        self.x_scale = x_scale
        self.y_scale = y_scale
        self.mode = mode

    def forward(self, x):
        """Calculate forward propagation.

        Args:
            x (Tensor): Input tensor (B, C, F, T).

        Returns:
            Tensor: Interpolated tensor (B, C, F * y_scale, T * x_scale),

        """
        # interpolate expects (height, width) = (frequency, time) scale order
        scale = (self.y_scale, self.x_scale)
        return F.interpolate(x, scale_factor=scale, mode=self.mode)
class Conv2d(torch.nn.Conv2d):
    """2D convolution with moving-average style initialization.

    Weights are filled with 1 / prod(kernel_size) so a freshly built layer
    averages its receptive field; the bias (if any) starts at zero.
    """

    def __init__(self, *args, **kwargs):
        """Initialize Conv2d module (same signature as ``torch.nn.Conv2d``)."""
        super().__init__(*args, **kwargs)

    def reset_parameters(self):
        """Reset parameters to the averaging initialization."""
        fill_value = 1.0 / np.prod(self.kernel_size)
        self.weight.data.fill_(fill_value)
        if self.bias is not None:
            torch.nn.init.constant_(self.bias, 0.0)
class UpsampleNetwork(torch.nn.Module):
    """Upsampling network module."""
    def __init__(
        self,
        upsample_scales,
        nonlinear_activation=None,
        nonlinear_activation_params={},
        interpolate_mode="nearest",
        freq_axis_kernel_size=1,
        use_causal_conv=False,
    ):
        """Initialize upsampling network module.
        Args:
            upsample_scales (list): List of upsampling scales.
            nonlinear_activation (str): Activation function name.
            nonlinear_activation_params (dict): Arguments for specified activation function.
            interpolate_mode (str): Interpolation mode.
            freq_axis_kernel_size (int): Kernel size in the direction of frequency axis.
            use_causal_conv (bool): Whether to use causal convolutions
                (time-axis padding is applied on one side and trimmed in forward).
        """
        super(UpsampleNetwork, self).__init__()
        self.use_causal_conv = use_causal_conv
        self.up_layers = torch.nn.ModuleList()
        # each scale contributes: stretch (time axis) -> smoothing conv [-> activation]
        for scale in upsample_scales:
            # interpolation layer
            stretch = Stretch2d(scale, 1, interpolate_mode)
            self.up_layers += [stretch]
            # conv layer
            assert (
                freq_axis_kernel_size - 1
            ) % 2 == 0, "Not support even number freq axis kernel size."
            freq_axis_padding = (freq_axis_kernel_size - 1) // 2
            kernel_size = (freq_axis_kernel_size, scale * 2 + 1)
            if use_causal_conv:
                padding = (freq_axis_padding, scale * 2)
            else:
                padding = (freq_axis_padding, scale)
            conv = Conv2d(1, 1, kernel_size=kernel_size, padding=padding, bias=False)
            self.up_layers += [conv]
            # nonlinear
            if nonlinear_activation is not None:
                nonlinear = getattr(torch.nn, nonlinear_activation)(
                    **nonlinear_activation_params
                )
                self.up_layers += [nonlinear]
    def forward(self, c):
        """Calculate forward propagation.
        Args:
            c : Input tensor (B, C, T).
        Returns:
            Tensor: Upsampled tensor (B, C, T'), where T' = T * prod(upsample_scales).
        """
        c = c.unsqueeze(1)  # (B, 1, C, T)
        for f in self.up_layers:
            # in causal mode, trim the extra right-side frames produced by
            # the symmetric time-axis padding of the conv layers
            if self.use_causal_conv and isinstance(f, Conv2d):
                c = f(c)[..., : c.size(-1)]
            else:
                c = f(c)
        return c.squeeze(1)  # (B, C, T')
class ConvInUpsampleNetwork(torch.nn.Module):
    """Convolution + upsampling network module."""
    def __init__(
        self,
        upsample_scales,
        nonlinear_activation=None,
        nonlinear_activation_params={},
        interpolate_mode="nearest",
        freq_axis_kernel_size=1,
        aux_channels=80,
        aux_context_window=0,
        use_causal_conv=False,
    ):
        """Initialize convolution + upsampling network module.
        Args:
            upsample_scales (list): List of upsampling scales.
            nonlinear_activation (str): Activation function name.
            nonlinear_activation_params (dict): Arguments for specified activation function.
            interpolate_mode (str): Interpolation mode.
            freq_axis_kernel_size (int): Kernel size in the direction of frequency axis.
            aux_channels (int): Number of channels of pre-convolutional layer.
            aux_context_window (int): Context window size of the pre-convolutional layer.
            use_causal_conv (bool): Whether to use causal structure.
        """
        super(ConvInUpsampleNetwork, self).__init__()
        self.aux_context_window = aux_context_window
        # causal trimming is only needed when a context window is actually used
        self.use_causal_conv = use_causal_conv and aux_context_window > 0
        # To capture wide-context information in conditional features
        # (causal: past context only; non-causal: symmetric context)
        kernel_size = (
            aux_context_window + 1 if use_causal_conv else 2 * aux_context_window + 1
        )
        # NOTE(kan-bayashi): Here do not use padding because the input is already padded
        self.conv_in = Conv1d(
            aux_channels, aux_channels, kernel_size=kernel_size, bias=False
        )
        self.upsample = UpsampleNetwork(
            upsample_scales=upsample_scales,
            nonlinear_activation=nonlinear_activation,
            nonlinear_activation_params=nonlinear_activation_params,
            interpolate_mode=interpolate_mode,
            freq_axis_kernel_size=freq_axis_kernel_size,
            use_causal_conv=use_causal_conv,
        )
    def forward(self, c):
        """Calculate forward propagation.
        Args:
            c : Input tensor (B, C, T').
        Returns:
            Tensor: Upsampled tensor (B, C, T),
                where T = (T' - aux_context_window * 2) * prod(upsample_scales).
        Note:
            The length of inputs considers the context window size.
        """
        c_ = self.conv_in(c)
        # in causal mode drop the trailing frames that depend on future context
        c = c_[:, :, : -self.aux_context_window] if self.use_causal_conv else c_
        return self.upsample(c)
| 6,489 | 32.282051 | 92 | py |
ParallelWaveGAN | ParallelWaveGAN-master/parallel_wavegan/layers/length_regulator.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2019 Tomoki Hayashi
# 2023 Jiatong Shi
# Adapted from ESPnet Fastspeech LengthRegulator
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""Length regulator related modules."""
import logging
import torch
def pad_list(xs, pad_value):
    """Perform padding for the list of tensors.

    Args:
        xs (List): List of Tensors [(T_1, `*`), (T_2, `*`), ..., (T_B, `*`)].
        pad_value (float): Value for padding.

    Returns:
        Tensor: Padded tensor (B, Tmax, `*`).

    Examples:
        >>> x = [torch.ones(4), torch.ones(2), torch.ones(1)]
        >>> x
        [tensor([1., 1., 1., 1.]), tensor([1., 1.]), tensor([1.])]
        >>> pad_list(x, 0)
        tensor([[1., 1., 1., 1.],
                [1., 1., 0., 0.],
                [1., 0., 0., 0.]])

    """
    max_len = max(x.size(0) for x in xs)
    # allocate (B, Tmax, *) on the same device/dtype as the inputs, pre-filled
    # with the pad value, then copy each sequence into its leading slots
    padded = xs[0].new(len(xs), max_len, *xs[0].size()[1:]).fill_(pad_value)
    for i, x in enumerate(xs):
        padded[i, : x.size(0)] = x
    return padded
class LengthRegulator(torch.nn.Module):
    """Length regulator module for feed-forward Transformer.

    This is a module of length regulator described in
    `FastSpeech: Fast, Robust and Controllable Text to Speech`_.
    The length regulator expands char or phoneme-level embedding features to
    frame-level by repeating each feature based on the corresponding
    predicted durations.

    .. _`FastSpeech: Fast, Robust and Controllable Text to Speech`:
        https://arxiv.org/pdf/1905.09263.pdf

    """

    def __init__(self, pad_value=0.0):
        """Initilize length regulator module.

        Args:
            pad_value (float, optional): Value used for padding.

        """
        super().__init__()
        self.pad_value = pad_value

    def forward(self, xs, ds, alpha=1.0):
        """Calculate forward propagation.

        Args:
            xs (Tensor): Batch of sequences of char or phoneme embeddings (B, Tmax, D).
            ds (LongTensor): Batch of durations of each frame (B, T).
            alpha (float, optional): Alpha value to control speed of speech.

        Returns:
            Tensor: replicated input tensor based on durations (B, T*, D).

        """
        if alpha != 1.0:
            assert alpha > 0
            # rescale durations to change the overall speaking speed
            ds = torch.round(ds.float() * alpha).long()
        if ds.sum() == 0:
            logging.warning(
                "predicted durations includes all 0 sequences. "
                "fill the first element with 1."
            )
            # NOTE(kan-bayashi): This case must not be happened in teacher forcing.
            #   It will be happened in inference with a bad duration predictor.
            #   So we do not need to care the padded sequence case here.
            ds[ds.sum(dim=1).eq(0)] = 1
        expanded = [torch.repeat_interleave(x, d, dim=0) for x, d in zip(xs, ds)]
        return pad_list(expanded, self.pad_value)
| 2,984 | 29.151515 | 87 | py |
ParallelWaveGAN | ParallelWaveGAN-master/parallel_wavegan/optimizers/radam.py | # -*- coding: utf-8 -*-
"""RAdam optimizer.
This code is drived from https://github.com/LiyuanLucasLiu/RAdam.
"""
import math
import torch
from torch.optim.optimizer import Optimizer
class RAdam(Optimizer):
    """Rectified Adam optimizer.

    Reference:
        On the Variance of the Adaptive Learning Rate and Beyond
        (https://arxiv.org/abs/1908.03265)
    """

    def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0):
        """Initilize RAdam optimizer.

        Args:
            params: Iterable of parameters or dicts defining parameter groups.
            lr (float): Learning rate.
            betas (tuple): Coefficients for the running averages of the
                gradient and its square.
            eps (float): Term added to the denominator for numerical stability.
            weight_decay (float): Weight decay (L2 penalty) coefficient.

        """
        defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay)
        # per-step cache of (step, N_sma, step_size), keyed by step % 10
        self.buffer = [[None, None, None] for ind in range(10)]
        super(RAdam, self).__init__(params, defaults)

    def __setstate__(self, state):
        """Set state."""
        super(RAdam, self).__setstate__(state)

    def step(self, closure=None):
        """Run one optimization step.

        Args:
            closure (callable, optional): Closure that reevaluates the model
                and returns the loss.

        Returns:
            Loss value returned by the closure, or None.

        """
        loss = None
        if closure is not None:
            loss = closure()
        for group in self.param_groups:
            for p in group["params"]:
                if p.grad is None:
                    continue
                grad = p.grad.data.float()
                if grad.is_sparse:
                    raise RuntimeError("RAdam does not support sparse gradients")
                p_data_fp32 = p.data.float()
                state = self.state[p]
                if len(state) == 0:
                    state["step"] = 0
                    state["exp_avg"] = torch.zeros_like(p_data_fp32)
                    state["exp_avg_sq"] = torch.zeros_like(p_data_fp32)
                else:
                    state["exp_avg"] = state["exp_avg"].type_as(p_data_fp32)
                    state["exp_avg_sq"] = state["exp_avg_sq"].type_as(p_data_fp32)
                exp_avg, exp_avg_sq = state["exp_avg"], state["exp_avg_sq"]
                beta1, beta2 = group["betas"]
                # update biased first / second moment estimates.
                # NOTE: use keyword `value=` / `alpha=` forms; the positional
                # (Number, Tensor) overloads are deprecated since PyTorch 1.5
                # and removed in recent versions.
                exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
                exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
                state["step"] += 1
                buffered = self.buffer[int(state["step"] % 10)]
                if state["step"] == buffered[0]:
                    N_sma, step_size = buffered[1], buffered[2]
                else:
                    buffered[0] = state["step"]
                    beta2_t = beta2 ** state["step"]
                    # length of the approximated SMA (simple moving average)
                    N_sma_max = 2 / (1 - beta2) - 1
                    N_sma = N_sma_max - 2 * state["step"] * beta2_t / (1 - beta2_t)
                    buffered[1] = N_sma
                    # more conservative since it's an approximated value
                    if N_sma >= 5:
                        step_size = math.sqrt(
                            (1 - beta2_t)
                            * (N_sma - 4)
                            / (N_sma_max - 4)
                            * (N_sma - 2)
                            / N_sma
                            * N_sma_max
                            / (N_sma_max - 2)
                        ) / (
                            1 - beta1 ** state["step"]
                        )  # NOQA
                    else:
                        # variance is not tractable yet: fall back to SGD-with-momentum
                        step_size = 1.0 / (1 - beta1 ** state["step"])
                    buffered[2] = step_size
                if group["weight_decay"] != 0:
                    p_data_fp32.add_(
                        p_data_fp32, alpha=-group["weight_decay"] * group["lr"]
                    )
                # more conservative since it's an approximated value
                if N_sma >= 5:
                    denom = exp_avg_sq.sqrt().add_(group["eps"])
                    p_data_fp32.addcdiv_(
                        exp_avg, denom, value=-step_size * group["lr"]
                    )
                else:
                    p_data_fp32.add_(exp_avg, alpha=-step_size * group["lr"])
                p.data.copy_(p_data_fp32)
        return loss
| 3,632 | 35.33 | 87 | py |
ParallelWaveGAN | ParallelWaveGAN-master/parallel_wavegan/optimizers/__init__.py | from torch.optim import * # NOQA
from .radam import * # NOQA
| 64 | 15.25 | 33 | py |
ParallelWaveGAN | ParallelWaveGAN-master/parallel_wavegan/utils/utils.py | # -*- coding: utf-8 -*-
# Copyright 2019 Tomoki Hayashi
# MIT License (https://opensource.org/licenses/MIT)
"""Utility functions."""
import fnmatch
import logging
import os
import re
import sys
import tarfile
from distutils.version import LooseVersion
import h5py
import numpy as np
import torch
import yaml
from filelock import FileLock
PRETRAINED_MODEL_LIST = {
"ljspeech_parallel_wavegan.v1": "1PdZv37JhAQH6AwNh31QlqruqrvjTBq7U",
"ljspeech_parallel_wavegan.v1.long": "1A9TsrD9fHxFviJVFjCk5W6lkzWXwhftv",
"ljspeech_parallel_wavegan.v1.no_limit": "1CdWKSiKoFNPZyF1lo7Dsj6cPKmfLJe72",
"ljspeech_parallel_wavegan.v3": "1-oZpwpWZMMolDYsCqeL12dFkXSBD9VBq",
"ljspeech_melgan.v1": "1i7-FPf9LPsYLHM6yNPoJdw5Q9d28C-ip",
"ljspeech_melgan.v1.long": "1x1b_R7d2561nqweK3FPb2muTdcFIYTu6",
"ljspeech_melgan.v3": "1J5gJ_FUZhOAKiRFWiAK6FcO5Z6oYJbmQ",
"ljspeech_melgan.v3.long": "124JnaLcRe7TsuAGh3XIClS3C7Wom9AU2",
"ljspeech_full_band_melgan.v2": "1Kb7q5zBeQ30Wsnma0X23G08zvgDG5oen",
"ljspeech_multi_band_melgan.v2": "1b70pJefKI8DhGYz4SxbEHpxm92tj1_qC",
"ljspeech_hifigan.v1": "1i6-hR_ksEssCYNlNII86v3AoeA1JcuWD",
"ljspeech_style_melgan.v1": "10aJSZfmCAobQJgRGio6cNyw6Xlgmme9-",
"jsut_parallel_wavegan.v1": "1qok91A6wuubuz4be-P9R2zKhNmQXG0VQ",
"jsut_multi_band_melgan.v2": "1chTt-76q2p69WPpZ1t1tt8szcM96IKad",
"jsut_hifigan.v1": "1vdgqTu9YKyGMCn-G7H2fI6UBC_4_55XB",
"jsut_style_melgan.v1": "1VIkjSxYxAGUVEvJxNLaOaJ7Twe48SH-s",
"csmsc_parallel_wavegan.v1": "1QTOAokhD5dtRnqlMPTXTW91-CG7jf74e",
"csmsc_multi_band_melgan.v2": "1G6trTmt0Szq-jWv2QDhqglMdWqQxiXQT",
"csmsc_hifigan.v1": "1fVKGEUrdhGjIilc21Sf0jODulAq6D1qY",
"csmsc_style_melgan.v1": "1kGUC_b9oVSv24vZRi66AAbSNUKJmbSCX",
"arctic_slt_parallel_wavegan.v1": "1_MXePg40-7DTjD0CDVzyduwQuW_O9aA1",
"jnas_parallel_wavegan.v1": "1D2TgvO206ixdLI90IqG787V6ySoXLsV_",
"vctk_parallel_wavegan.v1": "1bqEFLgAroDcgUy5ZFP4g2O2MwcwWLEca",
"vctk_parallel_wavegan.v1.long": "1tO4-mFrZ3aVYotgg7M519oobYkD4O_0-",
"vctk_multi_band_melgan.v2": "10PRQpHMFPE7RjF-MHYqvupK9S0xwBlJ_",
"vctk_hifigan.v1": "1oVOC4Vf0DYLdDp4r7GChfgj7Xh5xd0ex",
"vctk_style_melgan.v1": "14ThSEgjvl_iuFMdEGuNp7d3DulJHS9Mk",
"libritts_parallel_wavegan.v1": "1zHQl8kUYEuZ_i1qEFU6g2MEu99k3sHmR",
"libritts_parallel_wavegan.v1.long": "1b9zyBYGCCaJu0TIus5GXoMF8M3YEbqOw",
"libritts_multi_band_melgan.v2": "1kIDSBjrQvAsRewHPiFwBZ3FDelTWMp64",
"libritts_hifigan.v1": "1_TVFIvVtMn-Z4NiQrtrS20uSJOvBsnu1",
"libritts_style_melgan.v1": "1yuQakiMP0ECdB55IoxEGCbXDnNkWCoBg",
"kss_parallel_wavegan.v1": "1mLtQAzZHLiGSWguKCGG0EZa4C_xUO5gX",
"hui_acg_hokuspokus_parallel_wavegan.v1": "1irKf3okMLau56WNeOnhr2ZfSVESyQCGS",
"ruslan_parallel_wavegan.v1": "1M3UM6HN6wrfSe5jdgXwBnAIl_lJzLzuI",
}
def find_files(root_dir, query="*.wav", include_root_dir=True):
    """Find files recursively.

    Args:
        root_dir (str): Root root_dir to find.
        query (str): Query to find.
        include_root_dir (bool): If False, root_dir name is not included.

    Returns:
        list: List of found filenames.

    """
    found = []
    for dirpath, _, filenames in os.walk(root_dir, followlinks=True):
        found += [
            os.path.join(dirpath, name) for name in fnmatch.filter(filenames, query)
        ]
    if include_root_dir:
        return found
    # strip the "<root_dir>/" prefix to return paths relative to the root
    return [path.replace(root_dir + "/", "") for path in found]
def read_hdf5(hdf5_name, hdf5_path):
    """Read a dataset from an hdf5 file.

    Exits the process with status 1 when the file or the dataset is missing.

    Args:
        hdf5_name (str): Filename of hdf5 file.
        hdf5_path (str): Dataset name in hdf5 file.

    Return:
        any: Dataset values.

    """
    if not os.path.exists(hdf5_name):
        logging.error(f"There is no such a hdf5 file ({hdf5_name}).")
        sys.exit(1)
    hdf5_file = h5py.File(hdf5_name, "r")
    if hdf5_path not in hdf5_file:
        logging.error(f"There is no such a data in hdf5 file. ({hdf5_path})")
        sys.exit(1)
    # [()] reads the whole dataset into memory as a numpy value
    data = hdf5_file[hdf5_path][()]
    hdf5_file.close()
    return data
def write_hdf5(hdf5_name, hdf5_path, write_data, is_overwrite=True):
    """Write dataset to hdf5.

    Args:
        hdf5_name (str): Hdf5 dataset filename.
        hdf5_path (str): Dataset path in hdf5.
        write_data (ndarray): Data to write.
        is_overwrite (bool): Whether to overwrite dataset. If False and the
            dataset already exists, an error is logged and the process exits.
    """
    # convert to numpy array
    write_data = np.array(write_data)

    # check folder existence
    # (exist_ok avoids the create-after-check race of the previous
    #  `if not exists: makedirs` pattern when several writers run at once)
    folder_name, _ = os.path.split(hdf5_name)
    if len(folder_name) != 0:
        os.makedirs(folder_name, exist_ok=True)

    # check hdf5 existence
    if os.path.exists(hdf5_name):
        # if already exists, open with r+ mode
        hdf5_file = h5py.File(hdf5_name, "r+")
        # check dataset existence
        if hdf5_path in hdf5_file:
            if is_overwrite:
                logging.warning(
                    "Dataset in hdf5 file already exists. recreate dataset in hdf5."
                )
                # idiomatic deletion instead of calling __delitem__ directly
                del hdf5_file[hdf5_path]
            else:
                logging.error(
                    "Dataset in hdf5 file already exists. "
                    "if you want to overwrite, please set is_overwrite = True."
                )
                hdf5_file.close()
                sys.exit(1)
    else:
        # if not exists, open with w mode
        hdf5_file = h5py.File(hdf5_name, "w")

    # write data to hdf5
    hdf5_file.create_dataset(hdf5_path, data=write_data)
    hdf5_file.flush()
    hdf5_file.close()
class HDF5ScpLoader(object):
    """Dict-like loader for a Kaldi-style feats.scp file that points at hdf5 data.

    Each scp line maps a key to an hdf5 file, optionally suffixed with
    ``:dataset``. When the suffix lists several comma-separated datasets,
    they are loaded and concatenated along the last axis.

    Examples:
        key1 /some/path/a.h5:feats
        key2 /some/path/b.h5:feats
        ...
        >>> loader = HDF5ScpLoader("hdf5.scp")
        >>> array = loader["key1"]

        key1 /some/path/a.h5
        key2 /some/path/b.h5
        ...
        >>> loader = HDF5ScpLoader("hdf5.scp", "feats")
        >>> array = loader["key1"]

        key1 /some/path/a.h5:feats_1,feats_2
        key2 /some/path/b.h5:feats_1,feats_2
        ...
        # feats_1 and feats_2 will be concatenated
        >>> loader = HDF5ScpLoader("hdf5.scp")
        >>> array = loader["key1"]
    """

    def __init__(self, feats_scp, default_hdf5_path="feats"):
        """Initialize HDF5 scp loader.

        Args:
            feats_scp (str): Kaldi-style feats.scp file with hdf5 format.
            default_hdf5_path (str): Dataset path used when a line carries
                no explicit ``:path`` suffix.
        """
        self.default_hdf5_path = default_hdf5_path
        self.data = {}
        with open(feats_scp) as scp_file:
            for raw_line in scp_file:
                key, value = raw_line.replace("\n", "").split()
                self.data[key] = value

    def get_path(self, key):
        """Return the raw scp entry (file path, possibly with dataset suffix)."""
        return self.data[key]

    def __getitem__(self, key):
        """Load and return the ndarray registered for the given key."""
        entry = self.data[key]
        if ":" not in entry:
            # bare file path: fall back to the default dataset name
            return read_hdf5(entry, self.default_hdf5_path)
        if len(entry.split(",")) == 1:
            return read_hdf5(*entry.split(":"))
        # multiple datasets: load each, promote 1-D arrays to column
        # vectors, and concatenate along the last axis
        filename, dataset_spec = entry.split(":")
        arrays = [read_hdf5(filename, name) for name in dataset_spec.split(",")]
        return np.concatenate(
            [a if len(a.shape) != 1 else a.reshape(-1, 1) for a in arrays], 1
        )

    def __len__(self):
        """Return the number of scp entries."""
        return len(self.data)

    def __iter__(self):
        """Iterate over the scp keys."""
        return iter(self.data)

    def keys(self):
        """Return the scp keys."""
        return self.data.keys()

    def values(self):
        """Yield the loaded arrays, one per key."""
        for key in self.data:
            yield self[key]
class NpyScpLoader(object):
    """Dict-like loader for a Kaldi-style feats.scp file that points at npy files.

    Each line of the scp file is ``<utterance-key> <path-to-npy>``.

    Examples:
        key1 /some/path/a.npy
        key2 /some/path/b.npy
        key3 /some/path/c.npy
        key4 /some/path/d.npy
        ...
        >>> loader = NpyScpLoader("feats.scp")
        >>> array = loader["key1"]
    """

    def __init__(self, feats_scp):
        """Parse the scp file into an in-memory key -> path mapping.

        Args:
            feats_scp (str): Kaldi-style feats.scp file with npy format.
        """
        self.data = {}
        with open(feats_scp) as scp_file:
            for raw_line in scp_file:
                key, path = raw_line.replace("\n", "").split()
                self.data[key] = path

    def get_path(self, key):
        """Return the npy file path registered for the given key."""
        return self.data[key]

    def __getitem__(self, key):
        """Load and return the ndarray stored for the given key."""
        return np.load(self.data[key])

    def __len__(self):
        """Return the number of scp entries."""
        return len(self.data)

    def __iter__(self):
        """Iterate over the scp keys."""
        return iter(self.data)

    def keys(self):
        """Return the scp keys."""
        return self.data.keys()

    def values(self):
        """Yield the loaded arrays, one per key."""
        for key in self.data:
            yield self[key]
def load_model(checkpoint, config=None, stats=None):
    """Load trained model.

    Args:
        checkpoint (str): Checkpoint path.
        config (dict): Configuration dict. If None, ``config.yml`` located
            next to the checkpoint is loaded instead.
        stats (str): Statistics file path. If None, a ``stats.h5`` /
            ``stats.npy`` file next to the checkpoint is used when present.

    Return:
        torch.nn.Module: Model instance (a ``pqmf`` attribute is attached
        for multi-band generators, i.e. ``out_channels > 1``).
    """
    # load config if not provided
    if config is None:
        dirname = os.path.dirname(checkpoint)
        config = os.path.join(dirname, "config.yml")
        with open(config) as f:
            config = yaml.load(f, Loader=yaml.Loader)

    # lazy load for circular error
    import parallel_wavegan.models

    # get model and load parameters
    generator_type = config.get("generator_type", "ParallelWaveGANGenerator")
    model_class = getattr(
        parallel_wavegan.models,
        generator_type,
    )
    # workaround for typo #295: old configs may spell "kernal" instead of "kernel"
    generator_params = {
        k.replace("upsample_kernal_sizes", "upsample_kernel_sizes"): v
        for k, v in config["generator_params"].items()
    }
    model = model_class(**generator_params)
    model.load_state_dict(
        torch.load(checkpoint, map_location="cpu")["model"]["generator"]
    )

    # check stats existence (the extension depends on the feature file format)
    if stats is None:
        dirname = os.path.dirname(checkpoint)
        if config["format"] == "hdf5":
            ext = "h5"
        else:
            ext = "npy"
        if os.path.exists(os.path.join(dirname, f"stats.{ext}")):
            stats = os.path.join(dirname, f"stats.{ext}")

    # load stats
    # NOTE(review): VQVAE is skipped here, presumably because it has no
    # register_stats method -- confirm against the model implementation.
    if stats is not None and generator_type != "VQVAE":
        model.register_stats(stats)

    # add pqmf if needed (multi-band generators synthesize subband signals)
    if config["generator_params"]["out_channels"] > 1:
        # lazy load for circular error
        from parallel_wavegan.layers import PQMF

        pqmf_params = {}
        if LooseVersion(config.get("version", "0.1.0")) <= LooseVersion("0.4.2"):
            # For compatibility, here we set default values in version <= 0.4.2
            pqmf_params.update(taps=62, cutoff_ratio=0.15, beta=9.0)
        model.pqmf = PQMF(
            subbands=config["generator_params"]["out_channels"],
            **config.get("pqmf_params", pqmf_params),
        )

    return model
def download_pretrained_model(tag_or_url, download_dir=None):
    """Download pretrained model form google drive.

    Args:
        tag_or_url (str): Pretrained model tag or the google drive url for the model.
        download_dir (str): Directory to save downloaded files. Defaults to
            ``~/.cache/parallel_wavegan``.

    Returns:
        str: Path of downloaded model checkpoint.

    Examples:
        # Download by specifying tag
        >>> from parallel_wavegan.utils import download_pretrained_model
        >>> tag = "ljspeech_parallel_wavegan.v1"
        >>> download_path = download_pretrained_model(tag)

        # Download by specifying URL
        >>> from parallel_wavegan.utils import download_pretrained_model
        >>> url = "https://drive.google.com/file/d/10GYvB_mIKzXzSjD67tSnBhknZRoBjsNb"
        >>> download_path = download_pretrained_model(url)

        # The following URL also works
        >>> url = "https://drive.google.com/file/d/10GYvB_mIKzXzSjD67tSnBhknZRoBjsNb/view?usp=sharing"
        >>> download_path = download_pretrained_model(url)

    """
    if download_dir is None:
        download_dir = os.path.expanduser("~/.cache/parallel_wavegan")
    if tag_or_url in PRETRAINED_MODEL_LIST:
        id_ = PRETRAINED_MODEL_LIST[tag_or_url]
        output_path = f"{download_dir}/{tag_or_url}.tar.gz"
        tag = tag_or_url
    else:
        # get google drive id from the url link
        assert (
            "drive.google.com" in tag_or_url
        ), "Unknown URL format. Please use google drive for the model."
        # a drive file id is a run of >= 25 word/dash characters after a slash
        p = re.compile(r"/[-\w]{25,}")
        id_ = p.findall(tag_or_url)[0][1:]
        tag = id_
        output_path = f"{download_dir}/{id_}.tar.gz"
    os.makedirs(f"{download_dir}", exist_ok=True)
    # the file lock lets concurrent callers share a single download
    with FileLock(output_path + ".lock"):
        if not os.path.exists(output_path):
            # lazy load for compatibility
            import gdown

            gdown.download(
                f"https://drive.google.com/uc?id={id_}", output_path, quiet=False
            )
        with tarfile.open(output_path, "r:*") as tar:
            for member in tar.getmembers():
                if member.isreg():
                    # flatten archive paths to their basename, which also
                    # prevents path traversal outside the extraction dir
                    member.name = os.path.basename(member.name)
                    tar.extract(member, f"{download_dir}/{tag}")
    checkpoint_path = find_files(f"{download_dir}/{tag}", "checkpoint*.pkl")
    return checkpoint_path[0]
| 14,086 | 32.381517 | 102 | py |
ParallelWaveGAN | ParallelWaveGAN-master/parallel_wavegan/losses/stft_loss.py | # -*- coding: utf-8 -*-
# Copyright 2019 Tomoki Hayashi
# MIT License (https://opensource.org/licenses/MIT)
"""STFT-based Loss modules."""
from distutils.version import LooseVersion
import torch
import torch.nn.functional as F
is_pytorch_17plus = LooseVersion(torch.__version__) >= LooseVersion("1.7")
def stft(x, fft_size, hop_size, win_length, window):
    """Perform STFT and convert to magnitude spectrogram.

    Args:
        x (Tensor): Input signal tensor (B, T).
        fft_size (int): FFT size.
        hop_size (int): Hop size.
        win_length (int): Window length.
        window (Tensor): Window tensor of length ``win_length``
            (e.g. the buffer registered by ``STFTLoss``).

    Returns:
        Tensor: Magnitude spectrogram (B, #frames, fft_size // 2 + 1).
    """
    # torch>=1.7 requires return_complex to be passed explicitly
    extra_kwargs = {"return_complex": False} if is_pytorch_17plus else {}
    x_stft = torch.stft(x, fft_size, hop_size, win_length, window, **extra_kwargs)
    real, imag = x_stft[..., 0], x_stft[..., 1]

    # NOTE(kan-bayashi): clamp is needed to avoid nan or inf
    magnitude = torch.sqrt(torch.clamp(real**2 + imag**2, min=1e-7))
    return magnitude.transpose(2, 1)
class SpectralConvergenceLoss(torch.nn.Module):
    """Spectral convergence loss module."""

    def __init__(self):
        """Initialize spectral convergence loss module."""
        super().__init__()

    def forward(self, x_mag, y_mag):
        """Calculate forward propagation.

        Args:
            x_mag (Tensor): Magnitude spectrogram of predicted signal (B, #frames, #freq_bins).
            y_mag (Tensor): Magnitude spectrogram of groundtruth signal (B, #frames, #freq_bins).

        Returns:
            Tensor: Spectral convergence loss value (Frobenius norm of the
            residual, normalized by the groundtruth norm).
        """
        residual = torch.norm(y_mag - x_mag, p="fro")
        reference = torch.norm(y_mag, p="fro")
        return residual / reference
class LogSTFTMagnitudeLoss(torch.nn.Module):
    """Log STFT magnitude loss module."""

    def __init__(self):
        """Initialize log STFT magnitude loss module."""
        super().__init__()

    def forward(self, x_mag, y_mag):
        """Calculate forward propagation.

        Args:
            x_mag (Tensor): Magnitude spectrogram of predicted signal (B, #frames, #freq_bins).
            y_mag (Tensor): Magnitude spectrogram of groundtruth signal (B, #frames, #freq_bins).

        Returns:
            Tensor: L1 distance between the log magnitude spectrograms.
        """
        return F.l1_loss(y_mag.log(), x_mag.log())
class STFTLoss(torch.nn.Module):
    """Single-resolution STFT loss (spectral convergence + log magnitude)."""

    def __init__(
        self, fft_size=1024, shift_size=120, win_length=600, window="hann_window"
    ):
        """Initialize STFT loss module.

        Args:
            fft_size (int): FFT size.
            shift_size (int): Hop size between analysis frames.
            win_length (int): Window length.
            window (str): Name of a torch window function, e.g. "hann_window".
        """
        super().__init__()
        self.fft_size = fft_size
        self.shift_size = shift_size
        self.win_length = win_length
        self.spectral_convergence_loss = SpectralConvergenceLoss()
        self.log_stft_magnitude_loss = LogSTFTMagnitudeLoss()
        # NOTE(kan-bayashi): registered as a buffer so the window follows
        # the module across .to(device) calls (fixes #223)
        self.register_buffer("window", getattr(torch, window)(win_length))

    def forward(self, x, y):
        """Calculate forward propagation.

        Args:
            x (Tensor): Predicted signal (B, T).
            y (Tensor): Groundtruth signal (B, T).

        Returns:
            Tensor: Spectral convergence loss value.
            Tensor: Log STFT magnitude loss value.
        """
        x_mag = stft(x, self.fft_size, self.shift_size, self.win_length, self.window)
        y_mag = stft(y, self.fft_size, self.shift_size, self.win_length, self.window)
        return (
            self.spectral_convergence_loss(x_mag, y_mag),
            self.log_stft_magnitude_loss(x_mag, y_mag),
        )
class MultiResolutionSTFTLoss(torch.nn.Module):
    """STFT loss averaged over several analysis resolutions."""

    def __init__(
        self,
        fft_sizes=[1024, 2048, 512],
        hop_sizes=[120, 240, 50],
        win_lengths=[600, 1200, 240],
        window="hann_window",
    ):
        """Initialize Multi resolution STFT loss module.

        Args:
            fft_sizes (list): List of FFT sizes.
            hop_sizes (list): List of hop sizes.
            win_lengths (list): List of window lengths.
            window (str): Window function type.
        """
        super().__init__()
        assert len(fft_sizes) == len(hop_sizes) == len(win_lengths)
        self.stft_losses = torch.nn.ModuleList(
            STFTLoss(fft_size, hop_size, win_length, window)
            for fft_size, hop_size, win_length in zip(fft_sizes, hop_sizes, win_lengths)
        )

    def forward(self, x, y):
        """Calculate forward propagation.

        Args:
            x (Tensor): Predicted signal (B, T) or (B, #subband, T).
            y (Tensor): Groundtruth signal (B, T) or (B, #subband, T).

        Returns:
            Tensor: Multi resolution spectral convergence loss value.
            Tensor: Multi resolution log STFT magnitude loss value.
        """
        if len(x.shape) == 3:
            # fold sub-bands into the batch dimension: (B, C, T) -> (B x C, T)
            x = x.view(-1, x.size(2))
            y = y.view(-1, y.size(2))
        sc_loss, mag_loss = 0.0, 0.0
        for stft_loss in self.stft_losses:
            sc_l, mag_l = stft_loss(x, y)
            sc_loss = sc_loss + sc_l
            mag_loss = mag_loss + mag_l
        num_resolutions = len(self.stft_losses)
        return sc_loss / num_resolutions, mag_loss / num_resolutions
| 5,471 | 31 | 97 | py |
ParallelWaveGAN | ParallelWaveGAN-master/parallel_wavegan/losses/duration_prediction_loss.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2019 Tomoki Hayashi
# 2023 Jiatong SHi
# Adapted from espnet/espnet/net/pytorch_backend/duration_predictor.py
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""Duration predictor related modules."""
import torch
class DurationPredictorLoss(torch.nn.Module):
    """Loss function module for duration predictor.

    The loss value is calculated in log domain to make it Gaussian.
    """

    def __init__(self, offset=1.0, reduction="mean"):
        """Initialize duration predictor loss module.

        Args:
            offset (float, optional): Offset value to avoid nan in log domain.
            reduction (str): Reduction type in loss calculation.
        """
        super().__init__()
        self.criterion = torch.nn.MSELoss(reduction=reduction)
        self.offset = offset

    def forward(self, outputs, targets):
        """Calculate forward propagation.

        Args:
            outputs (Tensor): Batch of prediction durations in log domain (B, T)
            targets (LongTensor): Batch of groundtruth durations in linear domain (B, T)

        Returns:
            Tensor: Mean squared error loss value.

        Note:
            `outputs` is in log domain but `targets` is in linear domain.
        """
        # move the linear-domain targets to the log domain before comparing
        log_targets = torch.log(targets.float() + self.offset)
        return self.criterion(outputs, log_targets)
| 1,534 | 27.962264 | 88 | py |
ParallelWaveGAN | ParallelWaveGAN-master/parallel_wavegan/losses/adversarial_loss.py | # -*- coding: utf-8 -*-
# Copyright 2021 Tomoki Hayashi
# MIT License (https://opensource.org/licenses/MIT)
"""Adversarial loss modules."""
import torch
import torch.nn.functional as F
class GeneratorAdversarialLoss(torch.nn.Module):
    """Generator adversarial loss module."""

    def __init__(
        self,
        average_by_discriminators=True,
        loss_type="mse",
    ):
        """Initialize GeneratorAdversarialLoss module.

        Args:
            average_by_discriminators (bool): Average the loss over discriminators.
            loss_type (str): Either "mse" (LSGAN style) or "hinge".
        """
        super().__init__()
        self.average_by_discriminators = average_by_discriminators
        assert loss_type in ["mse", "hinge"], f"{loss_type} is not supported."
        self.criterion = self._mse_loss if loss_type == "mse" else self._hinge_loss

    def forward(self, outputs):
        """Calcualate generator adversarial loss.

        Args:
            outputs (Tensor or list): Discriminator outputs or list of
                discriminator outputs.

        Returns:
            Tensor: Generator adversarial loss value.
        """
        if not isinstance(outputs, (tuple, list)):
            return self.criterion(outputs)
        adv_loss = 0.0
        for idx, disc_out in enumerate(outputs):
            if isinstance(disc_out, (tuple, list)):
                # the last element is the prediction; earlier ones are feature maps
                disc_out = disc_out[-1]
            adv_loss += self.criterion(disc_out)
        if self.average_by_discriminators:
            adv_loss /= idx + 1
        return adv_loss

    def _mse_loss(self, x):
        return F.mse_loss(x, torch.ones_like(x))

    def _hinge_loss(self, x):
        return -torch.mean(x)
class DiscriminatorAdversarialLoss(torch.nn.Module):
    """Discriminator adversarial loss module."""

    def __init__(
        self,
        average_by_discriminators=True,
        loss_type="mse",
    ):
        """Initialize DiscriminatorAdversarialLoss module.

        Args:
            average_by_discriminators (bool): Average the losses over discriminators.
            loss_type (str): Either "mse" (LSGAN style) or "hinge".
        """
        super().__init__()
        self.average_by_discriminators = average_by_discriminators
        assert loss_type in ["mse", "hinge"], f"{loss_type} is not supported."
        if loss_type == "mse":
            self.fake_criterion = self._mse_fake_loss
            self.real_criterion = self._mse_real_loss
        else:
            self.fake_criterion = self._hinge_fake_loss
            self.real_criterion = self._hinge_real_loss

    def forward(self, outputs_hat, outputs):
        """Calcualate discriminator adversarial loss.

        Args:
            outputs_hat (Tensor or list): Discriminator outputs or list of
                discriminator outputs calculated from generator outputs.
            outputs (Tensor or list): Discriminator outputs or list of
                discriminator outputs calculated from groundtruth.

        Returns:
            Tensor: Discriminator real loss value.
            Tensor: Discriminator fake loss value.
        """
        if not isinstance(outputs, (tuple, list)):
            return self.real_criterion(outputs), self.fake_criterion(outputs_hat)
        real_loss = 0.0
        fake_loss = 0.0
        for idx, (fake_out, real_out) in enumerate(zip(outputs_hat, outputs)):
            if isinstance(fake_out, (tuple, list)):
                # the last element is the prediction; earlier ones are feature maps
                fake_out = fake_out[-1]
                real_out = real_out[-1]
            real_loss += self.real_criterion(real_out)
            fake_loss += self.fake_criterion(fake_out)
        if self.average_by_discriminators:
            real_loss /= idx + 1
            fake_loss /= idx + 1
        return real_loss, fake_loss

    def _mse_real_loss(self, x):
        return F.mse_loss(x, torch.ones_like(x))

    def _mse_fake_loss(self, x):
        return F.mse_loss(x, torch.zeros_like(x))

    def _hinge_real_loss(self, x):
        return -torch.mean(torch.min(x - 1, torch.zeros_like(x)))

    def _hinge_fake_loss(self, x):
        return -torch.mean(torch.min(-x - 1, torch.zeros_like(x)))
| 4,133 | 32.33871 | 84 | py |
ParallelWaveGAN | ParallelWaveGAN-master/parallel_wavegan/losses/mel_loss.py | # Copyright 2021 Tomoki Hayashi
# MIT License (https://opensource.org/licenses/MIT)
"""Mel-spectrogram loss modules."""
from distutils.version import LooseVersion
import librosa
import torch
import torch.nn.functional as F
is_pytorch_17plus = LooseVersion(torch.__version__) >= LooseVersion("1.7")
class MelSpectrogram(torch.nn.Module):
    """Calculate Mel-spectrogram.

    The waveform is transformed with ``torch.stft``, converted to an
    amplitude spectrogram, projected onto a librosa-generated mel
    filterbank, and finally compressed with a logarithm.
    """

    def __init__(
        self,
        fs=22050,
        fft_size=1024,
        hop_size=256,
        win_length=None,
        window="hann",
        num_mels=80,
        fmin=80,
        fmax=7600,
        center=True,
        normalized=False,
        onesided=True,
        eps=1e-10,
        log_base=10.0,
    ):
        """Initialize MelSpectrogram module.

        Args:
            fs (int): Sampling rate.
            fft_size (int): FFT size.
            hop_size (int): Hop size between analysis frames.
            win_length (int): Window length; defaults to ``fft_size`` when None.
            window (str): Window name; ``torch.{window}_window`` must exist.
            num_mels (int): Number of mel bins.
            fmin (int): Minimum filterbank frequency; None means 0.
            fmax (int): Maximum filterbank frequency; None means ``fs / 2``.
            center (bool): Whether to center-pad frames (passed to torch.stft).
            normalized (bool): Whether to normalize the STFT (passed to torch.stft).
            onesided (bool): Whether to return a one-sided spectrum.
            eps (float): Floor value keeping the log argument strictly positive.
            log_base (float): Base of the final log; one of None, 2.0, 10.0.

        Raises:
            ValueError: If ``window`` or ``log_base`` is not supported.
        """
        super().__init__()
        self.fft_size = fft_size
        if win_length is None:
            self.win_length = fft_size
        else:
            self.win_length = win_length
        self.hop_size = hop_size
        self.center = center
        self.normalized = normalized
        self.onesided = onesided
        if window is not None and not hasattr(torch, f"{window}_window"):
            raise ValueError(f"{window} window is not implemented")
        self.window = window
        self.eps = eps

        fmin = 0 if fmin is None else fmin
        fmax = fs / 2 if fmax is None else fmax
        melmat = librosa.filters.mel(
            sr=fs,
            n_fft=fft_size,
            n_mels=num_mels,
            fmin=fmin,
            fmax=fmax,
        )
        # transposed so (amplitude @ melmat) maps #freqs -> #mels; stored as
        # a buffer so it follows the module across .to(device) calls
        self.register_buffer("melmat", torch.from_numpy(melmat.T).float())
        self.stft_params = {
            "n_fft": self.fft_size,
            "win_length": self.win_length,
            "hop_length": self.hop_size,
            "center": self.center,
            "normalized": self.normalized,
            "onesided": self.onesided,
        }
        if is_pytorch_17plus:
            # torch>=1.7 requires return_complex to be passed explicitly
            self.stft_params["return_complex"] = False

        self.log_base = log_base
        if self.log_base is None:
            self.log = torch.log
        elif self.log_base == 2.0:
            self.log = torch.log2
        elif self.log_base == 10.0:
            self.log = torch.log10
        else:
            raise ValueError(f"log_base: {log_base} is not supported.")

    def forward(self, x):
        """Calculate Mel-spectrogram.

        Args:
            x (Tensor): Input waveform tensor (B, T) or (B, 1, T).

        Returns:
            Tensor: Mel-spectrogram (B, #mels, #frames).
        """
        if x.dim() == 3:
            # (B, C, T) -> (B*C, T)
            x = x.reshape(-1, x.size(2))

        if self.window is not None:
            # build the window on the fly so dtype/device always match x
            window_func = getattr(torch, f"{self.window}_window")
            window = window_func(self.win_length, dtype=x.dtype, device=x.device)
        else:
            window = None

        x_stft = torch.stft(x, window=window, **self.stft_params)
        # (B, #freqs, #frames, 2) -> (B, #frames, #freqs, 2)
        x_stft = x_stft.transpose(1, 2)
        x_power = x_stft[..., 0] ** 2 + x_stft[..., 1] ** 2
        # eps floor avoids log(0) / sqrt(0) gradients
        x_amp = torch.sqrt(torch.clamp(x_power, min=self.eps))

        x_mel = torch.matmul(x_amp, self.melmat)
        x_mel = torch.clamp(x_mel, min=self.eps)

        return self.log(x_mel).transpose(1, 2)
class MelSpectrogramLoss(torch.nn.Module):
    """L1 loss between Mel-spectrograms of generated and groundtruth audio."""

    def __init__(
        self,
        fs=22050,
        fft_size=1024,
        hop_size=256,
        win_length=None,
        window="hann",
        num_mels=80,
        fmin=80,
        fmax=7600,
        center=True,
        normalized=False,
        onesided=True,
        eps=1e-10,
        log_base=10.0,
    ):
        """Initialize Mel-spectrogram loss.

        All arguments are forwarded unchanged to the internal
        ``MelSpectrogram`` extractor; see that class for their meaning.
        """
        super().__init__()
        # a single extractor is shared by both the prediction and the target
        self.mel_spectrogram = MelSpectrogram(
            fs=fs,
            fft_size=fft_size,
            hop_size=hop_size,
            win_length=win_length,
            window=window,
            num_mels=num_mels,
            fmin=fmin,
            fmax=fmax,
            center=center,
            normalized=normalized,
            onesided=onesided,
            eps=eps,
            log_base=log_base,
        )

    def forward(self, y_hat, y):
        """Calculate Mel-spectrogram loss.

        Args:
            y_hat (Tensor): Generated single tensor (B, 1, T).
            y (Tensor): Groundtruth single tensor (B, 1, T).

        Returns:
            Tensor: Mel-spectrogram loss value.
        """
        return F.l1_loss(self.mel_spectrogram(y_hat), self.mel_spectrogram(y))
| 4,625 | 26.86747 | 81 | py |
ParallelWaveGAN | ParallelWaveGAN-master/parallel_wavegan/losses/feat_match_loss.py | # -*- coding: utf-8 -*-
# Copyright 2021 Tomoki Hayashi
# MIT License (https://opensource.org/licenses/MIT)
"""Feature matching loss modules."""
import torch
import torch.nn.functional as F
class FeatureMatchLoss(torch.nn.Module):
    """Feature matching loss module."""

    def __init__(
        self,
        average_by_layers=True,
        average_by_discriminators=True,
        include_final_outputs=False,
    ):
        """Initialize FeatureMatchLoss module.

        Args:
            average_by_layers (bool): Average the loss over the layers of
                each discriminator.
            average_by_discriminators (bool): Average the loss over discriminators.
            include_final_outputs (bool): Whether the last element of each
                feature list (the discriminator prediction) enters the loss.
        """
        super().__init__()
        self.average_by_layers = average_by_layers
        self.average_by_discriminators = average_by_discriminators
        self.include_final_outputs = include_final_outputs

    def forward(self, feats_hat, feats):
        """Calcualate feature matching loss.

        Args:
            feats_hat (list): List (per discriminator) of lists of feature
                maps calcuated from generater outputs.
            feats (list): List (per discriminator) of lists of feature maps
                calcuated from groundtruth.

        Returns:
            Tensor: Feature matching loss value.
        """
        feat_match_loss = 0.0
        # explicit counters instead of the enumerate indices: the previous
        # `/= j + 1` raised NameError when a trimmed layer list was empty
        # (e.g. a single-layer list with include_final_outputs=False)
        num_discriminators = 0
        for feats_hat_, feats_ in zip(feats_hat, feats):
            if not self.include_final_outputs:
                # drop the final discriminator prediction; keep feature maps only
                feats_hat_ = feats_hat_[:-1]
                feats_ = feats_[:-1]
            feat_match_loss_ = 0.0
            num_layers = 0
            for feat_hat_, feat_ in zip(feats_hat_, feats_):
                # detach the target so no gradient flows into the discriminator
                feat_match_loss_ += F.l1_loss(feat_hat_, feat_.detach())
                num_layers += 1
            if self.average_by_layers and num_layers > 0:
                feat_match_loss_ /= num_layers
            feat_match_loss += feat_match_loss_
            num_discriminators += 1
        if self.average_by_discriminators and num_discriminators > 0:
            feat_match_loss /= num_discriminators
        return feat_match_loss
| 1,746 | 30.763636 | 76 | py |
BanditZoo | BanditZoo-main/docs/conf.py | # Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
# make the package importable so autodoc can resolve banditzoo modules
sys.path.insert(0, os.path.abspath("."))
sys.path.insert(0, os.path.abspath("../"))
sys.path.insert(0, os.path.abspath("../banditzoo"))

import sphinx_rtd_theme

# -- Project information -----------------------------------------------------

project = "banditzoo"
copyright = "2021, Baihan Lin"
author = "Baihan Lin"

# -- General configuration ---------------------------------------------------

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    "sphinx_rtd_theme",
    "sphinx.ext.autodoc",   # pull API documentation from docstrings
    "sphinx.ext.mathjax",   # render LaTeX math in the browser
    "sphinx.ext.coverage",  # report undocumented objects
    "sphinx.ext.viewcode",  # link docs to highlighted source
    "sphinx.ext.napoleon",  # support Google/NumPy-style docstrings
]

# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]

# -- Options for HTML output -------------------------------------------------

# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
#
# html_theme = 'alabaster'
html_theme = "sphinx_rtd_theme"

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
| 2,160 | 32.765625 | 79 | py |
AtLoc | AtLoc-master/eval.py | import os
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "0,1,2,3,4,5,6,7"
import torch
import os.path as osp
import numpy as np
import matplotlib
import sys
DISPLAY = 'DISPLAY' in os.environ
if not DISPLAY:
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from tools.options import Options
from network.atloc import AtLoc, AtLocPlus
from torchvision import transforms, models
from tools.utils import quaternion_angular_error, qexp, load_state_dict
from data.dataloaders import SevenScenes, RobotCar, MF
from torch.utils.data import DataLoader
from torch.autograd import Variable
# Config
opt = Options().parse()
cuda = torch.cuda.is_available()
# NOTE(review): with multiple entries in opt.gpus this yields e.g. "cuda:0,1",
# which is not a valid torch device string -- presumably a single gpu id is
# expected here; confirm.
device = "cuda:" + ",".join(str(i) for i in opt.gpus) if cuda else "cpu"

# Model: ResNet-34 backbone; pretrained=False since the weights are loaded
# from the evaluation checkpoint below
feature_extractor = models.resnet34(pretrained=False)
atloc = AtLoc(feature_extractor, droprate=opt.test_dropout, pretrained=False, lstm=opt.lstm)
if opt.model == 'AtLoc':
    model = atloc
elif opt.model == 'AtLocPlus':
    # AtLocPlus wraps the single-frame AtLoc model
    model = AtLocPlus(atlocplus=atloc)
else:
    raise NotImplementedError
model.eval()

# loss functions: Euclidean distance for translation, angular error for rotation
t_criterion = lambda t_pred, t_gt: np.linalg.norm(t_pred - t_gt)
q_criterion = quaternion_angular_error

# per-scene image statistics (mean/variance) produced at dataset preparation
stats_file = osp.join(opt.data_dir, opt.dataset, opt.scene, 'stats.txt')
stats = np.loadtxt(stats_file)

# transformer: deterministic center crop for evaluation (no augmentation)
data_transform = transforms.Compose([
    transforms.Resize(opt.cropsize),
    transforms.CenterCrop(opt.cropsize),
    transforms.ToTensor(),
    transforms.Normalize(mean=stats[0], std=np.sqrt(stats[1]))])
target_transform = transforms.Lambda(lambda x: torch.from_numpy(x).float())

# read mean and stdev for un-normalizing the predicted translations
pose_stats_file = osp.join(opt.data_dir, opt.dataset, opt.scene, 'pose_stats.txt')
pose_m, pose_s = np.loadtxt(pose_stats_file)  # mean and stdev
# Load the dataset (test split: train=False)
kwargs = dict(scene=opt.scene, data_path=opt.data_dir, train=False, transform=data_transform, target_transform=target_transform, seed=opt.seed)
if opt.model == 'AtLoc':
    if opt.dataset == '7Scenes':
        data_set = SevenScenes(**kwargs)
    elif opt.dataset == 'RobotCar':
        data_set = RobotCar(**kwargs)
    else:
        raise NotImplementedError
elif opt.model == 'AtLocPlus':
    # multi-frame dataset wrapper used by AtLocPlus
    kwargs = dict(kwargs, dataset=opt.dataset, skip=opt.skip, steps=opt.steps, variable_skip=opt.variable_skip)
    data_set = MF(real=opt.real, **kwargs)
else:
    raise NotImplementedError
L = len(data_set)
# batch_size=1 and shuffle=False so predictions stay aligned with dataset order
kwargs = {'num_workers': opt.nThreads, 'pin_memory': True} if cuda else {}
loader = DataLoader(data_set, batch_size=1, shuffle=False, **kwargs)
pred_poses = np.zeros((L, 7))  # store all predicted poses (3 translation + 4 quaternion values)
targ_poses = np.zeros((L, 7))  # store all target poses
# load weights
model.to(device)
weights_filename = osp.expanduser(opt.weights)
if osp.isfile(weights_filename):
    checkpoint = torch.load(weights_filename, map_location=device)
    load_state_dict(model, checkpoint['model_state_dict'])
    print('Loaded weights from {:s}'.format(weights_filename))
else:
    # abort: evaluation is meaningless without trained weights
    print('Could not load weights from {:s}'.format(weights_filename))
    sys.exit(-1)
for idx, (data, target) in enumerate(loader):
if idx % 200 == 0:
print('Image {:d} / {:d}'.format(idx, len(loader)))
# output : 1 x 6
data_var = Variable(data, requires_grad=False)
data_var = data_var.to(device)
with torch.set_grad_enabled(False):
output = model(data_var)
s = output.size()
output = output.cpu().data.numpy().reshape((-1, s[-1]))
target = target.numpy().reshape((-1, s[-1]))
# normalize the predicted quaternions
q = [qexp(p[3:]) for p in output]
output = np.hstack((output[:, :3], np.asarray(q)))
q = [qexp(p[3:]) for p in target]
target = np.hstack((target[:, :3], np.asarray(q)))
# un-normalize the predicted and target translations
output[:, :3] = (output[:, :3] * pose_s) + pose_m
target[:, :3] = (target[:, :3] * pose_s) + pose_m
# take the middle prediction
pred_poses[idx, :] = output[len(output) / 2]
targ_poses[idx, :] = target[len(target) / 2]
# calculate losses over the whole split
t_loss = np.asarray([t_criterion(p, t) for p, t in zip(pred_poses[:, :3], targ_poses[:, :3])])
q_loss = np.asarray([q_criterion(p, t) for p, t in zip(pred_poses[:, 3:], targ_poses[:, 3:])])
# NOTE(review): `errors` is never used afterwards -- dead code, candidate for removal
errors = np.zeros((L, 2))
print('Error in translation: median {:3.2f} m, mean {:3.2f} m \nError in rotation: median {:3.2f} degrees, mean {:3.2f} degree'\
    .format(np.median(t_loss), np.mean(t_loss), np.median(q_loss), np.mean(q_loss)))
# plot ground-truth (black) vs predicted (red) trajectories in normalized coordinates
fig = plt.figure()
real_pose = (pred_poses[:, :3] - pose_m) / pose_s
gt_pose = (targ_poses[:, :3] - pose_m) / pose_s
plt.plot(gt_pose[:, 1], gt_pose[:, 0], color='black')
plt.plot(real_pose[:, 1], real_pose[:, 0], color='red')
plt.xlabel('x [m]')
plt.ylabel('y [m]')
# star marker on the starting point of the ground-truth trajectory
plt.plot(gt_pose[0, 1], gt_pose[0, 0], 'y*', markersize=15)
plt.show(block=True)
image_filename = osp.join(osp.expanduser(opt.results_dir), '{:s}.png'.format(opt.exp_name))
fig.savefig(image_filename) | 4,997 | 35.75 | 143 | py |
# train.py -- training entry point for AtLoc / AtLoc+ absolute-pose regression.
# Flat script: builds model + criterion from CLI options, then alternates a
# validation pass (every opt.val_freq epochs) with one training epoch.
import os
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "0,1,2,3,4,5,6,7"
import torch
import sys
import time
import os.path as osp
import numpy as np
from tensorboardX import SummaryWriter
from tools.options import Options
from network.atloc import AtLoc, AtLocPlus
from torchvision import transforms, models
from tools.utils import AtLocCriterion, AtLocPlusCriterion, AverageMeter, Logger
from data.dataloaders import SevenScenes, RobotCar, MF
from torch.utils.data import DataLoader
from torch.autograd import Variable

# Config
opt = Options().parse()
cuda = torch.cuda.is_available()
device = "cuda:" + ",".join(str(i) for i in opt.gpus) if cuda else "cpu"
logfile = osp.join(opt.runs_dir, 'log.txt')
stdout = Logger(logfile)
print('Logging to {:s}'.format(logfile))
sys.stdout = stdout  # tee every subsequent print into the log file

# Model: ResNet-34 backbone with the AtLoc head (wrapped for AtLoc+ clips)
feature_extractor = models.resnet34(pretrained=True)
atloc = AtLoc(feature_extractor, droprate=opt.train_dropout, pretrained=True, lstm=opt.lstm)
if opt.model == 'AtLoc':
    model = atloc
    train_criterion = AtLocCriterion(saq=opt.beta, learn_beta=True)
    val_criterion = AtLocCriterion()
    param_list = [{'params': model.parameters()}]
elif opt.model == 'AtLocPlus':
    model = AtLocPlus(atlocplus=atloc)
    kwargs = dict(saq=opt.beta, srq=opt.gamma, learn_beta=True, learn_gamma=True)
    train_criterion = AtLocPlusCriterion(**kwargs)
    val_criterion = AtLocPlusCriterion()
else:
    raise NotImplementedError

# Optimizer
# NOTE(review): this rebind discards the param_list built in the AtLoc branch
# above (harmless -- same contents) before adding the learnable loss weights.
param_list = [{'params': model.parameters()}]
if hasattr(train_criterion, 'sax') and hasattr(train_criterion, 'saq'):
    print('learn_beta')
    param_list.append({'params': [train_criterion.sax, train_criterion.saq]})
if opt.gamma is not None and hasattr(train_criterion, 'srx') and hasattr(train_criterion, 'srq'):
    print('learn_gamma')
    param_list.append({'params': [train_criterion.srx, train_criterion.srq]})
optimizer = torch.optim.Adam(param_list, lr=opt.lr, weight_decay=opt.weight_decay)

# Data transforms: stats.txt row 0 = per-channel pixel mean, row 1 = variance,
# hence std = sqrt(row 1).
stats_file = osp.join(opt.data_dir, opt.dataset, opt.scene, 'stats.txt')
stats = np.loadtxt(stats_file)
tforms = [transforms.Resize(opt.cropsize)]
tforms.append(transforms.RandomCrop(opt.cropsize))
if opt.color_jitter > 0:
    assert opt.color_jitter <= 1.0
    print('Using ColorJitter data augmentation')
    tforms.append(transforms.ColorJitter(brightness=opt.color_jitter, contrast=opt.color_jitter, saturation=opt.color_jitter, hue=0.5))
else:
    print('Not Using ColorJitter')
tforms.append(transforms.ToTensor())
tforms.append(transforms.Normalize(mean=stats[0], std=np.sqrt(stats[1])))
data_transform = transforms.Compose(tforms)
target_transform = transforms.Lambda(lambda x: torch.from_numpy(x).float())

# Load the dataset (single frames for AtLoc, multi-frame clips for AtLoc+)
kwargs = dict(scene=opt.scene, data_path=opt.data_dir, transform=data_transform, target_transform=target_transform, seed=opt.seed)
if opt.model == 'AtLoc':
    if opt.dataset == '7Scenes':
        train_set = SevenScenes(train=True, **kwargs)
        val_set = SevenScenes(train=False, **kwargs)
    elif opt.dataset == 'RobotCar':
        train_set = RobotCar(train=True, **kwargs)
        val_set = RobotCar(train=False, **kwargs)
    else:
        raise NotImplementedError
elif opt.model == 'AtLocPlus':
    kwargs = dict(kwargs, dataset=opt.dataset, skip=opt.skip, steps=opt.steps, variable_skip=opt.variable_skip)
    train_set = MF(train=True, real=opt.real, **kwargs)
    val_set = MF(train=False, real=opt.real, **kwargs)
else:
    raise NotImplementedError
kwargs = {'num_workers': opt.nThreads, 'pin_memory': True} if cuda else {}
train_loader = DataLoader(train_set, batch_size=opt.batchsize, shuffle=True, **kwargs)
val_loader = DataLoader(val_set, batch_size=opt.batchsize, shuffle=False, **kwargs)

model.to(device)
train_criterion.to(device)
val_criterion.to(device)

# NOTE(review): total_steps is never incremented, so every TensorBoard scalar
# is logged at the same global step -- confirm whether that is intended.
total_steps = opt.steps
writer = SummaryWriter(log_dir=opt.runs_dir)
experiment_name = opt.exp_name
for epoch in range(opt.epochs):
    # ---- validation (and checkpointing) every val_freq epochs ----
    if epoch % opt.val_freq == 0 or epoch == (opt.epochs - 1):
        val_batch_time = AverageMeter()
        val_loss = AverageMeter()
        model.eval()
        end = time.time()
        val_data_time = AverageMeter()
        for batch_idx, (val_data, val_target) in enumerate(val_loader):
            val_data_time.update(time.time() - end)
            val_data_var = Variable(val_data, requires_grad=False)
            val_target_var = Variable(val_target, requires_grad=False)
            val_data_var = val_data_var.to(device)
            val_target_var = val_target_var.to(device)
            with torch.set_grad_enabled(False):
                val_output = model(val_data_var)
                val_loss_tmp = val_criterion(val_output, val_target_var)
                val_loss_tmp = val_loss_tmp.item()
            val_loss.update(val_loss_tmp)
            val_batch_time.update(time.time() - end)
            writer.add_scalar('val_err', val_loss_tmp, total_steps)
            if batch_idx % opt.print_freq == 0:
                print('Val {:s}: Epoch {:d}\tBatch {:d}/{:d}\tData time {:.4f} ({:.4f})\tBatch time {:.4f} ({:.4f})\tLoss {:f}' \
                    .format(experiment_name, epoch, batch_idx, len(val_loader) - 1, val_data_time.val, val_data_time.avg, val_batch_time.val, val_batch_time.avg, val_loss_tmp))
            end = time.time()
        print('Val {:s}: Epoch {:d}, val_loss {:f}'.format(experiment_name, epoch, val_loss.avg))
        # checkpoint (val_freq and save_freq both default to 5)
        if epoch % opt.save_freq == 0:
            filename = osp.join(opt.models_dir, 'epoch_{:03d}.pth.tar'.format(epoch))
            checkpoint_dict = {'epoch': epoch, 'model_state_dict': model.state_dict(), 'optim_state_dict': optimizer.state_dict(), 'criterion_state_dict': train_criterion.state_dict()}
            torch.save(checkpoint_dict, filename)
            print('Epoch {:d} checkpoint saved for {:s}'.format(epoch, experiment_name))
    # ---- one training epoch ----
    model.train()
    train_data_time = AverageMeter()
    train_batch_time = AverageMeter()
    end = time.time()
    for batch_idx, (data, target) in enumerate(train_loader):
        train_data_time.update(time.time() - end)
        data_var = Variable(data, requires_grad=True)
        target_var = Variable(target, requires_grad=False)
        data_var = data_var.to(device)
        target_var = target_var.to(device)
        with torch.set_grad_enabled(True):
            output = model(data_var)
            loss_tmp = train_criterion(output, target_var)
        # gradients are zeroed after each step, so the next backward starts clean
        loss_tmp.backward()
        optimizer.step()
        optimizer.zero_grad()
        train_batch_time.update(time.time() - end)
        writer.add_scalar('train_err', loss_tmp.item(), total_steps)
        if batch_idx % opt.print_freq == 0:
            print('Train {:s}: Epoch {:d}\tBatch {:d}/{:d}\tData time {:.4f} ({:.4f})\tBatch time {:.4f} ({:.4f})\tLoss {:f}' \
                .format(experiment_name, epoch, batch_idx, len(train_loader) - 1, train_data_time.val, train_data_time.avg, train_batch_time.val, train_batch_time.avg, loss_tmp.item()))
        end = time.time()
writer.close()
# saliency_map.py -- renders per-frame saliency (gradient-times-input) overlays
# for a trained AtLoc / AtLoc+ model and writes them out as an .avi video.
import os
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "0,1,2,3,4,5,6,7"
import torch
import os.path as osp
import numpy as np
import matplotlib
import sys
import cv2
from tools.options import Options

# fall back to a headless matplotlib backend when no X display is available
DISPLAY = 'DISPLAY' in os.environ
if not DISPLAY:
    matplotlib.use('Agg')
import matplotlib.pyplot as plt
from network.atloc import AtLoc, AtLocPlus
from data.dataloaders import SevenScenes, RobotCar, MF
from tools.utils import load_state_dict
from torch.utils.data import DataLoader
from torch.autograd import Variable
from torchvision import transforms, models

# config
opt = Options().parse()
cuda = torch.cuda.is_available()
device = "cuda:" + ",".join(str(i) for i in opt.gpus) if cuda else "cpu"

# Model (weights come from a checkpoint below, so no ImageNet pretraining here)
feature_extractor = models.resnet34(pretrained=False)
atloc = AtLoc(feature_extractor, droprate=opt.test_dropout, pretrained=False, lstm=opt.lstm)
if opt.model == 'AtLoc':
    model = atloc
elif opt.model == 'AtLocPlus':
    model = AtLocPlus(atlocplus=atloc)
else:
    raise NotImplementedError
model.eval()

# per-channel pixel statistics: row 0 = mean, row 1 = variance
stats_file = osp.join(opt.data_dir, opt.dataset, opt.scene, 'stats.txt')
stats = np.loadtxt(stats_file)

# transformer
data_transform = transforms.Compose([
    transforms.Resize(opt.cropsize),
    transforms.CenterCrop(opt.cropsize),
    transforms.ToTensor(),
    transforms.Normalize(mean=stats[0], std=np.sqrt(stats[1]))])
target_transform = transforms.Lambda(lambda x: torch.from_numpy(x).float())

# Load the dataset
kwargs = dict(scene=opt.scene, data_path=opt.data_dir, train=False, transform=data_transform, target_transform=target_transform, seed=opt.seed)
if opt.model == 'AtLoc':
    if opt.dataset == '7Scenes':
        data_set = SevenScenes(**kwargs)
    elif opt.dataset == 'RobotCar':
        data_set = RobotCar(**kwargs)
    else:
        raise NotImplementedError
elif opt.model == 'AtLocPlus':
    kwargs = dict(kwargs, dataset=opt.dataset, skip=opt.skip, steps=opt.steps, variable_skip=opt.variable_skip)
    data_set = MF(real=opt.real, **kwargs)
else:
    raise NotImplementedError
L = len(data_set)
kwargs = {'num_workers': opt.nThreads, 'pin_memory': True} if cuda else {}
loader = DataLoader(data_set, batch_size=1, shuffle=False, **kwargs)

# load weights
model.to(device)
weights_filename = osp.expanduser(opt.weights)
if osp.isfile(weights_filename):
    checkpoint = torch.load(weights_filename, map_location=device)
    load_state_dict(model, checkpoint['model_state_dict'])
    print('Loaded weights from {:s}'.format(weights_filename))
else:
    print('Could not load weights from {:s}'.format(weights_filename))
    sys.exit(-1)

# opencv init
fourcc = cv2.VideoWriter_fourcc(*'XVID')
out_filename = osp.join(opt.results_dir, '{:s}_{:s}_attention_{:s}.avi'.format(opt.dataset, opt.scene, opt.model))
# get frame size from the first sample (C x H x W tensor)
img, _ = data_set[0]
vwrite = cv2.VideoWriter(out_filename, fourcc=fourcc, fps=20.0,
                         frameSize=(img.size(2), img.size(1)))
print('Initialized VideoWriter to {:s} with frames size {:d} x {:d}'.format(out_filename, img.size(2), img.size(1)))

# inference: saliency = |d(mean pose) / d(input)| * input, jet-colormapped and
# alpha-blended onto the un-normalized frame
cm_jet = plt.cm.get_cmap('jet')
for batch_idx, (data, target) in enumerate(loader):
    data = data.to(device)
    data_var = Variable(data, requires_grad=True)
    model.zero_grad()
    pose = model(data_var)
    pose.mean().backward()  # gradient of the mean predicted pose w.r.t. pixels
    act = data_var.grad.data.cpu().numpy()
    act = act.squeeze().transpose((1, 2, 0))
    img = data[0].cpu().numpy()
    img = img.transpose((1, 2, 0))
    act *= img  # gradient-times-input saliency
    act = np.amax(np.abs(act), axis=2)
    act -= act.min()
    act /= act.max()
    act = cm_jet(act)[:, :, :3]
    act *= 255
    # un-normalize the frame for display.
    # NOTE(review): the transform divides by sqrt(stats[1]) but this multiplies
    # by stats[1] (the variance) -- confirm whether a sqrt is missing here.
    img *= stats[1]
    img += stats[0]
    img *= 255
    img = img[:, :, ::-1]  # RGB -> BGR for OpenCV
    img = 0.5 * img + 0.5 * act
    img = np.clip(img, 0, 255)
    vwrite.write(img.astype(np.uint8))
    if batch_idx % 200 == 0:
        print('{:d} / {:d}'.format(batch_idx, len(loader)))
vwrite.release()
print('{:s} written'.format(out_filename))
| 3,971 | 30.52381 | 143 | py |
AtLoc | AtLoc-master/tools/utils.py | import os
import torch
from torch import nn
import scipy.linalg as slin
import math
import transforms3d.quaternions as txq
import transforms3d.euler as txe
import numpy as np
import sys
from torch.nn import Module
from torch.autograd import Variable
from torch.nn.functional import pad
from torchvision.datasets.folder import default_loader
from collections import OrderedDict
class AtLocCriterion(nn.Module):
    """Pose-regression loss with learnable homoscedastic-uncertainty weights.

    Combines a translation loss on pred[:, :3] and a rotation loss on
    pred[:, 3:], each scaled by exp(-s) with the log-variance s added back,
    so the balance between the two terms can be learned (learn_beta=True).
    """

    def __init__(self, t_loss_fn=nn.L1Loss(), q_loss_fn=nn.L1Loss(), sax=0.0, saq=0.0, learn_beta=False):
        super(AtLocCriterion, self).__init__()
        self.t_loss_fn = t_loss_fn
        self.q_loss_fn = q_loss_fn
        # log-variance weights; only trainable when learn_beta is set
        self.sax = nn.Parameter(torch.Tensor([sax]), requires_grad=learn_beta)
        self.saq = nn.Parameter(torch.Tensor([saq]), requires_grad=learn_beta)

    def forward(self, pred, targ):
        """Weighted sum of translation and rotation losses for (N, 6) poses."""
        trans_term = torch.exp(-self.sax) * self.t_loss_fn(pred[:, :3], targ[:, :3]) + self.sax
        rot_term = torch.exp(-self.saq) * self.q_loss_fn(pred[:, 3:], targ[:, 3:]) + self.saq
        return trans_term + rot_term
class AtLocPlusCriterion(nn.Module):
    """AtLoc+ loss: absolute-pose term plus relative-motion (VO) term.

    Each of the four components (absolute/relative x translation/rotation)
    carries a learnable homoscedastic-uncertainty weight: the loss is scaled
    by exp(-s) with the log-variance s added back.
    """

    def __init__(self, t_loss_fn=nn.L1Loss(), q_loss_fn=nn.L1Loss(), sax=0.0, saq=0.0, srx=0.0, srq=0.0, learn_beta=False, learn_gamma=False):
        super(AtLocPlusCriterion, self).__init__()
        self.t_loss_fn = t_loss_fn
        self.q_loss_fn = q_loss_fn
        # sax/saq weight the absolute translation/rotation terms (learn_beta);
        # srx/srq weight the relative VO translation/rotation terms (learn_gamma)
        self.sax = nn.Parameter(torch.Tensor([sax]), requires_grad=learn_beta)
        self.saq = nn.Parameter(torch.Tensor([saq]), requires_grad=learn_beta)
        self.srx = nn.Parameter(torch.Tensor([srx]), requires_grad=learn_gamma)
        self.srq = nn.Parameter(torch.Tensor([srq]), requires_grad=learn_gamma)

    def forward(self, pred, targ):
        # pred/targ: (batch, steps, 6) clips; view(-1, *s[2:]) flattens the
        # clip dimension so per-frame slices [:, :3] / [:, 3:] apply.
        # absolute pose loss
        s = pred.size()
        abs_loss = torch.exp(-self.sax) * self.t_loss_fn(pred.view(-1, *s[2:])[:, :3], targ.view(-1, *s[2:])[:, :3]) + self.sax + \
                   torch.exp(-self.saq) * self.q_loss_fn(pred.view(-1, *s[2:])[:, 3:], targ.view(-1, *s[2:])[:, 3:]) + self.saq
        # get the VOs (frame-to-frame pose differences within each clip)
        pred_vos = calc_vos_simple(pred)
        targ_vos = calc_vos_simple(targ)
        # VO loss
        s = pred_vos.size()
        vo_loss = torch.exp(-self.srx) * self.t_loss_fn(pred_vos.view(-1, *s[2:])[:, :3], targ_vos.view(-1, *s[2:])[:, :3]) + self.srx + \
                  torch.exp(-self.srq) * self.q_loss_fn(pred_vos.view(-1, *s[2:])[:, 3:], targ_vos.view(-1, *s[2:])[:, 3:]) + self.srq
        # total loss
        loss = abs_loss + vo_loss
        return loss
class Logger(object):
    """Tee-style stream: mirrors every write to the terminal and a log file.

    Intended to replace sys.stdout so all prints are also persisted.
    """

    def __init__(self, filename="Default.log"):
        self.terminal = sys.stdout  # capture the current stdout at creation
        self.log = open(filename, "w")

    def delink(self):
        # Close only the file; the terminal stream is left untouched.
        self.log.close()

    def writeTerminalOnly(self, message):
        self.terminal.write(message)

    def write(self, message):
        # Fan the message out to both destinations.
        for stream in (self.terminal, self.log):
            stream.write(message)

    def flush(self):
        # No-op, but required so the object satisfies the stream interface.
        pass
class AverageMeter(object):
    """Tracks the latest value and the running mean of a scalar series."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Clear all accumulated statistics."""
        self.val, self.avg, self.sum, self.count = 0, 0, 0, 0

    def update(self, val, n=1):
        """Record *val* observed *n* times and refresh the running mean."""
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count
def mkdirs(paths):
    """Create every directory in *paths*.

    Accepts a single path string or a list/tuple of paths.

    FIX: the original guard was `isinstance(paths, list) and not
    isinstance(paths, str)` -- the second clause is dead code (a list is never
    a str; a Python-2 leftover), and a tuple of paths fell through to
    mkdir(tuple) and crashed. Accepting tuples is backward-compatible.
    """
    if isinstance(paths, (list, tuple)):
        for path in paths:
            mkdir(path)
    else:
        mkdir(paths)
def mkdir(path):
    """Create directory *path* (including parents) if it does not exist.

    FIX: the original exists-then-makedirs pair was a check-then-act race
    (another process creating the directory in between raised FileExistsError);
    os.makedirs(exist_ok=True) is atomic with respect to that race. Note: if
    *path* exists but is a regular file this now raises instead of silently
    doing nothing.
    """
    os.makedirs(path, exist_ok=True)
def load_image(filename, loader=default_loader):
    """Load an image from *filename* via *loader*, returning None on failure.

    FIX: the original error path did
    `'... IOError: {:s}'.format(filename, e)` -- applying the '{:s}' format
    spec to an exception object raises TypeError, so the handler itself
    crashed instead of returning None. Convert with str(e) first. The bare
    `except:` is also narrowed to `except Exception` so KeyboardInterrupt /
    SystemExit still propagate.
    """
    try:
        img = loader(filename)
    except IOError as e:
        print('Could not load image {:s}, IOError: {:s}'.format(filename, str(e)))
        return None
    except Exception:
        print('Could not load image {:s}, unexpected error'.format(filename))
        return None
    return img
def qlog(q):
    """Logarithm map of a unit quaternion [w, x, y, z] onto a 3-vector.

    Returns arccos(w) * v / |v| where v is the vector part; the identity
    quaternion (zero vector part) maps to the zero vector.
    """
    v = q[1:]
    if not np.any(v):
        return np.zeros(3)
    return np.arccos(q[0]) * v / np.linalg.norm(v)
def qexp(q):
    """Exponential map: 3-vector log-quaternion back to [w, x, y, z].

    Inverse of qlog. np.sinc(n / pi) equals sin(n) / n and handles n == 0
    without a divide-by-zero.
    """
    n = np.linalg.norm(q)
    return np.hstack((np.cos(n), np.sinc(n / np.pi) * q))
def calc_vos_simple(poses):
    """Frame-to-frame pose differences ("VO" targets) for a batch of clips.

    poses: (batch, T, d) tensor; returns (batch, T-1, d) where entry i is
    poses[:, i+1] - poses[:, i].
    """
    per_clip = []
    for clip in poses:
        steps = [(clip[i + 1] - clip[i]).unsqueeze(0) for i in range(len(clip) - 1)]
        per_clip.append(torch.cat(steps, dim=0))
    return torch.stack(per_clip, dim=0)
def quaternion_angular_error(q1, q2):
    """Angular distance in degrees between two unit quaternions.

    abs() folds the double cover (q and -q are the same rotation); the clamp
    keeps the dot product inside arccos's domain against rounding error.
    """
    d = min(1.0, max(-1.0, abs(np.dot(q1, q2))))
    return 2 * np.arccos(d) * 180 / np.pi
def process_poses(poses_in, mean_t, std_t, align_R, align_t, align_s):
    """Convert raw flattened 3x4 [R|t] poses to normalized 6-D targets.

    poses_in: (N, 12) rows of a flattened 3x4 pose matrix.
    mean_t / std_t: translation statistics used for normalization.
    align_R / align_t / align_s: similarity transform (rotation, translation,
    scale) that maps the pose frame into the alignment frame.
    Returns (N, 6): [normalized translation, log-quaternion rotation].
    """
    poses_out = np.zeros((len(poses_in), 6))
    # columns 3, 7, 11 of the flattened 3x4 matrix are the translation vector
    poses_out[:, 0:3] = poses_in[:, [3, 7, 11]]

    # align each rotation and translation into the common frame
    for i in range(len(poses_out)):
        R = poses_in[i].reshape((3, 4))[:3, :3]
        q = txq.mat2quat(np.dot(align_R, R))
        q *= np.sign(q[0])  # constrain to hemisphere (q and -q are equal rotations)
        q = qlog(q)  # quaternion -> 3-D log-quaternion
        poses_out[i, 3:] = q
        t = poses_out[i, :3] - align_t
        poses_out[i, :3] = align_s * np.dot(align_R, t[:, np.newaxis]).squeeze()

    # normalize translation
    poses_out[:, :3] -= mean_t
    poses_out[:, :3] /= std_t
    return poses_out
def load_state_dict(model, state_dict):
    """Load *state_dict* into *model*, reconciling parameter-name prefixes.

    Checkpoints saved from a wrapped model (e.g. DataParallel's 'module.' or
    AtLocPlus's 'atlocplus.') carry a name prefix that the target model may
    lack, or vice versa; the prefix is inferred from the first parameter name
    and added/stripped on every key.

    FIX: in the branch where the checkpoint names contain the model names
    (prefix must be stripped), the original left model_prefix = None and then
    called k.replace(state_prefix, None), which raises
    "TypeError: replace() argument 2 must be str". Using '' strips the prefix
    as intended.
    """
    model_names = [n for n, _ in model.named_parameters()]
    state_names = [n for n in state_dict.keys()]

    # find prefix for the model and state dicts from the first param name
    if model_names[0].find(state_names[0]) >= 0:
        # model names contain the state names: prepend the extra prefix
        model_prefix = model_names[0].replace(state_names[0], '')
        state_prefix = None
    elif state_names[0].find(model_names[0]) >= 0:
        # state names contain the model names: strip the extra prefix
        state_prefix = state_names[0].replace(model_names[0], '')
        model_prefix = ''  # FIX: was None, crashing in k.replace below
    else:
        # no containment either way: fall back to the first dotted component
        model_prefix = model_names[0].split('.')[0]
        state_prefix = state_names[0].split('.')[0]

    new_state_dict = OrderedDict()
    for k, v in state_dict.items():
        if state_prefix is None:
            k = model_prefix + k
        else:
            # NOTE: replaces every occurrence of the prefix substring in k
            k = k.replace(state_prefix, model_prefix)
        new_state_dict[k] = v
    model.load_state_dict(new_state_dict)
AtLoc | AtLoc-master/tools/options.py | import argparse
import os
from tools import utils
import torch
class Options():
    """Command-line configuration shared by all AtLoc scripts.

    parse() registers the arguments, parses argv, converts --gpus into a list
    of ids, selects the first GPU, derives the experiment name, and creates
    the log / model / result directories.
    """

    def __init__(self):
        self.parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)

    def initialize(self):
        """Register every known argument on the parser."""
        # base options
        self.parser.add_argument('--data_dir', type=str, default='./data')
        self.parser.add_argument('--batchsize', type=int, default=64)
        self.parser.add_argument('--cropsize', type=int, default=256)
        self.parser.add_argument('--print_freq', type=int, default=20)
        self.parser.add_argument('--gpus', type=str, default='-1')  # comma-separated; -1 = CPU
        self.parser.add_argument('--nThreads', default=8, type=int, help='threads for loading data')
        self.parser.add_argument('--dataset', type=str, default='RobotCar')
        self.parser.add_argument('--scene', type=str, default='loop')
        self.parser.add_argument('--model', type=str, default='AtLoc')
        self.parser.add_argument('--seed', type=int, default=7)
        # NOTE(review): argparse type=bool treats ANY non-empty string as True
        # (e.g. '--lstm False' yields True) -- the bool-typed flags below all
        # share this pitfall.
        self.parser.add_argument('--lstm', type=bool, default=False)
        self.parser.add_argument('--logdir', type=str, default='./logs')
        self.parser.add_argument('--exp_name', type=str, default='name')
        self.parser.add_argument('--skip', type=int, default=10)
        self.parser.add_argument('--variable_skip', type=bool, default=False)
        self.parser.add_argument('--real', type=bool, default=False)
        self.parser.add_argument('--steps', type=int, default=3)
        self.parser.add_argument('--val', type=bool, default=False)
        # train options
        self.parser.add_argument('--epochs', type=int, default=100)
        self.parser.add_argument('--beta', type=float, default=-3.0)
        self.parser.add_argument('--gamma', type=float, default=None, help='only for AtLoc+ (-3.0)')
        self.parser.add_argument('--color_jitter', type=float, default=0.7, help='0.7 is only for RobotCar, 0.0 for 7Scenes')
        self.parser.add_argument('--train_dropout', type=float, default=0.5)
        self.parser.add_argument('--val_freq', type=int, default=5)
        self.parser.add_argument('--results_dir', type=str, default='figures')
        self.parser.add_argument('--models_dir', type=str, default='models')
        self.parser.add_argument('--runs_dir', type=str, default='runs')
        self.parser.add_argument('--lr', type=float, default=5e-5)
        self.parser.add_argument('--weight_decay', type=float, default=0.0005)
        # test options
        self.parser.add_argument('--test_dropout', type=float, default=0.0)
        self.parser.add_argument('--weights', type=str, default='epoch_005.pth.tar')
        self.parser.add_argument('--save_freq', type=int, default=5)

    def parse(self):
        """Parse argv, post-process the options, create output dirs, return opts."""
        self.initialize()
        self.opt = self.parser.parse_args()
        # '--gpus a,b,c' -> [a, b, c]; negative ids are dropped (CPU mode)
        str_ids = self.opt.gpus.split(',')
        self.opt.gpus = []
        for str_id in str_ids:
            id = int(str_id)
            if id >= 0:
                self.opt.gpus.append(id)
        # set gpu ids
        if len(self.opt.gpus) > 0:
            torch.cuda.set_device(self.opt.gpus[0])
        args = vars(self.opt)
        print('------------ Options -------------')
        for k, v in sorted(args.items()):
            print('%s: %s' % (str(k), str(v)))
        print('-------------- End ---------------')
        # save to the disk: nest results/models/runs under logdir/<exp_name>
        self.opt.exp_name = '{:s}_{:s}_{:s}_{:s}'.format(self.opt.dataset, self.opt.scene, self.opt.model, str(self.opt.lstm))
        expr_dir = os.path.join(self.opt.logdir, self.opt.exp_name)
        self.opt.results_dir = os.path.join(expr_dir, self.opt.results_dir)
        self.opt.models_dir = os.path.join(expr_dir, self.opt.models_dir)
        self.opt.runs_dir = os.path.join(expr_dir, self.opt.runs_dir)
        utils.mkdirs([self.opt.logdir, expr_dir, self.opt.runs_dir, self.opt.models_dir, self.opt.results_dir])
        return self.opt
| 3,916 | 49.217949 | 126 | py |
AtLoc | AtLoc-master/network/att.py | import torch
from torch import nn
from torch.nn import functional as F
class AttentionBlock(nn.Module):
    """Self-attention over a flat (batch, C) feature vector.

    Non-local-style block: g/theta/phi project to C // 8 channels, an
    attention map is formed from phi and theta, applied to g, projected back
    to C by W, and added residually to the input.
    """

    def __init__(self, in_channels):
        super(AttentionBlock, self).__init__()
        reduced = in_channels // 8
        # creation order matters for reproducible random initialization
        self.g = nn.Linear(in_channels, reduced)
        self.theta = nn.Linear(in_channels, reduced)
        self.phi = nn.Linear(in_channels, reduced)
        self.W = nn.Linear(reduced, in_channels)

    def forward(self, x):
        batch = x.size(0)
        reduced = x.size(1) // 8
        g_x = self.g(x).view(batch, reduced, 1)
        theta_x = self.theta(x).view(batch, reduced, 1).permute(0, 2, 1)
        phi_x = self.phi(x).view(batch, reduced, 1)
        # (batch, reduced, reduced) attention weights, softmaxed per row
        attn = F.softmax(torch.matmul(phi_x, theta_x), dim=-1)
        y = torch.matmul(attn, g_x).view(batch, reduced)
        # residual connection back to the input feature
        return self.W(y) + x
AtLoc | AtLoc-master/network/atloc.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init
from network.att import AttentionBlock
class FourDirectionalLSTM(nn.Module):
    """Bidirectional LSTM scans over two orthogonal foldings of a feature.

    The flat feature (origin_feat_size) is reshaped to (seq_size, feat_size)
    and scanned left-right; its transpose is scanned up-down. The four final
    hidden states (2 scans x 2 directions) are concatenated, producing a
    4 * hidden_size output.
    """

    def __init__(self, seq_size, origin_feat_size, hidden_size):
        super(FourDirectionalLSTM, self).__init__()
        self.feat_size = origin_feat_size // seq_size
        self.seq_size = seq_size
        self.hidden_size = hidden_size
        self.lstm_rightleft = nn.LSTM(self.feat_size, self.hidden_size, batch_first=True, bidirectional=True)
        self.lstm_downup = nn.LSTM(self.seq_size, self.hidden_size, batch_first=True, bidirectional=True)

    def init_hidden_(self, batch_size, device):
        # random (h0, c0); leading 2 = one layer x two directions.
        # NOTE(review): hidden state is re-randomized on every forward call,
        # so outputs are stochastic even in eval mode -- confirm intended.
        return (torch.randn(2, batch_size, self.hidden_size).to(device),
                torch.randn(2, batch_size, self.hidden_size).to(device))

    def forward(self, x):
        # x: (batch, seq_size * feat_size) flat feature vector
        batch_size = x.size(0)
        x_rightleft = x.view(batch_size, self.seq_size, self.feat_size)
        x_downup = x_rightleft.transpose(1, 2)
        hidden_rightleft = self.init_hidden_(batch_size, x.device)
        hidden_downup = self.init_hidden_(batch_size, x.device)
        _, (hidden_state_lr, _) = self.lstm_rightleft(x_rightleft, hidden_rightleft)
        _, (hidden_state_ud, _) = self.lstm_downup(x_downup, hidden_downup)
        # final hidden state of each direction of each scan
        hlr_fw = hidden_state_lr[0, :, :]
        hlr_bw = hidden_state_lr[1, :, :]
        hud_fw = hidden_state_ud[0, :, :]
        hud_bw = hidden_state_ud[1, :, :]
        return torch.cat([hlr_fw, hlr_bw, hud_fw, hud_bw], dim=1)
class AtLoc(nn.Module):
    """Attention-guided absolute-pose regressor.

    A CNN backbone (its final FC replaced) produces a feat_dim descriptor,
    refined either by self-attention (default) or a four-directional LSTM,
    then mapped to a 6-D pose: xyz translation + 3-D log-quaternion rotation.
    """

    def __init__(self, feature_extractor, droprate=0.5, pretrained=True, feat_dim=2048, lstm=False):
        super(AtLoc, self).__init__()
        self.droprate = droprate
        self.lstm = lstm

        # replace the last FC layer in feature extractor
        self.feature_extractor = feature_extractor
        self.feature_extractor.avgpool = nn.AdaptiveAvgPool2d(1)
        fe_out_planes = self.feature_extractor.fc.in_features
        self.feature_extractor.fc = nn.Linear(fe_out_planes, feat_dim)

        if self.lstm:
            # LSTM output is 4 * 256 = 1024 = feat_dim // 2 for feat_dim=2048
            self.lstm4dir = FourDirectionalLSTM(seq_size=32, origin_feat_size=feat_dim, hidden_size=256)
            self.fc_xyz = nn.Linear(feat_dim // 2, 3)
            self.fc_wpqr = nn.Linear(feat_dim // 2, 3)
        else:
            self.att = AttentionBlock(feat_dim)
            self.fc_xyz = nn.Linear(feat_dim, 3)
            self.fc_wpqr = nn.Linear(feat_dim, 3)

        # initialize: only the newly added layers when the backbone is
        # pretrained, otherwise every conv/linear layer in the network
        if pretrained:
            init_modules = [self.feature_extractor.fc, self.fc_xyz, self.fc_wpqr]
        else:
            init_modules = self.modules()
        for m in init_modules:
            if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear):
                nn.init.kaiming_normal_(m.weight.data)
                if m.bias is not None:
                    nn.init.constant_(m.bias.data, 0)

    def forward(self, x):
        x = self.feature_extractor(x)
        x = F.relu(x)
        if self.lstm:
            x = self.lstm4dir(x)
        else:
            x = self.att(x.view(x.size(0), -1))
        # NOTE: F.dropout defaults to training=True, so dropout also fires at
        # eval time here unless droprate (test_dropout) is set to 0.
        if self.droprate > 0:
            x = F.dropout(x, p=self.droprate)
        xyz = self.fc_xyz(x)
        wpqr = self.fc_wpqr(x)
        return torch.cat((xyz, wpqr), 1)
class AtLocPlus(nn.Module):
    """Temporal wrapper: applies a single-frame pose regressor to clips.

    Input is (batch, steps, ...frame dims); frames are flattened into one
    batch, regressed, and reshaped back to (batch, steps, pose_dim).
    """

    def __init__(self, atlocplus):
        super(AtLocPlus, self).__init__()
        self.atlocplus = atlocplus

    def forward(self, x):
        batch, steps = x.size(0), x.size(1)
        frames = x.view(-1, *x.size()[2:])  # merge batch and step dimensions
        poses = self.atlocplus(frames)
        return poses.view(batch, steps, -1)
| 3,584 | 37.138298 | 109 | py |
# dataset_mean.py -- computes the per-channel pixel mean and variance of a
# dataset's training crops and writes them to stats.txt (row 0 = mean,
# row 1 = variance; consumers such as train.py take sqrt for the std).
#
# FIX: Options registers --batchsize (attribute opt.batchsize); the original
# read opt.batch_size here and in the progress print, raising AttributeError
# on the first use.
import os.path as osp
import numpy as np
from data.dataloaders import RobotCar, SevenScenes
from torchvision import transforms
from torch.utils.data import DataLoader
from tools.options import Options

opt = Options().parse()

data_transform = transforms.Compose([
    transforms.Resize(opt.cropsize),
    transforms.RandomCrop(opt.cropsize),
    transforms.ToTensor()])

# dataset loader
kwargs = dict(scene=opt.scene, data_path=opt.data_dir, train=True, real=False, transform=data_transform)
if opt.dataset == '7Scenes':
    dset = SevenScenes(**kwargs)
elif opt.dataset == 'RobotCar':
    dset = RobotCar(**kwargs)
else:
    raise NotImplementedError

# accumulate per-pixel sums and squared sums over the whole dataset
loader = DataLoader(dset, batch_size=opt.batchsize, num_workers=opt.nThreads)
acc = np.zeros((3, opt.cropsize, opt.cropsize))
sq_acc = np.zeros((3, opt.cropsize, opt.cropsize))
for batch_idx, (imgs, _) in enumerate(loader):
    imgs = imgs.numpy()
    acc += np.sum(imgs, axis=0)
    sq_acc += np.sum(imgs ** 2, axis=0)
    if batch_idx % 50 == 0:
        print('Accumulated {:d} / {:d}'.format(batch_idx * opt.batchsize, len(dset)))

# reduce to one scalar per channel
N = len(dset) * acc.shape[1] * acc.shape[2]
mean_p = np.asarray([np.sum(acc[c]) for c in range(3)])
mean_p /= N
print('Mean pixel = ', mean_p)

# std = E[x^2] - E[x]^2 (this is the variance; sqrt is applied by consumers)
std_p = np.asarray([np.sum(sq_acc[c]) for c in range(3)])
std_p /= N
std_p -= (mean_p ** 2)
print('Std. pixel = ', std_p)

output_filename = osp.join(opt.data_dir, opt.dataset, opt.scene, 'stats.txt')
np.savetxt(output_filename, np.vstack((mean_p, std_p)), fmt='%8.7f')
print('{:s} written'.format(output_filename))
| 1,592 | 29.634615 | 104 | py |
AtLoc | AtLoc-master/data/dataloaders.py | import os
import torch
import numpy as np
import pickle
import os.path as osp
from data.robotcar_sdk.interpolate_poses import interpolate_vo_poses, interpolate_ins_poses
from data.robotcar_sdk.camera_model import CameraModel
from data.robotcar_sdk.image import load_image as robotcar_loader
from tools.utils import process_poses, calc_vos_simple, load_image
from torch.utils import data
from functools import partial
class SevenScenes(data.Dataset):
    """7-Scenes RGB-D relocalization dataset.

    Serves colour (mode 0), depth (mode 1) or both (mode 2) frames together
    with 6-D [translation, log-quaternion] pose targets. Ground-truth poses
    come from the per-frame pose files; with real=True, poses integrated from
    a visual-odometry library (vo_lib) are loaded instead.

    FIX: the deprecated alias np.int was removed in NumPy >= 1.24, so the
    three dtype uses below crashed on modern NumPy; plain int is equivalent.
    """

    def __init__(self, scene, data_path, train, transform=None, target_transform=None, mode=0, seed=7, real=False, skip_images=False, vo_lib='orbslam'):
        self.mode = mode
        self.transform = transform
        self.target_transform = target_transform
        self.skip_images = skip_images
        np.random.seed(seed)

        # directories
        data_dir = osp.join(data_path, '7Scenes', scene)

        # decide which sequences to use
        if train:
            split_file = osp.join(data_dir, 'train_split.txt')
        else:
            split_file = osp.join(data_dir, 'test_split.txt')
        with open(split_file, 'r') as f:
            seqs = [int(l.split('sequence')[-1]) for l in f if not l.startswith('#')]

        # read poses and collect image names
        self.c_imgs = []
        self.d_imgs = []
        self.gt_idx = np.empty((0,), dtype=int)  # was np.int
        ps = {}
        vo_stats = {}
        gt_offset = int(0)
        for seq in seqs:
            seq_dir = osp.join(data_dir, 'seq-{:02d}'.format(seq))
            p_filenames = [n for n in os.listdir(osp.join(seq_dir, '.')) if n.find('pose') >= 0]
            if real:
                # VO-integrated poses plus the alignment stats saved with them
                pose_file = osp.join(data_dir, '{:s}_poses'.format(vo_lib), 'seq-{:02d}.txt'.format(seq))
                pss = np.loadtxt(pose_file)
                frame_idx = pss[:, 0].astype(int)  # was np.int
                if vo_lib == 'libviso2':
                    frame_idx -= 1  # libviso2 frame indices are 1-based
                ps[seq] = pss[:, 1:13]
                vo_stats_filename = osp.join(seq_dir, '{:s}_vo_stats.pkl'.format(vo_lib))
                with open(vo_stats_filename, 'rb') as f:
                    vo_stats[seq] = pickle.load(f)
            else:
                # ground truth: one frame-XXXXXX.pose.txt per frame, identity alignment
                frame_idx = np.array(range(len(p_filenames)), dtype=int)  # was np.int
                pss = [np.loadtxt(osp.join(seq_dir, 'frame-{:06d}.pose.txt'.
                                           format(i))).flatten()[:12] for i in frame_idx]
                ps[seq] = np.asarray(pss)
                vo_stats[seq] = {'R': np.eye(3), 't': np.zeros(3), 's': 1}
            self.gt_idx = np.hstack((self.gt_idx, gt_offset + frame_idx))
            gt_offset += len(p_filenames)
            c_imgs = [osp.join(seq_dir, 'frame-{:06d}.color.png'.format(i))
                      for i in frame_idx]
            d_imgs = [osp.join(seq_dir, 'frame-{:06d}.depth.png'.format(i))
                      for i in frame_idx]
            self.c_imgs.extend(c_imgs)
            self.d_imgs.extend(d_imgs)

        # pose-normalization stats: written during (non-real) training,
        # read back otherwise
        pose_stats_filename = osp.join(data_dir, 'pose_stats.txt')
        if train and not real:
            mean_t = np.zeros(3)  # optionally, use the ps dictionary to calc stats
            std_t = np.ones(3)
            np.savetxt(pose_stats_filename, np.vstack((mean_t, std_t)), fmt='%8.7f')
        else:
            mean_t, std_t = np.loadtxt(pose_stats_filename)

        # convert pose to translation + log quaternion
        self.poses = np.empty((0, 6))
        for seq in seqs:
            pss = process_poses(poses_in=ps[seq], mean_t=mean_t, std_t=std_t,
                                align_R=vo_stats[seq]['R'], align_t=vo_stats[seq]['t'],
                                align_s=vo_stats[seq]['s'])
            self.poses = np.vstack((self.poses, pss))

    def __getitem__(self, index):
        """Return (image, pose); image is None when skip_images is set.

        Frames whose image fails to load are skipped forward until one loads.
        """
        if self.skip_images:
            img = None
            pose = self.poses[index]
        else:
            if self.mode == 0:  # colour only
                img = None
                while img is None:
                    img = load_image(self.c_imgs[index])
                    pose = self.poses[index]
                    index += 1
                index -= 1
            elif self.mode == 1:  # depth only
                img = None
                while img is None:
                    img = load_image(self.d_imgs[index])
                    pose = self.poses[index]
                    index += 1
                index -= 1
            elif self.mode == 2:  # colour + depth pair
                c_img = None
                d_img = None
                while (c_img is None) or (d_img is None):
                    c_img = load_image(self.c_imgs[index])
                    d_img = load_image(self.d_imgs[index])
                    pose = self.poses[index]
                    index += 1
                img = [c_img, d_img]
                index -= 1
            else:
                raise Exception('Wrong mode {:d}'.format(self.mode))

        if self.target_transform is not None:
            pose = self.target_transform(pose)
        if self.skip_images:
            return img, pose
        if self.transform is not None:
            if self.mode == 2:
                img = [self.transform(i) for i in img]
            else:
                img = self.transform(img)
        return img, pose

    def __len__(self):
        return self.poses.shape[0]
class RobotCar(data.Dataset):
    """Oxford RobotCar relocalization dataset (single frames + 6-D poses).

    Image paths are derived from stereo.timestamps; poses come from the INS
    ground truth or, with real=True, from integrated visual odometry.

    FIX: the VO-stats pickle was opened in text mode ('r'); pickle.load
    requires a binary-mode file under Python 3 (SevenScenes already used
    'rb' for the same file type) -- changed to 'rb'.
    """

    def __init__(self, scene, data_path, train, transform=None, target_transform=None, real=False, skip_images=False, seed=7, undistort=False, vo_lib='stereo'):
        np.random.seed(seed)
        self.transform = transform
        self.target_transform = target_transform
        self.skip_images = skip_images
        self.undistort = undistort

        # directories
        data_dir = osp.join(data_path, 'RobotCar', scene)

        # decide which sequences to use
        if train:
            split_filename = osp.join(data_dir, 'train_split.txt')
        else:
            split_filename = osp.join(data_dir, 'test_split.txt')
        with open(split_filename, 'r') as f:
            seqs = [l.rstrip() for l in f if not l.startswith('#')]

        ps = {}
        ts = {}
        vo_stats = {}
        self.imgs = []
        for seq in seqs:
            seq_dir = osp.join(data_dir, seq)
            # read the image timestamps
            ts_filename = osp.join(seq_dir, 'stereo.timestamps')
            with open(ts_filename, 'r') as f:
                ts[seq] = [int(l.rstrip().split(' ')[0]) for l in f]
            if real:  # poses from integration of VOs
                if vo_lib == 'stereo':
                    vo_filename = osp.join(seq_dir, 'vo', 'vo.csv')
                    p = np.asarray(interpolate_vo_poses(vo_filename, ts[seq], ts[seq][0]))
                elif vo_lib == 'gps':
                    vo_filename = osp.join(seq_dir, 'gps', 'gps_ins.csv')
                    p = np.asarray(interpolate_ins_poses(vo_filename, ts[seq], ts[seq][0]))
                else:
                    raise NotImplementedError
                vo_stats_filename = osp.join(seq_dir, '{:s}_vo_stats.pkl'.format(vo_lib))
                with open(vo_stats_filename, 'rb') as f:  # FIX: was 'r'
                    vo_stats[seq] = pickle.load(f)
                ps[seq] = np.reshape(p[:, :3, :], (len(p), -1))
            else:  # GT poses
                pose_filename = osp.join(seq_dir, 'gps', 'ins.csv')
                p = np.asarray(interpolate_ins_poses(pose_filename, ts[seq], ts[seq][0]))
                ps[seq] = np.reshape(p[:, :3, :], (len(p), -1))
                vo_stats[seq] = {'R': np.eye(3), 't': np.zeros(3), 's': 1}
            self.imgs.extend([osp.join(seq_dir, 'stereo', 'centre_processed', '{:d}.png'.format(t)) for t in ts[seq]])

        # read / save pose normalization information
        poses = np.empty((0, 12))
        for p in ps.values():
            poses = np.vstack((poses, p))
        pose_stats_filename = osp.join(data_dir, 'pose_stats.txt')
        if train and not real:
            # columns 3, 7, 11 of the flattened 3x4 pose are the translation
            mean_t = np.mean(poses[:, [3, 7, 11]], axis=0)
            std_t = np.std(poses[:, [3, 7, 11]], axis=0)
            np.savetxt(pose_stats_filename, np.vstack((mean_t, std_t)), fmt='%8.7f')
        else:
            mean_t, std_t = np.loadtxt(pose_stats_filename)

        # convert the pose to translation + log quaternion, align, normalize
        self.poses = np.empty((0, 6))
        for seq in seqs:
            pss = process_poses(poses_in=ps[seq], mean_t=mean_t, std_t=std_t,
                                align_R=vo_stats[seq]['R'], align_t=vo_stats[seq]['t'],
                                align_s=vo_stats[seq]['s'])
            self.poses = np.vstack((self.poses, pss))
        self.gt_idx = np.asarray(range(len(self.poses)))

        # camera model and image loader (only use while pre_processing)
        camera_model = CameraModel('./data/robotcar_camera_models', osp.join('stereo', 'centre'))
        self.im_loader = partial(robotcar_loader, model=camera_model)

    def __getitem__(self, index):
        """Return (image, pose); skips forward past frames that fail to load."""
        if self.skip_images:
            img = None
            pose = self.poses[index]
        else:
            img = None
            while img is None:
                if self.undistort:
                    img = np.uint8(load_image(self.imgs[index], loader=self.im_loader))
                else:
                    img = load_image(self.imgs[index])
                pose = np.float32(self.poses[index])
                index += 1
            index -= 1

        if self.target_transform is not None:
            pose = self.target_transform(pose)
        if self.skip_images:
            return img, pose
        if self.transform is not None:
            img = self.transform(img)
        return img, pose

    def __len__(self):
        return len(self.poses)
class MF(data.Dataset):
    """Multi-frame wrapper dataset for AtLoc+.

    Returns clips of `steps` frames sampled `skip` apart (centred on the
    requested index, clamped at the dataset bounds) from an underlying
    SevenScenes or RobotCar dataset, optionally appending frame-to-frame
    visual-odometry (VO) pose targets.

    FIXES: under Python 3, `len(offsets) / 2` and `self.steps / 2` are floats
    and cannot index / were silently float arithmetic (TypeError on the index
    use); floor division reproduces the Python-2 behaviour. Also replaces the
    np.int alias removed in NumPy >= 1.24 with plain int.
    """

    def __init__(self, dataset, include_vos=False, no_duplicates=False, *args, **kwargs):
        """
        dataset: '7Scenes' or 'RobotCar'.
        include_vos: append VO poses to the target (used with AtLoc+ real mode).
        no_duplicates: shift indices so clips never clamp at the sequence start.
        Remaining args/kwargs are forwarded to the wrapped dataset.
        """
        self.steps = kwargs.pop('steps', 2)
        self.skip = kwargs.pop('skip', 1)
        self.variable_skip = kwargs.pop('variable_skip', False)
        self.real = kwargs.pop('real', False)
        self.include_vos = include_vos
        self.train = kwargs['train']
        self.vo_func = kwargs.pop('vo_func', calc_vos_simple)
        self.no_duplicates = no_duplicates

        if dataset == '7Scenes':
            self.dset = SevenScenes(*args, real=self.real, **kwargs)
            if self.include_vos and self.real:
                self.gt_dset = SevenScenes(*args, skip_images=True, real=False, **kwargs)
        elif dataset == 'RobotCar':
            self.dset = RobotCar(*args, real=self.real, **kwargs)
            if self.include_vos and self.real:
                self.gt_dset = RobotCar(*args, skip_images=True, real=False, **kwargs)
        else:
            raise NotImplementedError
        self.L = self.steps * self.skip

    def get_indices(self, index):
        """Frame indices (clamped to dataset bounds) for the clip at *index*."""
        if self.variable_skip:
            skips = np.random.randint(1, high=self.skip + 1, size=self.steps - 1)
        else:
            skips = self.skip * np.ones(self.steps - 1)
        offsets = np.insert(skips, 0, 0).cumsum()
        # centre the clip on the requested index (// fixes the float index)
        offsets -= offsets[len(offsets) // 2]
        if self.no_duplicates:
            offsets += self.steps // 2 * self.skip  # // fixes float arithmetic
        offsets = offsets.astype(int)  # was np.int (removed in NumPy >= 1.24)
        idx = index + offsets
        idx = np.minimum(np.maximum(idx, 0), len(self.dset) - 1)
        assert np.all(idx >= 0), '{:d}'.format(index)
        assert np.all(idx < len(self.dset))
        return idx

    def __getitem__(self, index):
        idx = self.get_indices(index)
        clip = [self.dset[i] for i in idx]
        imgs = torch.stack([c[0] for c in clip], dim=0)
        poses = torch.stack([c[1] for c in clip], dim=0)
        if self.include_vos:
            # (1, steps, 6) -> (steps - 1, 6) relative-motion targets
            vos = self.vo_func(poses.unsqueeze(0))[0]
            if self.real:  # absolute poses need to come from the GT dataset
                clip = [self.gt_dset[self.dset.gt_idx[i]] for i in idx]
                poses = torch.stack([c[1] for c in clip], dim=0)
            poses = torch.cat((poses, vos), dim=0)
        return imgs, poses

    def __len__(self):
        L = len(self.dset)
        if self.no_duplicates:
            L -= (self.steps - 1) * self.skip
        return L
| 12,124 | 39.416667 | 160 | py |
AtLoc | AtLoc-master/data/process_robotcar.py | import os.path as osp
import numpy as np
from PIL import Image
from data.dataloaders import RobotCar
from torch.utils.data import DataLoader
from torchvision import transforms
from tools.options import Options
# Pre-process RobotCar stereo/centre images: undistort + resize each frame of
# the chosen split and write it back as <timestamp>.png under centre_processed.
opt = Options().parse()
if opt.val:
    print('processing VAL data using {:d} cores'.format(opt.nThreads))
else:
    print('processing TRAIN data using {:d} cores'.format(opt.nThreads))
# create data loader; undistort=True makes RobotCar apply the camera model,
# and the transform only resizes -- pixel values stay uint8 arrays for saving.
transform = transforms.Compose([transforms.ToPILImage(),
        transforms.Resize(opt.cropsize),
        transforms.Lambda(lambda x: np.asarray(x))])
dset = RobotCar(scene=opt.scene, data_path=opt.data_dir, train=not opt.val, transform=transform, undistort=True)
loader = DataLoader(dset, batch_size=opt.batchsize, num_workers=opt.nThreads)
# gather information about output filenames
base_dir = osp.join(opt.data_dir, opt.dataset, opt.scene)
if opt.val:
    split_filename = osp.join(base_dir, 'test_split.txt')
else:
    split_filename = osp.join(base_dir, 'train_split.txt')
with open(split_filename, 'r') as f:
    seqs = [l.rstrip() for l in f if not l.startswith('#')]
im_filenames = []
for seq in seqs:
    seq_dir = osp.join(base_dir, seq)
    # stereo.timestamps: one "<timestamp> <chunk>" line per frame.
    ts_filename = osp.join(seq_dir, 'stereo.timestamps')
    with open(ts_filename, 'r') as f:
        ts = [l.rstrip().split(' ')[0] for l in f]
    im_filenames.extend([osp.join(seq_dir, 'stereo', 'centre_processed', '{:s}.png'.
        format(t)) for t in ts])
# sanity check: the dataset must enumerate frames in the same order as the
# timestamp files, otherwise images would be written to the wrong names.
assert len(dset) == len(im_filenames)
# loop
for batch_idx, (imgs, _) in enumerate(loader):
    for idx, im in enumerate(imgs):
        im_filename = im_filenames[batch_idx * opt.batchsize + idx]
        im = Image.fromarray(im.numpy())
        try:
            im.save(im_filename)
        except IOError:
            # best-effort: report and continue with the remaining frames
            print('IOError while saving {:s}'.format(im_filename))
    if batch_idx % 50 == 0:
        print('Processed {:d} / {:d}'.format(batch_idx * opt.batchsize, len(dset)))
| 1,998 | 35.345455 | 112 | py |
layerwise-batch-entropy | layerwise-batch-entropy-main/experiment_ae/batch_entropy.py | import torch
from torch import nn
import torch.nn.functional as F
import numpy as np
import scipy
import scipy.stats
import random
def batch_entropy(x):
    """Estimate the differential entropy of each unit across the mini-batch,
    assuming the per-unit values are Gaussian, and return the mean.

    x is flattened to (batch, units); the estimate per unit is
    0.5 * log(pi * e * std^2 + 1), averaged over all units.
    """
    if x.shape[0] <= 1:
        raise Exception("The batch entropy can only be calculated for |batch| > 1.")
    flat = torch.flatten(x, start_dim=1)
    per_unit_std = torch.std(flat, dim=0)
    per_unit_entropy = 0.5 * torch.log(np.pi * np.e * per_unit_std ** 2 + 1)
    return per_unit_entropy.mean()
class LBELoss(nn.Module):
    """ Computation of the LBE + Criterion loss.
        See also https://www.wolframalpha.com/input/?i=%28%28x-0.8%29*0.5%29**2+for+x+from+0+to+2+y+from+0+to+0.5
    """
    def __init__(self, num, lbe_alpha=0.5, lbe_alpha_min=0.2, lbe_beta=0.5):
        super(LBELoss, self).__init__()
        # One learnable entropy target per layer, all initialised to lbe_alpha.
        init_alphas = torch.ones(num) * lbe_alpha
        self.lbe_alpha_p = torch.nn.Parameter(init_alphas, requires_grad=True)
        # NOTE(review): device is hard-coded to CUDA, like the original code;
        # the loss cannot run on CPU-only machines.
        self.lbe_alpha_min = torch.FloatTensor([lbe_alpha_min]).to("cuda")
        self.lbe_beta = lbe_beta
    def lbe_per_layer(self, a, i):
        # Quadratic penalty pulling layer i's batch entropy towards its
        # (lower-bounded) learnable target.
        target = torch.maximum(self.lbe_alpha_min, torch.abs(self.lbe_alpha_p[i]))
        return self.lbe_beta * (batch_entropy(a) - target) ** 2
    def __call__(self, loss, A):
        per_layer = [self.lbe_per_layer(a, i) for i, a in enumerate(A)]
        lbe = torch.mean(torch.stack(per_layer)) * loss
        return loss + lbe, lbe
class NoLBELoss(nn.Module):
    """Drop-in replacement for LBELoss that disables the LBE term.

    Exposes the same (zeroed) attributes so logging code keeps working, and
    returns the criterion loss untouched together with an LBE value of 0.
    """
    def __init__(self):
        super(NoLBELoss, self).__init__()
        self.lbe_alpha = torch.zeros(1)
        self.lbe_alpha_min = torch.zeros(1)
        self.lbe_beta = torch.zeros(1)
        self.lbe_alpha_p = torch.zeros(1)
    def __call__(self, loss, A):
        # No batch-entropy regularisation: pass the loss straight through.
        return loss, 0.0
| 1,965 | 31.766667 | 113 | py |
layerwise-batch-entropy | layerwise-batch-entropy-main/experiment_ae/dataloader.py | # coding: utf-8
import numpy as np
import torch
import torch.utils.data
import torchvision
import torchvision.models
from torchvision import transforms
from torchvision import datasets
def get_loader(dataset, batch_size, num_workers):
    """Return (train, val, test) DataLoaders for the named dataset.

    Supported names: "mnist", "fashionmnist".  Raises for anything else.
    """
    if dataset == "mnist":
        return get_mnist_loader(batch_size, num_workers)
    if dataset == "fashionmnist":
        return get_fashionmnist_loader(batch_size, num_workers)
    raise Exception(f"Dataset {dataset} not found.")
def get_mnist_loader(batch_size, num_workers):
    """Download MNIST and return (train, val, test) DataLoaders.

    The 60k train set is randomly split 50k/10k into train/val; all three
    loaders drop the last incomplete batch.
    """
    to_tensor = transforms.Compose([
        transforms.ToTensor()
    ])
    full_train = datasets.MNIST('.data', train=True, download=True, transform=to_tensor)
    train_split, val_split = torch.utils.data.random_split(full_train, [50000, 10000])
    # Train and validation share the same loader settings (incl. shuffling).
    loader_kwargs = dict(
        batch_size=batch_size,
        shuffle=True,
        num_workers=num_workers,
        pin_memory=True,
        drop_last=True
    )
    train_loader = torch.utils.data.DataLoader(train_split, **loader_kwargs)
    val_loader = torch.utils.data.DataLoader(val_split, **loader_kwargs)
    test_split = datasets.MNIST('.data', train=False, transform=to_tensor)
    test_loader = torch.utils.data.DataLoader(
        test_split,
        batch_size=batch_size,
        num_workers=num_workers,
        shuffle=False,
        pin_memory=True,
        drop_last=True,
    )
    return train_loader, val_loader, test_loader
def get_fashionmnist_loader(batch_size, num_workers):
    """Download FashionMNIST and return (train, val, test) DataLoaders.

    Mirrors get_mnist_loader: 50k/10k random train/val split, last
    incomplete batch dropped everywhere.
    """
    to_tensor = transforms.Compose([
        transforms.ToTensor()
    ])
    full_train = datasets.FashionMNIST('.data', train=True, download=True, transform=to_tensor)
    train_split, val_split = torch.utils.data.random_split(full_train, [50000, 10000])
    # Train and validation share the same loader settings (incl. shuffling).
    loader_kwargs = dict(
        batch_size=batch_size,
        shuffle=True,
        num_workers=num_workers,
        pin_memory=True,
        drop_last=True
    )
    train_loader = torch.utils.data.DataLoader(train_split, **loader_kwargs)
    val_loader = torch.utils.data.DataLoader(val_split, **loader_kwargs)
    test_split = datasets.FashionMNIST('.data', train=False, transform=to_tensor)
    test_loader = torch.utils.data.DataLoader(
        test_split,
        batch_size=batch_size,
        num_workers=num_workers,
        shuffle=False,
        pin_memory=True,
        drop_last=True,
    )
    return train_loader, val_loader, test_loader
# def get_cifar10_loader(batch_size, num_workers):
# tmp_set = torchvision.datasets.CIFAR10('.data',
# train=True,
# download=True,
# transform=transforms.Compose([transforms.ToTensor()]))
# tmp_loader = torch.utils.data.DataLoader(tmp_set, batch_size=len(tmp_set), num_workers=1)
# tmp_data = next(iter(tmp_loader))
# mean, std = tmp_data[0].mean(), tmp_data[0].std()
# train_transform = torchvision.transforms.Compose([
# torchvision.transforms.RandomCrop(32, padding=4),
# torchvision.transforms.RandomHorizontalFlip(),
# torchvision.transforms.ToTensor(),
# torchvision.transforms.Normalize(mean, std),
# ])
# test_transform = torchvision.transforms.Compose([
# torchvision.transforms.ToTensor(),
# torchvision.transforms.Normalize(mean, std),
# ])
# train_dataset = torchvision.datasets.CIFAR10(
# ".data", train=True, transform=train_transform, download=True)
# test_dataset = torchvision.datasets.CIFAR10(
# ".data", train=False, transform=test_transform, download=True)
# train_loader = torch.utils.data.DataLoader(
# train_dataset,
# batch_size=batch_size,
# shuffle=True,
# num_workers=num_workers,
# pin_memory=True,
# drop_last=True,
# )
# test_loader = torch.utils.data.DataLoader(
# test_dataset,
# batch_size=batch_size,
# num_workers=num_workers,
# shuffle=False,
# pin_memory=True,
# drop_last=False,
# )
# return train_loader, test_loader
# def get_cifar100_loader(batch_size, num_workers):
# tmp_set = torchvision.datasets.CIFAR100('.data',
# train=True,
# download=True,
# transform=transforms.Compose([transforms.ToTensor()]))
# tmp_loader = torch.utils.data.DataLoader(tmp_set, batch_size=len(tmp_set), num_workers=1)
# tmp_data = next(iter(tmp_loader))
# mean, std = tmp_data[0].mean(), tmp_data[0].std()
# train_transform = torchvision.transforms.Compose([
# torchvision.transforms.RandomCrop(32, padding=4),
# torchvision.transforms.RandomHorizontalFlip(),
# torchvision.transforms.ToTensor(),
# torchvision.transforms.Normalize(mean, std),
# ])
# test_transform = torchvision.transforms.Compose([
# torchvision.transforms.ToTensor(),
# torchvision.transforms.Normalize(mean, std),
# ])
# train_dataset = torchvision.datasets.CIFAR100(
# ".data", train=True, transform=train_transform, download=True)
# test_dataset = torchvision.datasets.CIFAR100(
# ".data", train=False, transform=test_transform, download=True)
# train_loader = torch.utils.data.DataLoader(
# train_dataset,
# batch_size=batch_size,
# shuffle=True,
# num_workers=num_workers,
# pin_memory=True,
# drop_last=True,
# )
# test_loader = torch.utils.data.DataLoader(
# test_dataset,
# batch_size=batch_size,
# num_workers=num_workers,
# shuffle=False,
# pin_memory=True,
# drop_last=False,
# )
# return train_loader, test_loader
| 5,910 | 31.300546 | 95 | py |
layerwise-batch-entropy | layerwise-batch-entropy-main/experiment_ae/utils.py | import enum
import os
import logging
import io
from random import random
import warnings
from matplotlib.colors import ListedColormap
import numpy as np
import torch
import torch.nn as nn
from torchvision.utils import make_grid
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE
import matplotlib.pyplot as plt
from PIL import Image
import seaborn as sns
import wandb
logger = logging.getLogger("UTILS")
warnings.filterwarnings(action='ignore', category=FutureWarning)
sns.set()
# def is_debug():
# return str2bool(os.environ.get("DEBUG", "false"))
def str2bool(s):
    """Parse a yes/no style string into a bool (case-insensitive).

    Accepts true/yes/y and false/no/n; raises RuntimeError otherwise.
    """
    lowered = s.lower()
    if lowered in ('true', 'yes', 'y'):
        return True
    if lowered in ('false', 'no', 'n'):
        return False
    raise RuntimeError('Boolean value expected')
# Global debug switch: when true, the callbacks below skip wandb logging.
DEBUG = str2bool(os.environ.get("DEBUG", "false"))
# Whether to immediately commit results to wandb, running
# many runs in parallel may result in rate limit error.
# Setting WANDB_COMMIT to False will only commit result after
# run has finished
# WANDB_COMMIT = str2bool(os.environ.get("WANDB_COMMIT", "true"))
WANDB_COMMIT = False
# When true, PlotLatentSpace also shows its figures interactively.
SHOW_PLOT = str2bool(os.environ.get("SHOW_PLOT", "false"))
def mpl_fig_to_wandb_image(fig):
    """Render a matplotlib figure to an in-memory PNG and wrap it as a
    wandb.Image suitable for logging."""
    buffer = io.BytesIO()
    fig.savefig(buffer, format="png")
    return wandb.Image(Image.open(buffer))
class ReconstructImages:
    """Callback: sample a few images, reconstruct them with the model, and
    log a two-row grid (originals above reconstructions) to wandb."""
    def __init__(self, num_images = 5, mean_only = True):
        self.num_images = num_images
        self.mean_only = mean_only
    def __call__(
        self, stage, model, data_loader, step,
    ) -> None:
        model.eval()
        ds = data_loader.dataset
        # Random subset of dataset indices, resampled on every call.
        picks = torch.randperm(len(ds))[: self.num_images]
        with torch.no_grad():
            originals = torch.stack([ds[i][0] for i in picks], dim=0)
            originals = originals.cuda()
            recons = model.reconstruct(originals, mean_only=self.mean_only)
            recons = recons.view_as(originals)
            stacked = torch.cat([originals, recons])
        grid = make_grid(
            stacked, nrow=self.num_images, padding=1, pad_value=0.5
        )
        image = wandb.Image(grid, caption="Top: Ground-truth, Bottom: Reconstruction")
        if not DEBUG:
            wandb.log({f"Reconstruction/{stage}": image}, step=step, commit=WANDB_COMMIT)
class PlotLatentSpace:
    """Callback that visualizes the model's latent space in wandb:
    per-dimension histograms, a t-SNE scatter, a PCA scatter and a decoded
    traversal grid over the (possibly PCA-compressed) 2D latent plane.
    """
    def __init__(self, num_batches = None, quantiles = [0.025, 0.975], size_recon = 12):
        # NOTE(review): mutable default argument for `quantiles`; harmless
        # here since it is only read (np.quantile), never mutated.
        self.num_batches = num_batches  # how many batches to encode (None = all)
        self.quantiles = quantiles  # latent range covered by the traversal grid
        self.size_recon = size_recon  # traversal grid is size_recon x size_recon
    def __call__(self, stage, model, data_loader, step):
        """Encode (part of) `data_loader`, build the plots and log them under
        the `stage` prefix at wandb step `step`."""
        model.eval()
        zs, ys = [], []
        # Collect mean latent codes and labels.  NOTE(review): assumes CUDA
        # is available (x.cuda()), like the rest of this module.
        with torch.no_grad():
            for batch_idx, (x, y) in enumerate(data_loader):
                x = x.cuda()
                z = model.encode(x, mean_only=True)
                zs.append(z.cpu().numpy())
                ys.append(y.numpy())
                if self.num_batches is not None and batch_idx == self.num_batches - 1:
                    break
        zs = np.concatenate(zs, axis=0)
        ys = np.concatenate(ys, axis=0)
        # Create histogram of each latent variable
        zs_mean = np.mean(zs, axis=0)
        zs_std = np.std(zs, axis=0)
        assert len(zs_mean) == zs.shape[-1] and len(zs_std) == zs.shape[-1]
        # Grid layout: up to 3 columns, enough rows for every latent dim.
        ncols = min(3, zs.shape[-1])
        nrows = zs.shape[-1] // ncols
        nrows += 0 if zs.shape[-1] % ncols == 0 else 1
        fig = plt.Figure(figsize=(9, nrows * 3))
        for latent_dim in range(zs.shape[-1]):
            ax = fig.add_subplot(nrows, ncols, latent_dim + 1)
            ax.hist(zs[:, latent_dim])
            mean = zs_mean[latent_dim]
            std = zs_std[latent_dim]
            ax.set_title(f"{mean:.3f} +/- {std:.3f}")
            ax.autoscale()
        fig.tight_layout()
        logs = {f"{stage} z space/Histogram": mpl_fig_to_wandb_image(fig)}
        plt.close(fig)
        # 2D plots need compression only when the latent space has > 2 dims.
        use_compression = zs.shape[-1] > 2
        # Create scatter plot using t-SNE with ground-truth labels
        tsne_error = False
        if use_compression:
            try:
                tsne = TSNE(n_components=2, init="pca", random_state=0)
                zs_tsne = tsne.fit_transform(zs)
            except Exception:
                # best-effort: skip the t-SNE plot if the embedding fails
                tsne_error = True
        else:
            zs_tsne = zs
        if not tsne_error:
            color_palette = sns.color_palette(n_colors=len(np.unique(ys)))
            fig, ax = plt.subplots(1, 1, figsize=(9, 9))
            scatter = ax.scatter(x=zs_tsne[:, 0], y=zs_tsne[:, 1], c=ys, cmap=ListedColormap(color_palette))
            ax.legend(*scatter.legend_elements())
            ax.autoscale()
            fig.tight_layout()
            logs[f"{stage} z space/t-SNE"] = mpl_fig_to_wandb_image(fig)
            plt.close(fig)
        # If latent space has more than 2 components
        # use the 2 main principal components
        pca_error = False
        if use_compression:
            pca = PCA(n_components=2)
            try:
                zs_pca = pca.fit_transform(zs)
            except Exception as e:
                logger.error(e)
                pca_error = True
        else:
            zs_pca = zs
        if not pca_error:
            # Traverse latent space
            # Build a size_recon x size_recon grid spanning the chosen
            # quantile range of each (compressed) latent axis.
            z_quantiles = np.quantile(zs_pca, self.quantiles, axis=0).T
            zs_traverse = []
            for y in np.linspace(*z_quantiles[1], self.size_recon):
                for x in np.linspace(*z_quantiles[0], self.size_recon):
                    zs_traverse.append([x, y])
            # Transforms back to correct latent size
            if use_compression:
                zs_traverse = pca.inverse_transform(zs_traverse)
            with torch.no_grad():
                zs_traverse = torch.tensor(zs_traverse).float()
                zs_traverse = zs_traverse.cuda()
                x_hat_traverse = model.decode(zs_traverse)
            grid = make_grid(x_hat_traverse, nrow=self.size_recon, padding=0)
            color_palette = sns.color_palette(n_colors=len(np.unique(ys)))
            fig, ax = plt.subplots(1, 1, figsize=(9, 9))
            scatter = ax.scatter(x=zs_pca[:, 0], y=zs_pca[:, 1], c=ys, cmap=ListedColormap(color_palette))
            ax.legend(*scatter.legend_elements())
            ax.autoscale()
            fig.tight_layout()
            logs[f"{stage} z space/PCA"] = mpl_fig_to_wandb_image(fig)
            plt.close(fig)
            logs[f"{stage} z space/traversal"] = wandb.Image(grid)
        # Wandb Logging
        if not DEBUG:
            wandb.log(
                logs,
                step=step,
                commit=WANDB_COMMIT
            )
        if SHOW_PLOT:
            plt.show()
| 6,717 | 29.675799 | 108 | py |
layerwise-batch-entropy | layerwise-batch-entropy-main/experiment_ae/create_loss_surface.py | from __future__ import print_function
import argparse
from email.mime import base
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
from torch.optim.lr_scheduler import StepLR
from torchmetrics.functional import ssim as compute_ssim
import time
import copy
from tqdm import tqdm
import numpy as np
import random
from batch_entropy import batch_entropy, LBELoss, NoLBELoss
from plot import generate_plots
from autoencoder import AE
# Very good FAQ for loss surface plots:
# https://losslandscape.com/faq/
# Thanks to https://gitlab.com/qbeer/loss-landscape/-/blob/main/loss_landscape/landscape_utils.py
def init_directions(model):
    """Sample two random directions (delta, nu) per parameter tensor for the
    2D loss-surface slice, each rescaled to the norm of its parameter
    (filter normalization).  Returns a list of (delta, nu) pairs and prints
    the total parameter count.
    """
    directions = []
    total = 0
    for _, param in model.named_parameters():
        shape = param.size()
        delta = torch.normal(.0, 1, size=shape)
        nu = torch.normal(.0, 1, size=shape)
        scale = torch.norm(param)
        # Normalize each random direction, then rescale to the parameter norm.
        delta = delta / torch.norm(delta) * scale
        nu = nu / torch.norm(nu) * scale
        directions.append((delta, nu))
        total += np.prod(shape)
    print(f'A total of {total:,} parameters.')
    return directions
def init_network(model, all_noises, alpha, beta):
    """Perturb the model's parameters in place along the sampled directions:
    p <- p + alpha * delta + beta * nu.  Returns the (mutated) model."""
    with torch.no_grad():
        for param, (delta, nu) in zip(model.parameters(), all_noises):
            param.copy_(param + alpha * delta + beta * nu)
    return model
def train_epoch(args, model, criterion, device, train_loader, optimizer, steps):
    """Run at most one epoch of optimization, stopping early once the global
    step budget (args.steps) is exceeded.  Returns the updated cumulative
    step count."""
    model.train()
    for batch_idx, (x, _) in enumerate(train_loader):
        steps += 1
        if steps > args.steps:
            return steps
        x = x.to(device)
        optimizer.zero_grad()
        out = model(x)
        loss, _, _ = criterion((out["x_hat"], out["A"]), x)
        loss.backward()
        optimizer.step()
        if batch_idx % args.log_interval == 0:
            # Batch entropy of the last activation, for monitoring only.
            h_a = batch_entropy(out["A"][-1])
            seen = batch_idx * len(x)
            total = len(train_loader.dataset)
            print('Step: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f} H: {:.6f}'.format(
                steps, seen, total,
                100. * batch_idx / len(train_loader), loss.item(), h_a))
    return steps
def evaluate(args, model, criterion, device, dataset_loader):
    """Evaluate the model on up to ~eval_size batches of dataset_loader and
    return (mean MSE, mean SSIM, mean LBE, mean total loss).

    NOTE(review): criterion is unpacked into 3 values here, but the LBELoss /
    NoLBELoss classes in this repo's batch_entropy.py return 2 -- the modules
    appear out of sync; confirm which batch_entropy version is intended.
    """
    # model.train() keeps batch statistics live, presumably because batch
    # entropy needs per-batch variance -- TODO confirm this is intentional.
    model.train()
    mses = []
    ssim = []
    lbes = []
    losses = []
    eval_size = 20
    # Evaluate test acc / loss
    with torch.no_grad():
        for batch_idx, (x, _) in enumerate(dataset_loader):
            x = x.to(device)
            output = model(x)
            x_hat = output["x_hat"]
            A = output["A"]
            loss, mse, lbe_loss = criterion((x_hat, A), x)
            losses.append(loss)
            mses.append(mse)
            lbes.append(lbe_loss)
            ssim.append(compute_ssim(x_hat, x).item())
            if len(lbes) > eval_size:
                break
    mses = np.mean(mses)
    ssim = np.mean(ssim)
    lbes = np.mean(lbes)
    # The collected per-batch losses are discarded; the returned loss is
    # recomputed as mean(mse) + mean(lbe).
    losses = mses+lbes
    return mses, ssim, lbes, losses
def main():
    """Train an AE on MNIST while periodically sampling the loss surface
    around the current weights (a random 2D slice built by init_directions /
    init_network) and dumping MSE/SSIM/LBE/loss grids plus plots under
    ./generated/lbe_<beta>/depth_<depth>/steps_<steps>.
    """
    # Training settings
    parser = argparse.ArgumentParser(description='Batch Entropy with PyTorch and MNIST')
    parser.add_argument('--batch-size', type=int, default=256, metavar='N',
                        help='input batch size for training (default: 64)')
    parser.add_argument('--test-batch-size', type=int, default=1024, metavar='N',
                        help='input batch size for testing (default: 1024)')
    parser.add_argument('--steps', type=int, default=10, metavar='N',
                        help='number of steps to train (default: 14)')
    parser.add_argument('--lr', type=float, default=0.0001, metavar='LR',
                        help='learning rate (default: 1.0)')
    parser.add_argument('--no-cuda', action='store_true', default=True,
                        help='disables CUDA training')
    parser.add_argument('--seed', type=int, default=1, metavar='S',
                        help='random seed (default: 1)')
    parser.add_argument('--log-interval', type=int, default=10, metavar='N',
                        help='how many batches to wait before logging training status')
    parser.add_argument('--lbe_beta', type=float, default=0.0,
                        help='LBE beta value')
    parser.add_argument('--lbe_alpha', type=float, default=0.5)
    parser.add_argument('--lbe_alpha_min', type=float, default=1.5)
    parser.add_argument('--depth', type=int, default=25)
    parser.add_argument('--width', type=int, default=256)
    parser.add_argument('--latent_size', type=int, default=10)
    parser.add_argument('--resolution', type=int, default=10, metavar='N',
                        help='Resolution of loss plot')
    args = parser.parse_args()
    use_cuda = not args.no_cuda and torch.cuda.is_available()
    torch.manual_seed(args.seed)
    random.seed(args.seed)
    device = torch.device("cuda" if use_cuda else "cpu")
    print("Using device %s" % device)
    train_kwargs = {'batch_size': args.batch_size}
    test_kwargs = {'batch_size': args.test_batch_size}
    if use_cuda:
        cuda_kwargs = {'num_workers': 1,
                       'pin_memory': True,
                       'shuffle': True}
        train_kwargs.update(cuda_kwargs)
        test_kwargs.update(cuda_kwargs)
    transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.1307,), (0.3081,))])
    ds_train = datasets.MNIST('.data', train=True, download=True, transform=transform)
    train_loader = torch.utils.data.DataLoader(ds_train, **train_kwargs)
    steps = 0
    model = AE(width=args.width, depth=args.depth, latent_size=args.latent_size).to(device)
    num_layers = len(model.encoder.fcs) + len(model.decoder.fcs)
    # NOTE(review): batch_entropy.LBELoss in this repo takes (num, lbe_alpha,
    # ...) and NoLBELoss takes no arguments, while both are constructed with a
    # wrapped criterion here (and evaluate() unpacks 3 return values) -- the
    # modules appear to target a different batch_entropy version; confirm.
    if args.lbe_beta != 0.0:
        criterion = LBELoss(nn.MSELoss(), num_layers, lbe_alpha=args.lbe_alpha,
                            lbe_beta=args.lbe_beta, lbe_alpha_min=args.lbe_alpha_min)
    else:
        criterion = NoLBELoss(nn.MSELoss())
    # The random slice directions are sampled once so all surface plots of
    # this run live in the same 2D subspace.
    noises = init_directions(model)
    optimizer = optim.Adam(model.parameters(), lr=args.lr)
    epoch = 0
    while steps < args.steps:
        #
        # Plot loss surface
        #
        if epoch % 1 == 0:
            def load_model():
                # Fresh copy so each grid point perturbs pristine weights.
                return copy.deepcopy(model)
            RESOLUTION = args.resolution
            A, B = np.meshgrid(np.linspace(-1, 1, RESOLUTION),
                               np.linspace(-1, 1, RESOLUTION), indexing='ij')
            mses = np.empty_like(A)
            ssims = np.empty_like(A)
            lbes = np.empty_like(A)
            losses = np.empty_like(A)
            for i in range(RESOLUTION):
                for j in range(RESOLUTION):
                    alpha = A[i, j]
                    beta = B[i, j]
                    net = init_network(load_model(), noises, alpha, beta).to(device)
                    mse, ssim, lbe, loss = evaluate(args, net, criterion, device, train_loader)
                    lbes[i, j] = lbe
                    mses[i, j] = mse
                    ssims[i, j] = ssim
                    losses[i, j] = loss
                    del net
                    print(f'alpha : {alpha:.2f}, beta : {beta:.2f}, mse : {mse:.2f}, lbe : {lbe:.2f}')
            torch.cuda.empty_cache()
            path = f"./generated/lbe_{args.lbe_beta}/depth_{args.depth}/steps_{steps}"
            if not os.path.exists(path):
                os.makedirs(path)
            np.save(f"{path}/mse.npy", mses)
            np.save(f"{path}/lbe.npy", lbes)
            np.save(f"{path}/loss.npy", losses)
            np.save(f"{path}/ssim.npy", ssims)
            np.save(f"{path}/X.npy", A)
            np.save(f"{path}/Y.npy", B)
            args.path = path
            print("Generate plots...")
            generate_plots(args)
        #
        # Train one epoch
        #
        # BUGFIX: train_epoch receives and returns the cumulative step count,
        # so it must be assigned; "steps +=" added the previous total again
        # and terminated the run far too early with a wrong step count.
        steps = train_epoch(args, model, criterion, device, train_loader, optimizer, steps)
        mse, ssim, lbe, loss = evaluate(args, model, criterion, device, train_loader)
        print(f"steps={steps} | loss={loss} | lbe={lbe} | ce={mse} | acc={ssim}")
        epoch += 1
# Script entry point.
if __name__ == '__main__':
    main()
| 8,623 | 32.952756 | 136 | py |
layerwise-batch-entropy | layerwise-batch-entropy-main/experiment_ae/autoencoder.py | # coding: utf-8
from typing import Dict, List, NewType, Optional, Tuple, Union
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
#####################
###### MODULES ######
#####################
class Encoder(nn.Module):
    """MLP encoder: flattens the image, applies `depth` ReLU layers of size
    `width`, and projects to `latent_size`.  forward() also returns the
    post-ReLU activation of every hidden layer (used for the LBE loss)."""
    def __init__(
        self,
        width: int = 256,
        depth: int = 1,
        latent_size: int = 10,
        img_size: Tuple[int, int, int] = (1, 28, 28),
    ) -> None:
        super().__init__()
        self.flatten = nn.Flatten(start_dim=1)
        layers = [nn.Linear(np.prod(img_size), width)]
        layers.extend(nn.Linear(width, width) for _ in range(depth - 1))
        self.fcs = nn.ModuleList(layers)
        self.fc_out = nn.Linear(width, latent_size)
    def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor, List[torch.Tensor]]:
        h = self.flatten(x)
        activations = []
        for layer in self.fcs:
            h = F.relu(layer(h))
            activations.append(h)
        return self.fc_out(h), activations
class Decoder(nn.Module):
    """MLP decoder: `depth` ReLU layers of size `width` from the latent code,
    a sigmoid output layer, then reshaping back to the image.  forward() also
    returns the post-ReLU activation of every hidden layer."""
    def __init__(
        self,
        width: int = 256,
        depth: int = 1,
        latent_size: int = 10,
        img_size: Tuple[int, int, int] = (1, 28, 28),
    ) -> None:
        super().__init__()
        layers = [nn.Linear(latent_size, width)]
        layers.extend(nn.Linear(width, width) for _ in range(depth - 1))
        self.fcs = nn.ModuleList(layers)
        self.fc_out = nn.Linear(width, np.prod(img_size))
        self.unflatten = nn.Unflatten(dim=1, unflattened_size=img_size)
    def forward(self, z: torch.Tensor) -> Tuple[torch.Tensor, List[torch.Tensor]]:
        h = z
        activations = []
        for layer in self.fcs:
            h = F.relu(layer(h))
            activations.append(h)
        pixels = torch.sigmoid(self.fc_out(h))
        return self.unflatten(pixels), activations
class ConvEncoder(nn.Module):
    """Convolutional encoder: three stride-2 conv layers followed by an MLP
    head down to the latent code.  forward() also returns all post-ReLU
    activations (conv and fully-connected)."""
    # Flattened feature size after the three stride-2 convs, keyed by (H, W).
    receptive_fields = {
        (28, 28): 128 * 5 * 5,
        (64, 64): 128 * 9 * 9,
        # (218, 178): 128 * 29 * 24
    }
    def __init__(
        self,
        width: int = 256,
        depth: int = 1,
        latent_size: int = 10,
        img_size: Tuple[int, int, int] = (1, 28, 28),
    ) -> None:
        super().__init__()
        channel_plan = [img_size[0], 32, 64, 128]
        self.convs = nn.ModuleList(
            nn.Conv2d(
                in_channels=c_in,
                out_channels=c_out,
                kernel_size=4,
                stride=2,
                padding=2,
            )
            for c_in, c_out in zip(channel_plan[:-1], channel_plan[1:])
        )
        self.flatten = nn.Flatten(start_dim=1)
        receptive_field = self.receptive_fields[img_size[1:]]
        widths = [receptive_field] + [width] * (depth - 1) + [latent_size]
        fcs = [nn.Linear(h_in, h_out) for h_in, h_out in zip(widths[:-1], widths[1:])]
        self.fcs = nn.ModuleList(fcs[:-1])
        self.fc_out = fcs[-1]
    def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor, List[torch.Tensor]]:
        activations = []
        h = x
        for conv in self.convs:
            h = F.relu(conv(h))
            activations.append(h)
        h = self.flatten(h)
        for fc in self.fcs:
            h = F.relu(fc(h))
            activations.append(h)
        return self.fc_out(h), activations
class ConvDecoder(nn.Module):
    """Convolutional decoder: MLP up from the latent code, then transposed
    convolutions back to the image (sigmoid output).  forward() also returns
    all post-ReLU activations."""
    # Intermediate spatial sizes produced by the upsampling stages, keyed by
    # the final output (H, W).
    available_output_sizes = {
        (28, 28): [(5, 5), (8, 8), (15, 15), (28, 28)],
        (64, 64): [(9, 9), (17, 17), (33, 33), (64, 64)],
        # (218, 178): [(29, 24), (56, 46), (110, 90), (218, 178)]
    }
    def __init__(
        self,
        width: int = 256,
        depth: int = 1,
        latent_size: int = 10,
        img_size: Tuple[int, int, int] = (1, 28, 28),
    ) -> None:
        super().__init__()
        receptive_field = ConvEncoder.receptive_fields[img_size[1:]]
        widths = [latent_size] + [width] * (depth - 1) + [receptive_field]
        self.fcs = nn.ModuleList(
            nn.Linear(h_in, h_out) for h_in, h_out in zip(widths[:-1], widths[1:])
        )
        self.output_sizes = self.available_output_sizes[img_size[1:]]
        self.unflatten = nn.Unflatten(
            dim=1, unflattened_size=(128,) + self.output_sizes[0]
        )
        self.convs = nn.ModuleList(
            [
                nn.ConvTranspose2d(
                    in_channels=128, out_channels=64, kernel_size=4, stride=2, padding=2
                ),
                nn.ConvTranspose2d(
                    in_channels=64, out_channels=32, kernel_size=4, stride=2, padding=2
                ),
            ]
        )
        self.conv_out = nn.ConvTranspose2d(
            in_channels=32, out_channels=img_size[0], kernel_size=4, stride=2, padding=2
        )
    def forward(self, z: torch.Tensor) -> Tuple[torch.Tensor, List[torch.Tensor]]:
        activations = []
        h = z
        for fc in self.fcs:
            h = F.relu(fc(h))
            activations.append(h)
        h = self.unflatten(h)
        # output_size pins each transposed conv to the exact target shape.
        for conv, out_size in zip(self.convs, self.output_sizes[1:-1]):
            h = F.relu(conv(h, output_size=out_size))
            activations.append(h)
        h = self.conv_out(h, output_size=self.output_sizes[-1])
        return torch.sigmoid(h), activations
####################
###### MODELS ######
####################
class AE(nn.Module):
    """Auto-encoder.

    forward() returns a dict with the reconstruction `x_hat`, the activation
    list `A` (encoder activations, the latent code, decoder activations) and
    the latent code `z`.  `mean_only` is accepted for API parity with VAE and
    ignored here.
    """
    encoder_cls = Encoder
    decoder_cls = Decoder
    def __init__(
        self,
        width: int = 256,
        depth: int = 1,
        latent_size: int = 10,
        img_size: Tuple[int, int, int] = (1, 28, 28),
    ) -> None:
        super().__init__()
        common = dict(width=width, depth=depth, latent_size=latent_size, img_size=img_size)
        self.encoder = self.encoder_cls(**common)
        self.decoder = self.decoder_cls(**common)
    def forward(
        self, x: torch.Tensor
    ) -> Tuple[
        torch.Tensor,
        List[torch.Tensor],
        torch.Tensor,
        torch.Tensor,
        Optional[Tuple[torch.Tensor, torch.Tensor]],
    ]:
        z, enc_acts = self.encoder(x)
        x_hat, dec_acts = self.decoder(z)
        return dict(
            x_hat=x_hat,
            A=enc_acts + [z] + dec_acts,
            z=z
        )
    def encode(self, x: torch.Tensor, mean_only: bool = False) -> torch.Tensor:
        return self.encoder(x)[0]
    def decode(self, z: torch.Tensor) -> torch.Tensor:
        return self.decoder(z)[0]
    def reconstruct(self, x: torch.Tensor, mean_only: bool = False) -> torch.Tensor:
        return self.decode(self.encode(x, mean_only))
class DAE(AE):
    """De-noising auto-encoder: during training, Gaussian noise (scaled by
    `noise_scale`) is added to the input before encoding; the clamp keeps the
    noisy input in [0, 1]."""
    def __init__(
        self,
        noise_scale: float = 0.2,
        *args, **kwargs
    ) -> None:
        super().__init__(
            *args, **kwargs
        )
        self.noise_scale = noise_scale
    def add_noise(self, x: torch.Tensor) -> torch.Tensor:
        noise = torch.normal(0.0, 1.0, size=x.size()).to(x.device).type_as(x)
        return torch.clamp(x + self.noise_scale * noise, 0.0, 1.0)
    def forward(
        self, x: torch.Tensor
    ) -> Tuple[
        torch.Tensor,
        List[torch.Tensor],
        torch.Tensor,
        torch.Tensor,
        Optional[Tuple[torch.Tensor, torch.Tensor]],
    ]:
        if self.training:
            x = self.add_noise(x)
        z, enc_acts = self.encoder(x)
        x_hat, dec_acts = self.decoder(z)
        # NOTE: unlike AE.forward, A does not include the latent code z,
        # mirroring the original implementation.
        return dict(
            x_hat=x_hat,
            A=enc_acts + dec_acts,
            z=z
        )
class VAE(nn.Module):
    """Variational Auto-encoder
    The encoder outputs 2 * latent_size units, split into (loc, logvar);
    sampling uses the reparameterization trick and forward() also returns the
    KL divergence to the unit Gaussian prior.
    [1] C. P. Burgess et al., "Understanding disentangling in $\\beta$-VAE,"
    arXiv:1804.03599 [cs, stat], Apr. 2018.
    Available: http://arxiv.org/abs/1804.03599
    """
    encoder_cls = Encoder
    decoder_cls = Decoder
    def __init__(
        self,
        width: int = 256,
        depth: int = 1,
        latent_size: int = 10,
        img_size: Tuple[int, int, int] = (1, 28, 28),
    ) -> None:
        super().__init__()
        # Encoder emits 2 * latent_size units: the (loc, logvar) chunks.
        self.encoder = self.encoder_cls(
            width=width, depth=depth, latent_size=latent_size * 2, img_size=img_size
        )
        self.decoder = self.decoder_cls(
            width=width, depth=depth, latent_size=latent_size, img_size=img_size
        )
    def reparameterize(self, loc: torch.Tensor, logvar: torch.Tensor) -> torch.Tensor:
        sigma = torch.exp(logvar / 2)
        return loc + sigma * torch.randn_like(sigma)
    def kld(self, loc: torch.Tensor, logvar: torch.Tensor) -> torch.Tensor:
        # Closed-form KL(q(z|x) || N(0, I)), averaged over the batch.
        return -0.5 * torch.sum(1 + logvar - loc.pow(2) - logvar.exp()) / loc.shape[0]
    def forward(
        self, x: torch.Tensor
    ) -> Tuple[
        torch.Tensor,
        List[torch.Tensor],
        torch.Tensor,
        torch.Tensor,
        Optional[Tuple[torch.Tensor, torch.Tensor]],
    ]:
        h, enc_acts = self.encoder(x)
        loc, logvar = h.chunk(2, dim=-1)
        z = self.reparameterize(loc, logvar)
        x_hat, dec_acts = self.decoder(z)
        return dict(
            x_hat=x_hat,
            A=enc_acts + dec_acts,
            kld=self.kld(loc, logvar),
            z=z,
            loc=loc,
            logvar=logvar
        )
    def encode(self, x: torch.Tensor, mean_only: bool = False) -> torch.Tensor:
        h, _ = self.encoder(x)
        loc, logvar = h.chunk(2, dim=-1)
        if mean_only:
            return loc
        return self.reparameterize(loc, logvar)
    def decode(self, z: torch.Tensor) -> torch.Tensor:
        return self.decoder(z)[0]
    def reconstruct(self, x: torch.Tensor, mean_only: bool = False) -> torch.Tensor:
        return self.decode(self.encode(x, mean_only))
# Convolutional variants: same training contracts as AE / DAE / VAE, with the
# MLP encoder/decoder swapped for the convolutional ones via class attributes.
class CAE(AE):
    """Convolutional Auto-encoder"""
    encoder_cls = ConvEncoder
    decoder_cls = ConvDecoder
class CDAE(DAE, CAE):
    """Convolutional De-noising Auto-encoder"""
    # MRO gives DAE's noisy forward() with CAE's conv encoder/decoder classes.
    pass
class CVAE(VAE):
    """Convolutional Variational Auto-encoder"""
    encoder_cls = ConvEncoder
    decoder_cls = ConvDecoder | 10,963 | 26.138614 | 193 | py |
layerwise-batch-entropy | layerwise-batch-entropy-main/experiment_ae/train.py | #!/usr/bin/env python
# coding: utf-8
from configparser import ParsingError
from enum import auto
from json import encoder
import os
import time
import importlib
import json
from collections import OrderedDict
import logging
import argparse
import numpy as np
import random
import wandb
import torch
import torch.nn as nn
import torch.optim
import torch.utils.data
import torch.backends.cudnn
from torchmetrics.functional import ssim as compute_ssim
from dataloader import get_loader
from batch_entropy import LBELoss, NoLBELoss, batch_entropy
import autoencoder
from utils import PlotLatentSpace, DEBUG, WANDB_COMMIT, str2bool, ReconstructImages
logging.basicConfig(
format='[%(asctime)s %(name)s %(levelname)s] - %(message)s',
datefmt='%Y/%m/%d %H:%M:%S',
level=logging.INFO)
logger = logging.getLogger("AE")
global_step = 0
seen_samples = 0
def parse_args():
    """Parse CLI arguments and return a nested OrderedDict config.

    Side effects: initializes wandb (unless DEBUG) and may raise
    ParsingError for unsupported extra arguments. The only extra
    (unknown) argument accepted is ``--noise_scale`` for the DAE arch.
    """
    parser = argparse.ArgumentParser()
    # data config
    parser.add_argument('--dataset', type=str, default="mnist", choices=["mnist", "fashionmnist"])
    # model config
    parser.add_argument('--arch', type=str, default="AE", choices=['AE', 'DAE', 'CAE', 'CDAE', 'VAE'])
    parser.add_argument('--depth', type=int, required=True)
    parser.add_argument('--width', type=int, default=256)
    parser.add_argument('--latent_size', type=int, default=10)
    # run config
    parser.add_argument('--seed', type=int, default=17)
    parser.add_argument('--num_workers', type=int, default=7)
    parser.add_argument('--device', type=str, default="cuda")
    # optim config
    parser.add_argument('--criterion', type=str, default="mse", choices=["mse", "bce"])
    parser.add_argument('--epochs', type=int, default=50)
    parser.add_argument('--batch_size', type=int, default=128)
    parser.add_argument('--base_lr', type=float, default=0.001)
    parser.add_argument('--weight_decay', type=float, default=1e-4)
    parser.add_argument('--lbe_beta', type=float, default=0.3, help='Weight lbe loss.')
    parser.add_argument('--lbe_alpha', type=float, default=2.5)
    parser.add_argument('--lbe_alpha_min', type=float, default=0.5)
    # Utils config
    parser.add_argument("--callback_interval", type=int, default=10, help="Callback interval in number of epochs")
    # TensorBoard
    parser.add_argument(
        '--tensorboard', dest='tensorboard', action='store_true')
    # parse_known_args so a DAE-only --noise_scale can be handled manually below.
    args, unknown_args = parser.parse_known_args()
    # If 'noise_scale' is given as unknown arg, parse it
    noise_scale = None
    if len(unknown_args) > 0:
        if not args.arch == "DAE":
            raise ParsingError(f"Can only supply extra cmdline argument for DAE. Unsupported arguments: {unknown_args}")
        if "--noise_scale" not in unknown_args[0]:
            raise ParsingError("Only 'noise_scale', used by DAE, can be defined as extra cmdline argument")
        # Accept both '--noise_scale=VAL' and '--noise_scale VAL' forms.
        if len(unknown_args) == 1 and "=" in unknown_args[0]:
            noise_scale = float(unknown_args[0].split("=")[-1])
        elif len(unknown_args) == 2:
            noise_scale = float(unknown_args[1])
        else:
            raise ParsingError("Multiple unknown cmdline arguments are given, can only use '--noise_scale VAL' or '--noise_scale=VAL'.")
    if not DEBUG:
        wandb.init(config=args)
    dataset =args.dataset.lower()
    # Image shape per dataset; the final branch is a catch-all default.
    img_size = (1, 28, 28) if dataset == "mnist" else \
               (1, 28, 28) if dataset == "fashionmnist" else \
               (3, 32, 32) if dataset == "cifar10" else \
               (3, 32, 32) if dataset == "cifar100" else \
               (3, 32, 32)
    model_config = OrderedDict([
        ('arch', args.arch),
        ('depth', args.depth),
        ('width', args.width),
        ('latent_size', args.latent_size),
        ('img_size', img_size),
    ])
    if noise_scale is not None and args.arch == "DAE":
        model_config["noise_scale"] = noise_scale
    optim_config = OrderedDict([
        ('criterion', args.criterion),
        ('epochs', args.epochs),
        ('batch_size', args.batch_size),
        ('base_lr', args.base_lr),
        ('weight_decay', args.weight_decay),
        ('lbe_beta', args.lbe_beta),
        ('lbe_alpha', args.lbe_alpha),
        ('lbe_alpha_min', args.lbe_alpha_min)
    ])
    data_config = OrderedDict([
        ('dataset', dataset),
    ])
    run_config = OrderedDict([
        ('seed', args.seed),
        ('num_workers', args.num_workers),
    ])
    utils_config = OrderedDict([
        ("callback_interval", args.callback_interval),
    ])
    config = OrderedDict([
        ('model_config', model_config),
        ('optim_config', optim_config),
        ('data_config', data_config),
        ('run_config', run_config),
        ('utils_config', utils_config)
    ])
    return config
def load_model(config):
    """Instantiate the auto-encoder class named by ``config['arch']``.

    All remaining keys of ``config`` are forwarded as constructor kwargs.
    A shallow copy is taken first so the caller's dict is not mutated
    (the previous implementation popped 'arch' out of the caller's
    mapping as a side effect).
    """
    cfg = dict(config)
    Network = getattr(autoencoder, cfg.pop("arch"))
    return Network(**cfg)
class AverageMeter(object):
    """Tracks the latest value and the running mean of a scalar metric."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Clear all accumulated statistics."""
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, num):
        """Record ``val`` observed over ``num`` samples."""
        self.val = val
        self.count += num
        self.sum += val * num
        self.avg = self.sum / self.count
def train(epoch, model, optimizer, criterion, lbe_fn, train_loader):
    """Run one training epoch; logs loss/SSIM/entropy metrics to wandb.

    ``criterion`` is the reconstruction loss (MSE/BCE); ``lbe_fn`` adds the
    layerwise batch-entropy penalty on the activation list ``A`` returned
    by the model. Advances the module-level global_step/seen_samples.
    """
    global global_step
    global seen_samples
    logger.info('### TRAIN ####')
    is_vae = isinstance(model, autoencoder.VAE)
    model.train()
    criterion_meter = AverageMeter()
    if is_vae:
        kld_meter = AverageMeter()
    lbe_meter = AverageMeter()
    loss_meter = AverageMeter()
    ssim_meter = AverageMeter()
    start = time.time()
    for step, (x, _) in enumerate(train_loader):
        global_step += 1
        seen_samples += x.shape[0]
        x = x.cuda()
        optimizer.zero_grad()
        # Model returns a dict: reconstruction, activation list A, latent z, ...
        output = model(x)
        x_hat = output["x_hat"]
        A = output["A"]
        z = output["z"]
        criterion_loss = criterion(x_hat, x)
        base_loss = criterion_loss
        if is_vae:
            kld_loss = output["kld"]
            # NOTE(review): '+=' on a tensor is in-place and base_loss aliases
            # criterion_loss, so criterion_meter below logs CE+KLD for VAEs
            # - confirm this is intended.
            base_loss += kld_loss
        loss, lbe_loss = lbe_fn(base_loss, A)
        loss.backward()
        optimizer.step()
        loss_ = loss.item()
        ssim = compute_ssim(x_hat, x).item()
        num = x.size(0)
        loss_meter.update(loss_, num)
        criterion_meter.update(criterion_loss.item(), num)
        if is_vae:
            kld_meter.update(kld_loss.item(), num)
        # NoLBELoss may return a plain float instead of a tensor.
        lbe_meter.update(lbe_loss.item() if hasattr(lbe_loss, "item") else lbe_loss, num)
        ssim_meter.update(ssim, num)
        # Log diagnostics every 100 steps and on the last batch.
        if step % 100 == 0 or step + 1 == len(train_loader):
            H_z = batch_entropy(z)
            entropies = [batch_entropy(a) for a in A]
            H_out = entropies[-1]
            H_avg = torch.mean(torch.stack(entropies))
            lbe_alpha_mean = torch.mean(lbe_fn.lbe_alpha_p)
            lbe_alpha_min = torch.min(lbe_fn.lbe_alpha_p)
            lbe_alpha_max = torch.max(lbe_fn.lbe_alpha_p)
            if not DEBUG:
                logs = {
                    f"train/{criterion.__class__.__name__}": criterion_meter.avg,
                    "train/lbe_loss": lbe_meter.avg,
                    "train/h_z": H_z,
                    "train/h_out": H_out,
                    "train/h_avg": H_avg,
                    "train/loss": loss_meter.avg,
                    "train/ssim": ssim_meter.avg,
                    "train/lbe_alpha_p": lbe_alpha_mean,
                    "train/lbe_alpha_p_min": lbe_alpha_min,
                    "train/lbe_alpha_p_max": lbe_alpha_max,
                }
                if is_vae:
                    logs[f"train/kld_loss"] = kld_meter.avg
                wandb.log(logs, step=seen_samples, commit=WANDB_COMMIT)
            logger.info(f'Epoch {epoch} Step {step}/{len(train_loader) - 1} '
                        f'Loss {loss_meter.val:.4f} ({loss_meter.avg:.4f}) '
                        f'SSIM {ssim_meter.val:.4f} ({ssim_meter.avg:.4f})')
    elapsed = time.time() - start
    logger.info(f'Elapsed {elapsed:.2f}')
# Sliding window (last 5 evaluations) of mean SSIM, keyed by stage name.
test_ssim_sliding = {}
def evaluate(stage, epoch, model, criterion, lbe_fn, test_loader):
    """Evaluate the model on ``test_loader`` under stage label ``stage``.

    Logs a 5-evaluation smoothed SSIM and returns the mean total loss
    (used by the LR scheduler in main()).
    """
    global test_ssim_sliding
    test_ssim_sliding[stage] = [] if stage not in test_ssim_sliding else test_ssim_sliding[stage]
    logger.info(f'### {stage.upper()} ###')
    is_vae = isinstance(model, autoencoder.VAE)
    model.eval()
    loss_meter = AverageMeter()
    ssim_meter = AverageMeter()
    start = time.time()
    for step, (x, _) in enumerate(test_loader):
        x = x.cuda()
        with torch.no_grad():
            output = model(x)
            x_hat = output["x_hat"]
            A = output["A"]
            criterion_loss = criterion(x_hat, x)
            base_loss = criterion_loss
            if is_vae:
                kld_loss = output["kld"]
                base_loss += kld_loss
            loss, _ = lbe_fn(base_loss, A)
        loss_ = loss.item()
        ssim = compute_ssim(x_hat, x).item()
        num = x.size(0)
        loss_meter.update(loss_, num)
        ssim_meter.update(ssim, num)
    # ssim_meter.sum is batch-SSIM weighted by batch size; normalize by dataset size.
    mean_ssim = ssim_meter.sum / len(test_loader.dataset)
    test_ssim_sliding[stage].append(mean_ssim)
    test_ssim_sliding[stage] = test_ssim_sliding[stage][-5:]
    logger.info(f'Epoch {epoch} Loss {loss_meter.avg:.4f} SSIM {np.mean(test_ssim_sliding[stage]):.4f}')
    elapsed = time.time() - start
    logger.info(f'Elapsed {elapsed:.2f}')
    if not DEBUG:
        wandb.log({
            f"{stage}/loss": loss_meter.avg,
            f"{stage}/SSIM": np.mean(test_ssim_sliding[stage]),
        }, step=seen_samples, commit=WANDB_COMMIT)
    return loss_meter.avg
def main():
    """Entry point: parse config, seed RNGs, build model/criterion, train, test."""
    # parse command line arguments
    config = parse_args()
    logger.info(json.dumps(config, indent=2))
    run_config = config['run_config']
    optim_config = config['optim_config']
    data_config = config['data_config']
    utils_config = config['utils_config']
    # set random seed
    seed = run_config['seed']
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    np.random.seed(seed)
    random.seed(seed)
    # data loaders
    train_loader, val_loader, test_loader = get_loader(
        data_config['dataset'],
        optim_config['batch_size'],
        run_config['num_workers'])
    # model
    model = load_model(config['model_config'])
    model.cuda()
    n_params = sum([param.view(-1).size()[0] for param in model.parameters()])
    logger.info('n_params: {}'.format(n_params))
    # LBELoss
    lbe_beta = optim_config["lbe_beta"]
    lbe_alpha = optim_config["lbe_alpha"]
    lbe_alpha_min = optim_config["lbe_alpha_min"]
    # One LBE term per encoder/decoder layer, plus one for the latent code.
    num_layers = len(model.encoder.fcs) + len(model.decoder.fcs) + 1
    if optim_config["criterion"] == "mse":
        criterion = nn.MSELoss()
    elif optim_config["criterion"] == "bce":
        criterion = nn.BCELoss()
    else:
        raise ValueError(f"Invalid criterion, choose from ['mse', 'bce']")
    # lbe_beta == 0 disables the penalty entirely (NoLBELoss is a no-op).
    if lbe_beta != 0.0:
        lbe_fn = LBELoss(num_layers,lbe_alpha=lbe_alpha, lbe_beta=lbe_beta, lbe_alpha_min=lbe_alpha_min)
    else:
        lbe_fn = NoLBELoss()
    # LBE target entropies are learnable -> optimize them jointly with the model.
    params = list(model.parameters()) + list(lbe_fn.parameters())
    # optimizer
    optimizer = torch.optim.Adam(
        params,
        lr=optim_config['base_lr'],
        weight_decay=optim_config['weight_decay'])
    scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
        optimizer, mode="min", factor=0.5, patience=5
    )
    # Callbacks
    reconstruct_images_cb = ReconstructImages(
        num_images=5,
        mean_only=True
    )
    plot_latent_space_cb = PlotLatentSpace(
        num_batches=None,
        quantiles=[0.025, 0.975],
        size_recon=12
    )
    # run validation before start training
    # NOTE(review): this pre-training "val" evaluation is fed test_loader while
    # the callbacks below use val_loader - presumably val_loader was intended.
    evaluate("val", 0, model, criterion, lbe_fn, test_loader)
    reconstruct_images_cb("val", model, val_loader, seen_samples)
    plot_latent_space_cb("val", model, val_loader, seen_samples)
    if not DEBUG:
        wandb.log({}, step=seen_samples) # Flush wandb logs
    for epoch in range(1, optim_config['epochs'] + 1):
        train(epoch, model, optimizer, criterion, lbe_fn, train_loader)
        val_loss = evaluate("val", epoch, model, criterion, lbe_fn, val_loader)
        # Plateau scheduler keyed on validation loss.
        scheduler.step(val_loss)
        if epoch % utils_config["callback_interval"] == 0:
            reconstruct_images_cb("val", model, val_loader, seen_samples)
            plot_latent_space_cb("val", model, val_loader, seen_samples)
        if not DEBUG:
            wandb.log({}, step=seen_samples) # Flush wandb logs
    # Test model
    evaluate("test", epoch, model, criterion, lbe_fn, test_loader)
    reconstruct_images_cb("test", model, test_loader, seen_samples)
    plot_latent_space_cb("test", model, test_loader, seen_samples)
    if not DEBUG:
        wandb.log({}, step=seen_samples) # Flush wandb logs
        wandb.finish()
if __name__ == '__main__':
main()
| 12,920 | 30.36165 | 136 | py |
layerwise-batch-entropy | layerwise-batch-entropy-main/experiment_resnet/batch_entropy.py | import torch
from torch import nn
import torch.nn.functional as F
import numpy as np
import scipy
import scipy.stats
import random
def batch_entropy(x):
    """Estimate the mean differential entropy of the units of ``x``.

    Each unit's values across the mini-batch are modeled as Gaussian; the
    closed-form Gaussian entropy (with +1 smoothing inside the log) is
    averaged over all units. Requires a batch size of at least 2.
    """
    if x.shape[0] <= 1:
        raise Exception("The batch entropy can only be calculated for |batch| > 1.")
    flat = torch.flatten(x, start_dim=1)
    unit_std = torch.std(flat, dim=0)
    per_unit = 0.5 * torch.log(np.pi * np.e * unit_std ** 2 + 1)
    return torch.mean(per_unit)
class LBELoss(nn.Module):
    """ Computation of the LBE + CE loss.

    The target entropy per layer (``lbe_alpha_p``) is a learnable parameter,
    clamped from below by ``lbe_alpha_min``; the squared deviation of each
    layer's batch entropy from its target is averaged, scaled by
    ``lbe_beta`` and by the CE loss itself, and added to the CE loss.

    See also https://www.wolframalpha.com/input/?i=%28%28x-0.8%29*0.5%29**2+for+x+from+0+to+2+y+from+0+to+0.5

    ``device`` generalizes the previously hard-coded "cuda" placement of
    the ``lbe_alpha_min`` buffer; the default preserves old behavior.
    """
    def __init__(self, num, lbe_alpha=0.5, lbe_alpha_min=0.2, lbe_beta=0.5, device="cuda"):
        super(LBELoss, self).__init__()
        self.ce = nn.CrossEntropyLoss()
        # One learnable entropy target per regularized layer.
        lbe_alpha = torch.ones(num) * lbe_alpha
        self.lbe_alpha_p = torch.nn.Parameter(lbe_alpha, requires_grad=True)
        self.lbe_alpha_min = torch.FloatTensor([lbe_alpha_min]).to(device)
        self.lbe_beta = lbe_beta

    def lbe_per_layer(self, a, i):
        """Squared entropy-gap penalty for layer ``i`` with activations ``a``."""
        lbe_alpha_l = torch.abs(self.lbe_alpha_p[i])
        lbe_l = (batch_entropy(a)-torch.maximum(self.lbe_alpha_min, lbe_alpha_l))**2
        return lbe_l * self.lbe_beta

    def __call__(self, output, target):
        """Return (total, ce, lbe) for model output ``(logits, A)``.

        When ``A`` is None the penalty is skipped and lbe is reported as 0.
        """
        output, A = output
        ce = self.ce(output, target)
        if A is None:
            return ce, ce, torch.zeros(1)
        losses = [self.lbe_per_layer(a, i) for i, a in enumerate(A)]
        lbe = torch.mean(torch.stack(losses)) * ce
        return ce+lbe, ce, lbe
class CELoss(nn.Module):
    """Plain cross-entropy criterion with the LBELoss return signature.

    Acts as a drop-in replacement for LBELoss when the batch-entropy
    penalty is disabled: the third return value (lbe) is always 0.0.
    """
    def __init__(self):
        super(CELoss, self).__init__()
        self.ce = nn.CrossEntropyLoss()
        # Zero-valued stand-ins so callers can still log these attributes.
        self.lbe_alpha_p = torch.zeros(1)
        self.lbe_beta = torch.zeros(1)

    def __call__(self, output, target):
        """Return (total, ce, lbe) where total == ce and lbe == 0.0."""
        logits, _ = output
        ce = self.ce(logits, target)
        return ce, ce, 0.0
| 2,157 | 31.208955 | 113 | py |
layerwise-batch-entropy | layerwise-batch-entropy-main/experiment_resnet/resnet.py | # coding: utf-8
import torch
import torch.nn as nn
import torch.nn.functional as F
def initialize_weights(module):
    """Per-module weight initialization, meant for ``model.apply(...)``.

    Conv2d weights get Kaiming-normal (fan-out) init; BatchNorm2d is reset
    to the identity affine transform (weight=1, bias=0); Linear biases are
    zeroed. All other module types are left untouched.
    """
    if isinstance(module, nn.BatchNorm2d):
        module.weight.data.fill_(1)
        module.bias.data.zero_()
    elif isinstance(module, nn.Linear):
        module.bias.data.zero_()
    elif isinstance(module, nn.Conv2d):
        nn.init.kaiming_normal_(module.weight.data, mode='fan_out')
class BasicBlock(nn.Module):
    """ResNet basic block: 3x3 conv-BN-ReLU -> 3x3 conv-BN, plus skip.

    ``forward`` consumes and returns an ``(x, A)`` pair; ``A`` is the running
    list of activations recorded for batch-entropy diagnostics. This variant
    records the residual-branch output (and the projected shortcut when a
    1x1 projection is needed).
    """
    # Output channel multiplier relative to the block's nominal width.
    expansion = 1
    def __init__(self, in_channels, out_channels, stride):
        super(BasicBlock, self).__init__()
        self.conv1 = nn.Conv2d(
            in_channels,
            out_channels,
            kernel_size=3,
            stride=stride, # downsample with first conv
            padding=1,
            bias=False)
        self.bn1 = nn.BatchNorm2d(out_channels)
        self.conv2 = nn.Conv2d(
            out_channels,
            out_channels,
            kernel_size=3,
            stride=1,
            padding=1,
            bias=False)
        self.bn2 = nn.BatchNorm2d(out_channels)
        # Identity shortcut unless a channel change forces a 1x1 projection.
        self.shortcut = nn.Sequential()
        if in_channels != out_channels:
            self.shortcut.add_module(
                'conv',
                nn.Conv2d(
                    in_channels,
                    out_channels,
                    kernel_size=1,
                    stride=stride, # downsample
                    padding=0,
                    bias=False))
            self.shortcut.add_module('bn', nn.BatchNorm2d(out_channels)) # BN
    def forward(self, x_a):
        # Unpack the input tensor and the running activation list.
        x, A = x_a
        y = F.relu(self.bn1(self.conv1(x)))
        y = self.bn2(self.conv2(y))
        # NOTE(review): the in-place '+=' below mutates this same tensor after
        # it is appended, so A ends up holding post-addition values - confirm.
        A.append(y)
        x_shortcut = self.shortcut(x)
        # Non-empty shortcut means a projection ran; record its output too.
        if(len(self.shortcut) > 0):
            A.append(x_shortcut)
        y += x_shortcut
        y = F.relu(y) # apply ReLU after addition
        return y, A
class BasicBlockLBE(nn.Module):
    """BasicBlock variant used when the LBE penalty is active.

    Identical architecture to BasicBlock; the only difference is what gets
    recorded into ``A``: here only the post-addition, post-ReLU block
    output is appended (shortcut recording is disabled below).
    """
    # Output channel multiplier relative to the block's nominal width.
    expansion = 1
    def __init__(self, in_channels, out_channels, stride):
        super(BasicBlockLBE, self).__init__()
        self.conv1 = nn.Conv2d(
            in_channels,
            out_channels,
            kernel_size=3,
            stride=stride, # downsample with first conv
            padding=1,
            bias=False)
        self.bn1 = nn.BatchNorm2d(out_channels)
        self.conv2 = nn.Conv2d(
            out_channels,
            out_channels,
            kernel_size=3,
            stride=1,
            padding=1,
            bias=False)
        self.bn2 = nn.BatchNorm2d(out_channels)
        # Identity shortcut unless a channel change forces a 1x1 projection.
        self.shortcut = nn.Sequential()
        if in_channels != out_channels:
            self.shortcut.add_module(
                'conv',
                nn.Conv2d(
                    in_channels,
                    out_channels,
                    kernel_size=1,
                    stride=stride, # downsample
                    padding=0,
                    bias=False))
            self.shortcut.add_module('bn', nn.BatchNorm2d(out_channels)) # BN
    def forward(self, x_a):
        x, A = x_a
        y = F.relu(self.bn1(self.conv1(x)))
        y = self.bn2(self.conv2(y))
        x_shortcut = self.shortcut(x)
        # Shortcut recording deliberately disabled for the LBE variant:
        # if(len(self.shortcut) > 0):
        #    A.append(x_shortcut)
        y += x_shortcut
        y = F.relu(y) # apply ReLU after addition
        # Record the final (post-ReLU) block output for the LBE penalty.
        A.append(y)
        return y, A
class BottleneckBlock(nn.Module):
    """ResNet bottleneck block: 1x1 reduce -> 3x3 -> 1x1 expand, plus skip.

    NOTE(review): unlike the basic blocks, this block never appends to the
    activation list ``A``, so batch-entropy logging/regularization sees no
    bottleneck activations - confirm this is intended.
    """
    # Output channels are 4x the bottleneck width.
    expansion = 4
    def __init__(self, in_channels, out_channels, stride):
        super(BottleneckBlock, self).__init__()
        bottleneck_channels = out_channels // self.expansion
        self.conv1 = nn.Conv2d(
            in_channels,
            bottleneck_channels,
            kernel_size=1,
            stride=1,
            padding=0,
            bias=False)
        self.bn1 = nn.BatchNorm2d(bottleneck_channels)
        self.conv2 = nn.Conv2d(
            bottleneck_channels,
            bottleneck_channels,
            kernel_size=3,
            stride=stride, # downsample with 3x3 conv
            padding=1,
            bias=False)
        self.bn2 = nn.BatchNorm2d(bottleneck_channels)
        self.conv3 = nn.Conv2d(
            bottleneck_channels,
            out_channels,
            kernel_size=1,
            stride=1,
            padding=0,
            bias=False)
        self.bn3 = nn.BatchNorm2d(out_channels)
        self.shortcut = nn.Sequential() # identity
        if in_channels != out_channels:
            self.shortcut.add_module(
                'conv',
                nn.Conv2d(
                    in_channels,
                    out_channels,
                    kernel_size=1,
                    stride=stride, # downsample
                    padding=0,
                    bias=False))
            self.shortcut.add_module('bn', nn.BatchNorm2d(out_channels)) # BN
    def forward(self, x_a):
        # A is passed through unchanged (no activations recorded here).
        x, A = x_a
        y = F.relu(self.bn1(self.conv1(x)), inplace=True)
        y = F.relu(self.bn2(self.conv2(y)), inplace=True)
        y = self.bn3(self.conv3(y)) # not apply ReLU
        y += self.shortcut(x)
        y = F.relu(y, inplace=True) # apply ReLU after addition
        return y, A
class Network(nn.Module):
    """Three-stage ResNet classifier configured by a dict.

    Expected ``config`` keys: input_shape, n_classes, base_channels,
    block_type ('basic' | 'basic_lbe' | 'bottleneck'), depth.
    ``forward`` returns ``(logits, A)`` where ``A`` is the list of
    activations the blocks recorded for batch-entropy regularization.
    """
    def __init__(self, config):
        super(Network, self).__init__()
        input_shape = config['input_shape']
        n_classes = config['n_classes']
        base_channels = config['base_channels']
        block_type = config['block_type']
        depth = config['depth']
        assert block_type in ['basic', 'basic_lbe', 'bottleneck']
        # Depth must satisfy the ResNet constraint: 6n+2 (basic) or 9n+2 (bottleneck).
        if block_type == 'basic':
            block = BasicBlock
            n_blocks_per_stage = (depth - 2) // 6
            assert n_blocks_per_stage * 6 + 2 == depth
        elif block_type == 'basic_lbe':
            block = BasicBlockLBE
            n_blocks_per_stage = (depth - 2) // 6
            assert n_blocks_per_stage * 6 + 2 == depth
        else:
            block = BottleneckBlock
            n_blocks_per_stage = (depth - 2) // 9
            assert n_blocks_per_stage * 9 + 2 == depth
        n_channels = [
            base_channels, base_channels * 2 * block.expansion,
            base_channels * 4 * block.expansion
        ]
        self.conv = nn.Conv2d(
            input_shape[1],
            n_channels[0],
            kernel_size=3,
            stride=1,
            padding=1,
            bias=False)
        self.bn = nn.BatchNorm2d(base_channels)
        self.stage1 = self._make_stage(
            n_channels[0], n_channels[0], n_blocks_per_stage, block, stride=1)
        self.stage2 = self._make_stage(
            n_channels[0], n_channels[1], n_blocks_per_stage, block, stride=2)
        self.stage3 = self._make_stage(
            n_channels[1], n_channels[2], n_blocks_per_stage, block, stride=2)
        # compute conv feature size (dry run with a zero input)
        with torch.no_grad():
            tmp_out, _ = self._forward_conv(torch.zeros(*input_shape))
            self.feature_size = tmp_out.view(-1).shape[0]
        self.fc = nn.Linear(self.feature_size, n_classes)
        # initialize weights
        self.apply(initialize_weights)
    def _make_stage(self, in_channels, out_channels, n_blocks, block, stride):
        # The first block of a stage may downsample/project; the rest keep shape.
        stage = nn.Sequential()
        for index in range(n_blocks):
            block_name = 'block{}'.format(index + 1)
            if index == 0:
                stage.add_module(
                    block_name, block(
                        in_channels, out_channels, stride=stride))
            else:
                stage.add_module(block_name,
                                 block(out_channels, out_channels, stride=1))
        return stage
    def _forward_conv(self, x):
        # Thread the activation list A through every block for LBE logging.
        A = []
        x = F.relu(self.bn(self.conv(x)), inplace=True)
        x, A = self.stage1((x, A))
        x, A = self.stage2((x, A))
        x, A = self.stage3((x, A))
        x = F.adaptive_avg_pool2d(x, output_size=1)
        return x, A
    def forward(self, x):
        x, A = self._forward_conv(x)
        x = x.view(x.size(0), -1)
        x = self.fc(x)
        return x, A | 8,024 | 29.865385 | 78 | py |
layerwise-batch-entropy | layerwise-batch-entropy-main/experiment_resnet/dataloader.py | # coding: utf-8
import numpy as np
import torch
import torch.utils.data
import torchvision
import torchvision.models
from torchvision import transforms
from torchvision import datasets
def get_loader(dataset, batch_size, num_workers):
    """Dispatch to the factory for ``dataset``; returns (train, eval, test) loaders."""
    if dataset == "mnist":
        return get_mnist_loader(batch_size, num_workers)
    if dataset == "fashionmnist":
        return get_fashionmnist_loader(batch_size, num_workers)
    if dataset == "cifar10":
        return get_cifar10_loader(batch_size, num_workers)
    if dataset == "cifar100":
        return get_cifar100_loader(batch_size, num_workers)
    raise Exception(f"Dataset {dataset} not found.")
def get_mnist_loader(batch_size, num_workers):
    """Build (train, eval, test) MNIST loaders with dataset-estimated normalization.

    Mean/std are estimated by materializing the whole train split in one
    batch; the train split is then re-loaded with augmentation and split
    50k/10k into train/eval.
    """
    # Load the entire train set once (single huge batch) to estimate stats.
    tmp_set = torchvision.datasets.MNIST('.data',
                        train=True,
                        download=True,
                        transform=transforms.Compose([transforms.ToTensor()]))
    tmp_loader = torch.utils.data.DataLoader(tmp_set, batch_size=len(tmp_set), num_workers=1)
    tmp_data = next(iter(tmp_loader))
    mean, std = tmp_data[0].mean(), tmp_data[0].std()
    transform_train=transforms.Compose([
        transforms.RandomAffine(degrees=20, translate=(0.1,0.1), scale=(0.9, 1.1)),
        transforms.ColorJitter(brightness=0.2, contrast=0.2),
        transforms.ToTensor(),
        transforms.Normalize(mean, std)])
    train_dataset = datasets.MNIST('.data', train=True, download=True, transform=transform_train)
    print(f"Dataset size: {len(train_dataset)}")
    # NOTE(review): random_split uses the global torch RNG (reproducibility
    # relies on the caller seeding torch), and eval_dataset inherits the
    # augmented transform_train - confirm both are intended.
    train_dataset, eval_dataset = torch.utils.data.random_split(train_dataset, [50000, 10000])
    transform_test=transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(mean, std)])
    test_dataset = datasets.MNIST('.data', train=False, transform=transform_test)
    train_loader = torch.utils.data.DataLoader(
        train_dataset,
        batch_size=batch_size,
        shuffle=True,
        num_workers=num_workers,
        pin_memory=True,
        drop_last=True,
    )
    eval_loader = torch.utils.data.DataLoader(
        eval_dataset,
        batch_size=batch_size,
        num_workers=num_workers,
        shuffle=False,
        pin_memory=True,
        drop_last=False,
    )
    test_loader = torch.utils.data.DataLoader(
        test_dataset,
        batch_size=batch_size,
        num_workers=num_workers,
        shuffle=False,
        pin_memory=True,
        drop_last=False,
    )
    return train_loader, eval_loader, test_loader
def get_fashionmnist_loader(batch_size, num_workers):
    """Build (train, eval, test) FashionMNIST loaders; mirrors get_mnist_loader
    but adds horizontal flipping to the augmentation pipeline."""
    # Load the entire train set once (single huge batch) to estimate stats.
    tmp_set = torchvision.datasets.FashionMNIST('.data',
                        train=True,
                        download=True,
                        transform=transforms.Compose([transforms.ToTensor()]))
    tmp_loader = torch.utils.data.DataLoader(tmp_set, batch_size=len(tmp_set), num_workers=1)
    tmp_data = next(iter(tmp_loader))
    mean, std = tmp_data[0].mean(), tmp_data[0].std()
    transform_train=transforms.Compose([
        transforms.RandomAffine(degrees=20, translate=(0.1,0.1), scale=(0.9, 1.1)),
        transforms.ColorJitter(brightness=0.2, contrast=0.2),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize(mean, std)])
    train_dataset = datasets.FashionMNIST('.data', train=True, download=True, transform=transform_train)
    print(f"Dataset size: {len(train_dataset)}")
    # NOTE(review): random_split uses the global torch RNG, and eval_dataset
    # inherits the augmented transform_train - confirm both are intended.
    train_dataset, eval_dataset = torch.utils.data.random_split(train_dataset, [50000, 10000])
    transform_test=transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(mean, std)])
    test_dataset = datasets.FashionMNIST('.data', train=False, transform=transform_test)
    train_loader = torch.utils.data.DataLoader(
        train_dataset,
        batch_size=batch_size,
        shuffle=True,
        num_workers=num_workers,
        pin_memory=True,
        drop_last=True,
    )
    eval_loader = torch.utils.data.DataLoader(
        eval_dataset,
        batch_size=batch_size,
        num_workers=num_workers,
        shuffle=False,
        pin_memory=True,
        drop_last=False,
    )
    test_loader = torch.utils.data.DataLoader(
        test_dataset,
        batch_size=batch_size,
        num_workers=num_workers,
        shuffle=False,
        pin_memory=True,
        drop_last=False,
    )
    return train_loader, eval_loader, test_loader
def get_cifar10_loader(batch_size, num_workers):
    """Build (train, eval, test) CIFAR-10 loaders with crop/flip augmentation.

    The 50k train split is divided 40k/10k into train/eval.
    """
    # Load the entire train set once (single huge batch) to estimate stats.
    tmp_set = torchvision.datasets.CIFAR10('.data',
                        train=True,
                        download=True,
                        transform=transforms.Compose([transforms.ToTensor()]))
    tmp_loader = torch.utils.data.DataLoader(tmp_set, batch_size=len(tmp_set), num_workers=1)
    tmp_data = next(iter(tmp_loader))
    # NOTE(review): .mean()/.std() reduce over ALL dims, yielding one global
    # scalar rather than per-channel statistics - confirm intended.
    mean, std = tmp_data[0].mean(), tmp_data[0].std()
    train_transform = torchvision.transforms.Compose([
        torchvision.transforms.RandomCrop(32, padding=4),
        torchvision.transforms.RandomHorizontalFlip(),
        torchvision.transforms.ToTensor(),
        torchvision.transforms.Normalize(mean, std),
    ])
    test_transform = torchvision.transforms.Compose([
        torchvision.transforms.ToTensor(),
        torchvision.transforms.Normalize(mean, std),
    ])
    train_dataset = torchvision.datasets.CIFAR10(
        ".data", train=True, transform=train_transform, download=True)
    print(f"Dataset size: {len(train_dataset)}")
    # NOTE(review): random_split uses the global torch RNG, and eval_dataset
    # inherits the augmented train_transform - confirm both are intended.
    train_dataset, eval_dataset = torch.utils.data.random_split(train_dataset, [40000, 10000])
    test_dataset = torchvision.datasets.CIFAR10(
        ".data", train=False, transform=test_transform, download=True)
    train_loader = torch.utils.data.DataLoader(
        train_dataset,
        batch_size=batch_size,
        shuffle=True,
        num_workers=num_workers,
        pin_memory=True,
        drop_last=True,
    )
    eval_loader = torch.utils.data.DataLoader(
        eval_dataset,
        batch_size=batch_size,
        num_workers=num_workers,
        shuffle=False,
        pin_memory=True,
        drop_last=False,
    )
    test_loader = torch.utils.data.DataLoader(
        test_dataset,
        batch_size=batch_size,
        num_workers=num_workers,
        shuffle=False,
        pin_memory=True,
        drop_last=False,
    )
    return train_loader, eval_loader, test_loader
def get_cifar100_loader(batch_size, num_workers):
    """Build (train, eval, test) CIFAR-100 loaders; mirrors get_cifar10_loader."""
    # Load the entire train set once (single huge batch) to estimate stats.
    tmp_set = torchvision.datasets.CIFAR100('.data',
                        train=True,
                        download=True,
                        transform=transforms.Compose([transforms.ToTensor()]))
    tmp_loader = torch.utils.data.DataLoader(tmp_set, batch_size=len(tmp_set), num_workers=1)
    tmp_data = next(iter(tmp_loader))
    # NOTE(review): single global mean/std over all channels, not per-channel.
    mean, std = tmp_data[0].mean(), tmp_data[0].std()
    train_transform = torchvision.transforms.Compose([
        torchvision.transforms.RandomCrop(32, padding=4),
        torchvision.transforms.RandomHorizontalFlip(),
        torchvision.transforms.ToTensor(),
        torchvision.transforms.Normalize(mean, std),
    ])
    test_transform = torchvision.transforms.Compose([
        torchvision.transforms.ToTensor(),
        torchvision.transforms.Normalize(mean, std),
    ])
    train_dataset = torchvision.datasets.CIFAR100(
        ".data", train=True, transform=train_transform, download=True)
    print(f"Dataset size: {len(train_dataset)}")
    # NOTE(review): random_split uses the global torch RNG, and eval_dataset
    # inherits the augmented train_transform - confirm both are intended.
    train_dataset, eval_dataset = torch.utils.data.random_split(train_dataset, [40000, 10000])
    test_dataset = torchvision.datasets.CIFAR100(
        ".data", train=False, transform=test_transform, download=True)
    train_loader = torch.utils.data.DataLoader(
        train_dataset,
        batch_size=batch_size,
        shuffle=True,
        num_workers=num_workers,
        pin_memory=True,
        drop_last=True,
    )
    eval_loader = torch.utils.data.DataLoader(
        eval_dataset,
        batch_size=batch_size,
        num_workers=num_workers,
        shuffle=False,
        pin_memory=True,
        drop_last=False,
    )
    test_loader = torch.utils.data.DataLoader(
        test_dataset,
        batch_size=batch_size,
        num_workers=num_workers,
        shuffle=False,
        pin_memory=True,
        drop_last=False,
    )
    return train_loader, eval_loader, test_loader
| 8,138 | 32.9125 | 104 | py |
layerwise-batch-entropy | layerwise-batch-entropy-main/experiment_resnet/train.py | #!/usr/bin/env python
# coding: utf-8
# From https://github.dev/hysts/pytorch_resnet/blob/master/main.py
from email.policy import default
import os
import time
import importlib
import json
from collections import OrderedDict
import logging
import argparse
import numpy as np
import random
import wandb
import torch
import torch.nn as nn
import torch.optim
import torch.utils.data
import torch.backends.cudnn
import torchvision.utils
from dataloader import get_loader
from batch_entropy import LBELoss, CELoss, batch_entropy
# Let cuDNN auto-tune conv algorithms (safe here: input sizes are fixed).
torch.backends.cudnn.benchmark = True
# Configure root logging once at import time for this training script.
logging.basicConfig(
    format='[%(asctime)s %(name)s %(levelname)s] - %(message)s',
    datefmt='%Y/%m/%d %H:%M:%S',
    level=logging.DEBUG)
logger = logging.getLogger(__name__)
# Global counters shared by train()/test(); advanced only in train().
global_step = 0
seen_samples = 0
def str2bool(s):
    """argparse-friendly parser for 'true'/'false' flag values (case-insensitive).

    Raises argparse.ArgumentTypeError for anything else so argparse reports
    a clean usage error; the previous RuntimeError was not intercepted by
    argparse and crashed the program with a traceback instead.
    """
    value = s.lower()
    if value == 'true':
        return True
    if value == 'false':
        return False
    raise argparse.ArgumentTypeError('Boolean value expected')
def parse_args():
    """Parse CLI arguments and return a nested OrderedDict config.

    Side effects: initializes wandb; exits early (wandb exit_code 0) when
    exactly one of lbe_alpha/lbe_beta is zero; silently switches
    block_type 'basic' to 'basic_lbe' when the LBE penalty is enabled.
    """
    parser = argparse.ArgumentParser()
    # data config
    parser.add_argument('--dataset', type=str, default="cifar10")
    # model config
    parser.add_argument('--arch', type=str, default="residual")
    parser.add_argument('--block_type', type=str, default="basic")
    parser.add_argument('--depth', type=int, required=True)
    parser.add_argument('--base_channels', type=int, default=16)
    # run config
    parser.add_argument('--seed', type=int, default=17)
    parser.add_argument('--num_workers', type=int, default=7)
    parser.add_argument('--device', type=str, default="cuda")
    # optim config
    parser.add_argument('--epochs', type=int, default=100)
    parser.add_argument('--batch_size', type=int, default=256)
    parser.add_argument('--learning_rate', type=float, default=0.1)
    parser.add_argument('--weight_decay', type=float, default=1e-4)
    parser.add_argument('--momentum', type=float, default=0.9)
    parser.add_argument('--nesterov', type=str2bool, default=True)
    parser.add_argument('--milestones', type=str, default='[40, 80]')
    parser.add_argument('--lr_decay', type=float, default=0.1)
    parser.add_argument('--lbe_alpha', type=float, default=0.8,
                        help='Desired entropy at the beginning of trainig.')
    parser.add_argument('--lbe_beta', type=float, default=0.0,
                        help='Weight lbe loss.')
    args = parser.parse_args()
    wandb.init(config=args)
    # Invalid sweep combination (penalty weight without target or vice
    # versa): finish the wandb run cleanly and terminate.
    if((args.lbe_alpha == 0 and args.lbe_beta != 0) or (args.lbe_alpha != 0 and args.lbe_beta == 0)):
        wandb.finish(exit_code=0)
        exit()
    # LBE training needs the block variant that records its activations.
    if args.block_type == "basic" and args.lbe_beta > 0.0:
        args.block_type = "basic_lbe"
    dataset =args.dataset.lower()
    # (batch=1, channels, H, W) used for the model's dry-run shape probe.
    input_shape = (1, 1, 28, 28) if dataset == "mnist" else \
                  (1, 1, 28, 28) if dataset == "fashionmnist" else \
                  (1, 3, 32, 32) if dataset == "cifar10" else \
                  (1, 3, 32, 32) if dataset == "cifar100" else \
                  (1, 3, 32, 32)
    n_classes = 10 if dataset == "mnist" else \
                10 if dataset == "fashionmnist" else \
                10 if dataset == "cifar10" else \
                100 if dataset == "cifar100" else \
                10
    model_config = OrderedDict([
        ('arch', 'resnet'),
        ('block_type', args.block_type),
        ('depth', args.depth),
        ('base_channels', args.base_channels),
        ('input_shape', input_shape),
        ('n_classes', n_classes),
    ])
    optim_config = OrderedDict([
        ('epochs', args.epochs),
        ('batch_size', args.batch_size),
        ('learning_rate', args.learning_rate),
        ('weight_decay', args.weight_decay),
        ('momentum', args.momentum),
        ('nesterov', args.nesterov),
        ('milestones', json.loads(args.milestones)),
        ('lr_decay', args.lr_decay),
        ('lbe_beta', args.lbe_beta),
        ('lbe_alpha', args.lbe_alpha),
    ])
    data_config = OrderedDict([
        ('dataset', dataset),
    ])
    run_config = OrderedDict([
        ('seed', args.seed),
        ('num_workers', args.num_workers),
    ])
    config = OrderedDict([
        ('model_config', model_config),
        ('optim_config', optim_config),
        ('data_config', data_config),
        ('run_config', run_config),
    ])
    return config
def load_model(config):
    """Import the module named by ``config['arch']`` and build its ``Network``."""
    arch_module = importlib.import_module(config['arch'])
    network_cls = getattr(arch_module, 'Network')
    return network_cls(config)
class AverageMeter(object):
    """Running-average bookkeeping for scalar training statistics.

    Keeps the most recent value (``val``), the weighted sum, the sample
    count, and the resulting mean (``avg``).
    """

    def __init__(self):
        self.reset()

    def reset(self):
        """Zero out all statistics."""
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, num):
        """Fold in ``val`` weighted by ``num`` samples."""
        self.val = val
        self.sum = self.sum + val * num
        self.count = self.count + num
        self.avg = self.sum / self.count
def train(epoch, model, optimizer, criterion, train_loader):
    """Run one training epoch; logs CE/LBE losses and entropies to wandb.

    ``criterion`` is either LBELoss or CELoss; both take ``((logits, A),
    targets)`` and return ``(total, ce, lbe)``. Advances the module-level
    global_step/seen_samples counters.
    """
    global global_step
    global seen_samples
    logger.info('Train {}'.format(epoch))
    model.train()
    ce_meter = AverageMeter()
    lbe_meter = AverageMeter()
    loss_meter = AverageMeter()
    accuracy_meter = AverageMeter()
    start = time.time()
    for step, (data, targets) in enumerate(train_loader):
        global_step += 1
        seen_samples += data.shape[0]
        data = data.cuda()
        targets = targets.cuda()
        optimizer.zero_grad()
        # Model returns logits plus the list A of recorded block activations.
        outputs, A = model(data)
        loss, ce_loss, lbe_loss = criterion((outputs, A), targets)
        loss.backward()
        optimizer.step()
        _, preds = torch.max(outputs, dim=1)
        loss_ = loss.item()
        correct_ = preds.eq(targets).sum().item()
        num = data.size(0)
        accuracy = correct_ / num
        loss_meter.update(loss_, num)
        ce_meter.update(ce_loss.item(), num)
        # CELoss returns a plain float 0.0 for lbe_loss; LBELoss a tensor.
        lbe_meter.update(lbe_loss.item() if hasattr(lbe_loss, "item") else lbe_loss, num)
        accuracy_meter.update(accuracy, num)
        if step % 100 == 0:
            # Per-layer batch entropies of the recorded activations.
            entropies = [batch_entropy(a) for a in A]
            H_out = entropies[-1]
            H_avg = torch.mean(torch.stack(entropies))
            lbe_alpha_mean = torch.mean(criterion.lbe_alpha_p)
            lbe_alpha_min = torch.min(criterion.lbe_alpha_p)
            lbe_alpha_max = torch.max(criterion.lbe_alpha_p)
            wandb.log({
                "train/loss_ce": ce_meter.avg,
                "train/loss_lbe": lbe_meter.avg,
                "train/h_out": H_out,
                "train/h_avg": H_avg,
                "train/loss": loss_meter.avg,
                "train/accuracy": accuracy_meter.avg,
                "train/lbe_alpha_p": lbe_alpha_mean,
                "train/lbe_alpha_p_min": lbe_alpha_min,
                "train/lbe_alpha_p_max": lbe_alpha_max,
            }, step=seen_samples)
            logger.info('Epoch {} Step {}/{} '
                        'Loss {:.4f} ({:.4f}) '
                        'Accuracy {:.4f} ({:.4f})'.format(
                            epoch,
                            step,
                            len(train_loader),
                            loss_meter.val,
                            loss_meter.avg,
                            accuracy_meter.val,
                            accuracy_meter.avg,
                        ))
    elapsed = time.time() - start
    logger.info('Elapsed {:.2f}'.format(elapsed))
# Sliding window (last 5 evaluations) of accuracies per split name.
test_acc_sliding = {}
def test(name, epoch, model, criterion, test_loader):
    """Evaluate the model on ``test_loader`` under split label ``name``.

    Logs a 5-evaluation smoothed accuracy and the mean loss to wandb.
    """
    global test_acc_sliding
    test_acc_sliding[name] = [] if name not in test_acc_sliding else test_acc_sliding[name]
    logger.info('{} {}'.format(name, epoch))
    model.eval()
    loss_meter = AverageMeter()
    correct_meter = AverageMeter()
    start = time.time()
    for step, (data, targets) in enumerate(test_loader):
        data = data.cuda()
        targets = targets.cuda()
        with torch.no_grad():
            outputs, A = model(data)
            loss, _, _ = criterion((outputs, A), targets)
        _, preds = torch.max(outputs, dim=1)
        loss_ = loss.item()
        correct_ = preds.eq(targets).sum().item()
        num = data.size(0)
        loss_meter.update(loss_, num)
        # num=1 on purpose: correct_meter.sum accumulates raw correct counts.
        correct_meter.update(correct_, 1)
    accuracy = correct_meter.sum / len(test_loader.dataset)
    test_acc_sliding[name].append(accuracy)
    # Keep only the last five evaluations for the smoothed metric.
    test_acc_sliding[name] = test_acc_sliding[name][-5:]
    logger.info('Epoch {} Loss {:.4f} Accuracy {:.4f}'.format(
        epoch, loss_meter.avg, np.mean(test_acc_sliding[name])))
    elapsed = time.time() - start
    logger.info('Elapsed {:.2f}'.format(elapsed))
    wandb.log({
        f"{name}/loss": loss_meter.avg,
        f"{name}/accuracy": np.mean(test_acc_sliding[name]),
    }, step=seen_samples)
def main():
    """End-to-end training driver: config, seeding, data, model, optimization."""
    # parse command line arguments
    config = parse_args()
    logger.info(json.dumps(config, indent=2))
    run_config = config['run_config']
    optim_config = config['optim_config']
    data_config = config['data_config']
    # set random seed
    seed = run_config['seed']
    torch.manual_seed(seed)
    np.random.seed(seed)
    random.seed(seed)
    # data loaders
    train_loader, eval_loader, test_loader = get_loader(
        data_config['dataset'],
        optim_config['batch_size'],
        run_config['num_workers'])
    # model
    model = load_model(config['model_config'])
    model.cuda()
    n_params = sum([param.view(-1).size()[0] for param in model.parameters()])
    logger.info('n_params: {}'.format(n_params))
    lbe_alpha = optim_config["lbe_alpha"]
    lbe_beta = optim_config["lbe_beta"]
    # One learnable LBE target per regularized layer: all stage blocks plus
    # two extra layers (presumably stem and head -- TODO confirm vs. model).
    num_layers = (len(model.stage1) + len(model.stage2) + len(model.stage3)) + 2
    criterion = LBELoss(num_layers,lbe_alpha=lbe_alpha+0.2, lbe_alpha_min=lbe_alpha, lbe_beta=lbe_beta) if lbe_beta != 0.0 else CELoss()
    # The criterion's learnable alphas are optimized jointly with the model.
    params = list(model.parameters()) + list(criterion.parameters())
    # optimizer
    optimizer = torch.optim.SGD(
        params,
        lr=optim_config['learning_rate'],
        momentum=optim_config['momentum'],
        weight_decay=optim_config['weight_decay'],
        nesterov=optim_config['nesterov'])
    scheduler = torch.optim.lr_scheduler.MultiStepLR(
        optimizer,
        milestones=optim_config['milestones'],
        gamma=optim_config['lr_decay'])
    # run test before start training
    test("eval", 0, model, criterion, eval_loader)
    test("test", 0, model, criterion, test_loader)
    for epoch in range(1, optim_config['epochs'] + 1):
        # NOTE(review): scheduler.step() before the epoch's optimizer steps is
        # the pre-1.1 PyTorch ordering; on newer versions this shifts the LR
        # decay one epoch early -- confirm intended.
        scheduler.step()
        train(epoch, model, optimizer, criterion, train_loader)
        test("eval", epoch, model, criterion, eval_loader)
        test("test", epoch, model, criterion, test_loader)
# Script entry point (not executed on import).
if __name__ == '__main__':
    main()
| 10,646 | 29.682997 | 136 | py |
layerwise-batch-entropy | layerwise-batch-entropy-main/experiment_fnn/batch_entropy.py | import torch
from torch import nn
import torch.nn.functional as F
import numpy as np
import scipy
import scipy.stats
import random
def batch_entropy(x):
    """Differential-entropy estimate of a mini-batch under a Gaussian assumption.

    Each sample is flattened, the per-feature variance is taken across the
    batch dimension, the closed-form Gaussian entropy is applied (shifted by
    +1 inside the log so zero-variance features stay finite), and the result
    is averaged over features.
    """
    batch_size = x.shape[0]
    if batch_size <= 1:
        raise Exception("The batch entropy can only be calculated for |batch| > 1.")
    flat = torch.flatten(x, start_dim=1)
    per_feature_var = torch.var(flat, dim=0)
    per_feature_h = 0.5 * torch.log(np.pi * np.e * per_feature_var + 1)
    return torch.mean(per_feature_h)
class LBELoss(nn.Module):
    """Cross entropy plus the layerwise batch-entropy (LBE) regularizer.

    Per layer l the penalty is (H(a_l) - max(alpha_min, |alpha_l|))**2 * beta,
    where H is ``batch_entropy`` and the per-layer targets alpha_l are
    learnable (registered as a Parameter, so callers can pass
    ``criterion.parameters()`` to the optimizer).
    See also https://www.wolframalpha.com/input/?i=%28%28x-0.8%29*0.5%29**2+for+x+from+0+to+2+y+from+0+to+0.5
    """
    def __init__(self, num, lbe_alpha=0.5, lbe_alpha_min=0.2, lbe_beta=0.5):
        # num: number of regularized layers; one learnable target per layer.
        super(LBELoss, self).__init__()
        self.ce = nn.CrossEntropyLoss()
        lbe_alpha = torch.ones(num) * lbe_alpha
        self.lbe_alpha_p = torch.nn.Parameter(lbe_alpha, requires_grad=True)
        # NOTE(review): hard-coded "cuda" makes construction fail on CPU-only
        # machines -- consider deriving the device from the activations.
        self.lbe_alpha_min = torch.FloatTensor([lbe_alpha_min]).to("cuda")
        self.lbe_beta = lbe_beta
    def lbe_per_layer(self, a, i):
        # |alpha_i| keeps the learnable target non-negative; the alpha_min
        # floor stops it from collapsing toward zero entropy.
        lbe_alpha_l = torch.abs(self.lbe_alpha_p[i])
        lbe_l = (batch_entropy(a)-torch.maximum(self.lbe_alpha_min, lbe_alpha_l))**2
        return lbe_l * self.lbe_beta
    def __call__(self, output, target):
        """Return (ce + lbe, ce, lbe); ``output`` is (logits, activations)."""
        output, A = output
        ce = self.ce(output, target)
        if A is None:
            # No intermediate activations supplied: plain cross entropy.
            return ce, ce, torch.zeros(1)
        losses = [self.lbe_per_layer(a, i) for i, a in enumerate(A)]
        # The mean per-layer penalty is scaled by the current ce value.
        lbe = torch.mean(torch.stack(losses)) * ce
        return ce+lbe, ce, lbe
class CELoss(nn.Module):
    """Plain cross entropy exposed through the LBELoss-compatible interface.

    Returns the same (total, ce, lbe) triple as LBELoss so training code can
    swap criteria without branching; the zero-valued ``lbe_alpha_p`` and
    ``lbe_beta`` attributes exist only so logging code that reads them works.
    """
    def __init__(self):
        super(CELoss, self).__init__()
        self.ce = nn.CrossEntropyLoss()
        self.lbe_alpha_p = torch.zeros(1)
        self.lbe_beta = torch.zeros(1)
    def __call__(self, output, target):
        logits, _unused_activations = output
        ce_value = self.ce(logits, target)
        return ce_value, ce_value, 0.0
| 2,157 | 31.208955 | 113 | py |
layerwise-batch-entropy | layerwise-batch-entropy-main/experiment_fnn/train.py | from __future__ import print_function
import argparse
import torch
import copy
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch_optimizer as optim_special
from torchvision import datasets, transforms
from torch.optim.lr_scheduler import StepLR
from batch_entropy import batch_entropy, LBELoss, CELoss
import time
import numpy as np
import wandb
# CLI and global run setup for the FNN/MNIST experiment (runs at import time).
# NOTE(review): the argparse description looks garbled ("bundl--deptes");
# left untouched here because it is a runtime string.
parser = argparse.ArgumentParser(description='Conflicting bundl--deptes with PyTorch and MNIST')
parser.add_argument('--arch', type=str, default="FNN", metavar='S',
                    help='Architecture - For WanDB filtering')
parser.add_argument('--depth', type=int, default=50, metavar='S',
                    help='Depth of network')
parser.add_argument('--width', type=int, default=1000, metavar='S',
                    help='Width of network')
parser.add_argument('--num_classes', type=int, default=10,
                    help='Number of classes used for predictions')
parser.add_argument('--batch_size', type=int, default=512, metavar='N',
                    help='input batch size for training (default: 64)')
parser.add_argument('--test_batch_size', type=int, default=1024, metavar='N',
                    help='input batch size for testing (default: 1024)')
parser.add_argument('--epochs', type=int, default=100, metavar='N',
                    help='number of epochs to train (default: 14)')
parser.add_argument('--learning_rate', type=float, default=5e-5, metavar='Learning rate',
                    help='learning rate (default: 1.0)')
parser.add_argument('--lbe_alpha', type=float, default=0.0,
                    help='Desired entropy at the beginning of trainig.')
parser.add_argument('--lbe_beta', type=float, default=0.0,
                    help='Weight lbe loss.')
parser.add_argument('--no_cuda', action='store_true', default=False,
                    help='disables CUDA training')
parser.add_argument('--seed', type=int, default=42, metavar='S',
                    help='random seed (default: 1)')
parser.add_argument('--log_interval', type=int, default=50, metavar='N',
                    help='how many batches to wait before logging training status')
parser.add_argument('--save_model', action='store_true', default=False,
                    help='For Saving the current Model')
args = parser.parse_args()
# Abort early (closing the wandb run) when exactly one of alpha/beta is zero:
# the LBE loss needs both set, or neither.
if((args.lbe_alpha == 0 and args.lbe_beta != 0) or (args.lbe_alpha != 0 and args.lbe_beta == 0)):
    wandb.finish(exit_code=0)
    exit()
# Get device
use_cuda = not args.no_cuda and torch.cuda.is_available()
torch.manual_seed(args.seed)
device = torch.device("cuda" if use_cuda else "cpu")
print(f"Using device {device}")
class FNN(nn.Module):
    """Deep plain feed-forward classifier for flattened 28x28 inputs.

    ``forward`` returns both the logits and the list of post-ReLU activations
    of the (depth - 2) hidden layers, which the LBE criterion consumes.
    """
    def __init__(self, args):
        super(FNN, self).__init__()
        self.args = args
        self.width = args.width
        self.depth = args.depth
        # Input projection, hidden stack, embedding and classifier head --
        # created in this exact order so seeded initialization is reproducible.
        self.fc_in = nn.Linear(28 * 28, self.width)
        hidden = [nn.Linear(self.width, self.width) for _ in range(self.depth - 2)]
        self.fcs = nn.ModuleList(hidden)
        self.fc_embeddings = nn.Linear(self.width, self.width)
        self.fc_classifier = nn.Linear(hidden[-1].out_features, args.num_classes)
    def forward(self, x):
        activations = []
        h = F.relu(self.fc_in(torch.flatten(x, 1)))
        for layer in self.fcs:
            h = F.relu(layer(h))
            activations.append(h)
        h = F.relu(self.fc_embeddings(h))
        logits = self.fc_classifier(h)
        return logits, activations
def train(args, model, device, train_loader, optimizer, epoch, criterion, seen_samples):
    """Run one training epoch; return the updated cumulative sample counter.

    ``criterion`` must return (total, ce, lbe) given ((logits, activations),
    target), as LBELoss/CELoss do. Diagnostics are printed and sent to wandb
    every ``args.log_interval`` batches, keyed by ``seen_samples``.
    """
    model.train()
    Hs = []  # NOTE(review): never written or read -- dead variable.
    loss = None
    for batch_idx, (data, target) in enumerate(train_loader):
        start = time.time()
        data, target = data.to(device), target.to(device)
        optimizer.zero_grad()
        output, A = model(data)
        # Add loss if necessary to optimize into the correct direction
        loss, ce_loss, lbe_loss = criterion((output, A), target)
        loss.backward()
        optimizer.step()
        end = time.time()
        seen_samples += output.shape[0]
        if batch_idx % args.log_interval == 0:
            # Batch-entropy diagnostics over the returned hidden activations.
            entropies = [batch_entropy(a) for a in A]
            H_out = entropies[-1]
            H_avg = torch.mean(torch.stack(entropies))
            pred = output.argmax(dim=1, keepdim=True)
            correct = pred.eq(target.view_as(pred)).sum().item()
            train_acc = correct / output.shape[0]
            lbe_alpha_mean = torch.mean(criterion.lbe_alpha_p)
            lbe_alpha_min = torch.min(criterion.lbe_alpha_p)
            lbe_alpha_max = torch.max(criterion.lbe_alpha_p)
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tAccuracy: {:.4f}\tLoss: {:.4f}\tTime: {:.4f}\tH_last: {}\t'.format(
                epoch, batch_idx * len(data), len(train_loader.dataset),
                100. * batch_idx / len(train_loader), train_acc, loss.item(), end - start, H_out))
            wandb.log({
                "train/h_out": H_out,
                "train/h_avg": H_avg,
                "train/loss": loss,
                "train/loss_lbe": lbe_loss,
                "train/loss_ce": ce_loss,
                "train/accuracy": train_acc,
                "train/lbe_alpha_p": lbe_alpha_mean,
                "train/lbe_alpha_p_min": lbe_alpha_min,
                "train/lbe_alpha_p_max": lbe_alpha_max,
            }, step=seen_samples)
    return seen_samples
# Sliding window (last 5 runs) of accuracies, keyed by split name.
test_acc_sliding = {}
def test(name, model, device, test_loader, criterion, seen_samples):
    """Evaluate one split; log the 5-run sliding mean of its accuracy.

    ``name`` ("eval"/"test") picks the sliding-window bucket and the wandb
    key prefix. Returns the raw (not smoothed) accuracy of this run.
    """
    global test_acc_sliding
    model.eval()
    test_loss = 0
    correct = 0
    test_acc_sliding[name] = [] if name not in test_acc_sliding else test_acc_sliding[name]
    with torch.no_grad():
        for data, target in test_loader:
            data, target = data.to(device), target.to(device)
            output, A = model(data)
            # criterion returns (total, ce, lbe); accumulate the total loss.
            test_loss += criterion((output, A), target)[0]
            pred = output.argmax(dim=1, keepdim=True)
            correct += pred.eq(target.view_as(pred)).sum().item()
    test_loss = test_loss / len(test_loader)
    test_acc = correct / len(test_loader.dataset)
    # Sliding avg of test acc over the last epochs
    test_acc_sliding[name].append(test_acc)
    test_acc_sliding[name] = test_acc_sliding[name][-5:]
    print('\n{} set: Average loss: {:.4f}, Accuracy: {}/{} ({:.3f}%)\n'.format(name,
        test_loss, correct, len(test_loader.dataset), test_acc * 100))
    wandb.log({
        f"{name}/accuracy": np.mean(test_acc_sliding[name]),
        f"{name}/loss_ce": test_loss
    }, step=seen_samples)
    return test_acc
def main():
    """Build MNIST loaders, model and criterion, then train for args.epochs."""
    # Init dataset
    transform_train=transforms.Compose([
        transforms.RandomAffine(degrees=20, translate=(0.1,0.1), scale=(0.9, 1.1)),
        transforms.ColorJitter(brightness=0.2, contrast=0.2),
        transforms.ToTensor(),
        transforms.Normalize((0.1307,), (0.3081,))])
    ds_train = datasets.MNIST('.data', train=1, download=True, transform=transform_train)
    # 50k/10k split of the official training set into train/eval.
    ds_train, ds_eval = torch.utils.data.random_split(ds_train, [50000, 10000])
    transform_test=transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.1307,), (0.3081,))])
    ds_test = datasets.MNIST('.data', train=False, transform=transform_test)
    wandb.init(config=args)
    train_kwargs = {'batch_size': args.batch_size, "shuffle": True}
    test_kwargs = {'batch_size': args.test_batch_size, "shuffle": False}
    if use_cuda:
        # NOTE(review): 'shuffle': True here also overrides the shuffle=False
        # intended for the eval/test loaders -- confirm this is deliberate.
        cuda_kwargs = {'num_workers': 1,
                       'pin_memory': True,
                       'shuffle': True}
        train_kwargs.update(cuda_kwargs)
        test_kwargs.update(cuda_kwargs)
    train_loader = torch.utils.data.DataLoader(ds_train,**train_kwargs)
    test_loader = torch.utils.data.DataLoader(ds_test, **test_kwargs)
    eval_loader = torch.utils.data.DataLoader(ds_eval, **test_kwargs)
    model = FNN(args=args).to(device)
    # LBE with one learnable target per hidden layer (depth-2) when beta is
    # non-zero; plain cross entropy otherwise.
    criterion = LBELoss(args.depth-2, lbe_alpha=args.lbe_alpha, lbe_beta=args.lbe_beta) if args.lbe_beta != 0.0 else CELoss()
    params = list(model.parameters()) + list(criterion.parameters())
    optimizer = optim.Adam(params, lr=args.learning_rate)
    # Note that pytorch calls kaiming per default via reset_parameters in __init__:
    # https://github.com/pytorch/pytorch/blob/master/torch/nn/modules/linear.py#L81
    seen_samples = 0
    epoch = 0
    for epoch in range(args.epochs):
        seen_samples = train(args, model, device, train_loader, optimizer, epoch, criterion, seen_samples)
        if(epoch % 5 == 0):
            accuracy = test("eval", model, device, eval_loader, criterion, seen_samples)
            accuracy = test("test", model, device, test_loader, criterion, seen_samples)
    return accuracy
# Script entry point (not executed on import).
if __name__ == '__main__':
    main()
layerwise-batch-entropy | layerwise-batch-entropy-main/experiment_neuron_dist/batch_entropy.py | import torch
from torch import nn
import torch.nn.functional as F
import numpy as np
import scipy
import scipy.stats
import random
def batch_entropy(x):
    """Differential-entropy estimate of a mini-batch under a Gaussian assumption.

    Each sample is flattened, the per-feature variance is taken across the
    batch dimension, the closed-form Gaussian entropy is applied (shifted by
    +1 inside the log so zero-variance features stay finite), and the result
    is averaged over features.
    """
    batch_size = x.shape[0]
    if batch_size <= 1:
        raise Exception("The batch entropy can only be calculated for |batch| > 1.")
    flat = torch.flatten(x, start_dim=1)
    per_feature_var = torch.var(flat, dim=0)
    per_feature_h = 0.5 * torch.log(np.pi * np.e * per_feature_var + 1)
    return torch.mean(per_feature_h)
class LBELoss(nn.Module):
    """Cross entropy plus the layerwise batch-entropy (LBE) regularizer.

    Per layer l the penalty is (H(a_l) - max(alpha_min, |alpha_l|))**2 * beta,
    where H is ``batch_entropy`` and the per-layer targets alpha_l are
    learnable (registered as a Parameter, so callers can pass
    ``criterion.parameters()`` to the optimizer).
    See also https://www.wolframalpha.com/input/?i=%28%28x-0.8%29*0.5%29**2+for+x+from+0+to+2+y+from+0+to+0.5
    """
    def __init__(self, num, lbe_alpha=0.5, lbe_alpha_min=0.2, lbe_beta=0.5):
        # num: number of regularized layers; one learnable target per layer.
        super(LBELoss, self).__init__()
        self.ce = nn.CrossEntropyLoss()
        lbe_alpha = torch.ones(num) * lbe_alpha
        self.lbe_alpha_p = torch.nn.Parameter(lbe_alpha, requires_grad=True)
        # NOTE(review): hard-coded "cuda" makes construction fail on CPU-only
        # machines -- consider deriving the device from the activations.
        self.lbe_alpha_min = torch.FloatTensor([lbe_alpha_min]).to("cuda")
        self.lbe_beta = lbe_beta
    def lbe_per_layer(self, a, i):
        # |alpha_i| keeps the learnable target non-negative; the alpha_min
        # floor stops it from collapsing toward zero entropy.
        lbe_alpha_l = torch.abs(self.lbe_alpha_p[i])
        lbe_l = (batch_entropy(a)-torch.maximum(self.lbe_alpha_min, lbe_alpha_l))**2
        return lbe_l * self.lbe_beta
    def __call__(self, output, target):
        """Return (ce + lbe, ce, lbe); ``output`` is (logits, activations)."""
        output, A = output
        ce = self.ce(output, target)
        if A is None:
            # No intermediate activations supplied: plain cross entropy.
            return ce, ce, torch.zeros(1)
        losses = [self.lbe_per_layer(a, i) for i, a in enumerate(A)]
        # The mean per-layer penalty is scaled by the current ce value.
        lbe = torch.mean(torch.stack(losses)) * ce
        return ce+lbe, ce, lbe
class CELoss(nn.Module):
    """Plain cross entropy exposed through the LBELoss-compatible interface.

    Returns the same (total, ce, lbe) triple as LBELoss so training code can
    swap criteria without branching; the zero-valued ``lbe_alpha_p`` and
    ``lbe_beta`` attributes exist only so logging code that reads them works.
    """
    def __init__(self):
        super(CELoss, self).__init__()
        self.ce = nn.CrossEntropyLoss()
        self.lbe_alpha_p = torch.zeros(1)
        self.lbe_beta = torch.zeros(1)
    def __call__(self, output, target):
        logits, _unused_activations = output
        ce_value = self.ce(logits, target)
        return ce_value, ce_value, 0.0
| 2,157 | 31.208955 | 113 | py |
layerwise-batch-entropy | layerwise-batch-entropy-main/experiment_neuron_dist/train.py | from __future__ import print_function
import argparse
import torch
import copy
import torch.nn as nn
from random import randint
import torch.nn.functional as F
import torch.optim as optim
import torch_optimizer as optim_special
from torchvision import datasets, transforms
from torch.optim.lr_scheduler import StepLR
from batch_entropy import batch_entropy, CELoss
import time
import numpy as np
import wandb
import matplotlib.pyplot as plt
import scipy
import math
# CLI and global run setup for the neuron-distribution experiment (runs at
# import time). NOTE(review): the argparse description looks garbled
# ("bundl--deptes"); left untouched here because it is a runtime string.
parser = argparse.ArgumentParser(description='Conflicting bundl--deptes with PyTorch and MNIST')
parser.add_argument('--arch', type=str, default="FNN2", metavar='S',
                    help='Architecture - For WanDB filtering')
parser.add_argument('--depth', type=int, default=6, metavar='S',
                    help='Depth of network')
parser.add_argument('--width', type=int, default=500, metavar='S',
                    help='Width of network')
parser.add_argument('--num_classes', type=int, default=10,
                    help='Number of classes used for predictions')
parser.add_argument('--batch_size', type=int, default=1024, metavar='N',
                    help='input batch size for training (default: 64)')
parser.add_argument('--test_batch_size', type=int, default=1024, metavar='N',
                    help='input batch size for testing (default: 1024)')
parser.add_argument('--epochs', type=int, default=5, metavar='N',
                    help='number of epochs to train (default: 14)')
parser.add_argument('--learning_rate', type=float, default=1e-5, metavar='Learning rate',
                    help='learning rate (default: 1.0)')
parser.add_argument('--gamma', type=float, default=0.5, metavar='M',
                    help='Learning rate step gamma (default: 0.5)')
parser.add_argument('--no_cuda', action='store_true', default=False,
                    help='disables CUDA training')
parser.add_argument('--seed', type=int, default=42, metavar='S',
                    help='random seed (default: 1)')
parser.add_argument('--log_interval', type=int, default=50, metavar='N',
                    help='how many batches to wait before logging training status')
parser.add_argument('--save_model', action='store_true', default=False,
                    help='For Saving the current Model')
args = parser.parse_args()
# Get device
use_cuda = not args.no_cuda and torch.cuda.is_available()
torch.manual_seed(args.seed)
device = torch.device("cuda" if use_cuda else "cpu")
print(f"Using device {device}")
# Number of randomly sampled neurons per layer for the histogram figure.
n_neurons=5
class FNN(nn.Module):
    """Feed-forward MNIST classifier that also returns hidden activations.

    ``forward`` yields (logits, activations), where activations holds the
    post-ReLU outputs of every hidden layer including the embedding layer
    (depth - 1 tensors in total).
    """
    def __init__(self, args):
        super(FNN, self).__init__()
        self.args = args
        self.width = args.width
        self.depth = args.depth
        # Module creation order matters for seeded weight initialization.
        self.fc_in = nn.Linear(28 * 28, self.width)
        hidden = [nn.Linear(self.width, self.width) for _ in range(self.depth - 2)]
        self.fcs = nn.ModuleList(hidden)
        self.fc_embeddings = nn.Linear(self.width, self.width)
        self.fc_classifier = nn.Linear(hidden[-1].out_features, args.num_classes)
    def forward(self, x):
        activations = []
        h = F.relu(self.fc_in(torch.flatten(x, 1)))
        for layer in self.fcs:
            h = F.relu(layer(h))
            activations.append(h)
        h = F.relu(self.fc_embeddings(h))
        activations.append(h)
        logits = self.fc_classifier(h)
        return logits, activations
def train(args, model, device, train_loader, optimizer, epoch, criterion, seen_samples):
    """One training epoch; also samples activations for the histogram plot.

    Returns (seen_samples, neuron_a), where neuron_a[d][n] holds the last
    batch's activation values of randomly chosen neuron n in hidden layer d.
    """
    model.train()
    loss = None
    for batch_idx, (data, target) in enumerate(train_loader):
        start = time.time()
        data, target = data.to(device), target.to(device)
        optimizer.zero_grad()
        output, A = model(data)
        # Criterion returns (total, ce, lbe); only the total is optimized.
        loss, _, _ = criterion((output, A), target)
        loss.backward()
        optimizer.step()
        end = time.time()
        seen_samples += output.shape[0]
        if batch_idx % args.log_interval == 0:
            entropies = [batch_entropy(a) for a in A]
            H_out = entropies[-1]
            pred = output.argmax(dim=1, keepdim=True)
            correct = pred.eq(target.view_as(pred)).sum().item()
            train_acc = correct / output.shape[0]
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tAccuracy: {:.4f}\tLoss: {:.4f}\tTime: {:.4f}\tH_last: {}\t'.format(
                epoch, batch_idx * len(data), len(train_loader.dataset),
                100. * batch_idx / len(train_loader), train_acc, loss.item(), end - start, H_out))
    # Create the plot data from the last batch of the epoch: sample n_neurons
    # random units per hidden layer and keep their activation values.
    depth = args.depth
    neuron_a = {}
    # Init random neurons
    for d in range(depth-1):
        random_neurons = [randint(0, args.width-1) for _ in range(n_neurons)]
        neuron_a[d] = {}
        for n in random_neurons:
            vals = A[d][:,n].cpu().detach().numpy()
            neuron_a[d][n] = vals
    return seen_samples, neuron_a
# Sliding window (last 5 runs) of test accuracies.
test_acc_sliding = []
def test(model, device, test_loader, criterion, seen_samples):
    """Evaluate on the test set; returns this run's raw accuracy.

    Maintains a module-global sliding window (last 5 runs) of accuracies.
    ``seen_samples`` is unused in this function body.
    """
    global test_acc_sliding
    model.eval()
    test_loss = 0
    correct = 0
    with torch.no_grad():
        for data, target in test_loader:
            data, target = data.to(device), target.to(device)
            output, A = model(data)
            # criterion returns (total, ce, lbe); accumulate the total loss.
            test_loss += criterion((output, A), target)[0]
            pred = output.argmax(dim=1, keepdim=True)
            correct += pred.eq(target.view_as(pred)).sum().item()
    test_loss = test_loss / len(test_loader)
    test_acc = correct / len(test_loader.dataset)
    # Sliding avg of test acc over the last epochs
    test_acc_sliding.append(test_acc)
    test_acc_sliding = test_acc_sliding[-5:]
    print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.3f}%)\n'.format(
        test_loss, correct, len(test_loader.dataset), test_acc * 100))
    return test_acc
def main():
    """Train briefly on MNIST, then plot per-neuron activation histograms."""
    # Init dataset
    transform_train=transforms.Compose([
        transforms.RandomAffine(degrees=20, translate=(0.1,0.1), scale=(0.9, 1.1)),
        transforms.ColorJitter(brightness=0.2, contrast=0.2),
        transforms.ToTensor(),
        transforms.Normalize((0.1307,), (0.3081,))])
    ds_train = datasets.MNIST('.data', train=1, download=True, transform=transform_train)
    transform_test=transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.1307,), (0.3081,))])
    ds_test = datasets.MNIST('.data', train=False, transform=transform_test)
    wandb.init(config=args)
    train_kwargs = {'batch_size': args.batch_size, "shuffle": True}
    test_kwargs = {'batch_size': args.test_batch_size, "shuffle": False}
    if use_cuda:
        # NOTE(review): 'shuffle': True here also overrides the shuffle=False
        # intended for the test loader -- confirm this is deliberate.
        cuda_kwargs = {'num_workers': 1,
                       'pin_memory': True,
                       'shuffle': True}
        train_kwargs.update(cuda_kwargs)
        test_kwargs.update(cuda_kwargs)
    train_loader = torch.utils.data.DataLoader(ds_train,**train_kwargs)
    test_loader = torch.utils.data.DataLoader(ds_test, **test_kwargs)
    model = FNN(args=args).to(device)
    criterion = CELoss()
    optimizer = optim.Adam(model.parameters(), lr=args.learning_rate)
    scheduler = torch.optim.lr_scheduler.MultiStepLR(
        optimizer,
        milestones=[20, 40],
        gamma=args.gamma)
    # Note that pytorch calls kaiming per default via reset_parameters in __init__:
    # https://github.com/pytorch/pytorch/blob/master/torch/nn/modules/linear.py#L81
    seen_samples = 0
    epoch = 0
    for epoch in range(args.epochs):
        # NOTE(review): scheduler.step() before the epoch's training is the
        # pre-1.1 PyTorch ordering -- confirm intended on current versions.
        scheduler.step()
        seen_samples, neurons_a = train(args, model, device, train_loader, optimizer, epoch, criterion, seen_samples)
        accuracy = test(model, device, test_loader, criterion, seen_samples)
    # Histogram grid: one row per hidden layer, one column per sampled neuron,
    # all sharing the activation range of the last epoch's sampled values.
    plt.clf()
    plt.xticks([])
    plt.yticks([])
    plt.axis('off')
    fig, axes = plt.subplots(nrows=args.depth-1, ncols=n_neurons, sharex=True, sharey=False)
    fig.tight_layout()
    max_val = max([max(k) for l in neurons_a.values() for k in l.values()])
    min_val = min([min(k) for l in neurons_a.values() for k in l.values()])
    for d in neurons_a.keys():
        for i, n in enumerate(neurons_a[d].keys()):
            ax = axes[d][i]
            data = neurons_a[d][n][:64]
            ax.hist(data, bins=50, range=(min_val, max_val))
            ax.set_yticks([])
            ax.set_xticks([])
            # Per-neuron batch entropy over (up to) 64 activation values.
            entropy = batch_entropy(torch.tensor([[x] for x in data]))
            title = " H=%.1f" % (entropy)
            ax.set_title(title, size=8)
    fig.savefig(f"neuron_dist.png", bbox_inches='tight', dpi=150)
    return accuracy
# Script entry point (not executed on import).
if __name__ == '__main__':
    main()
layerwise-batch-entropy | layerwise-batch-entropy-main/experiment_info_flow/batch_entropy.py | import torch
from torch import nn
import torch.nn.functional as F
import numpy as np
import scipy
import scipy.stats
import random
def batch_entropy(x):
    """Differential-entropy estimate of a mini-batch under a Gaussian assumption.

    Each sample is flattened, the per-feature variance is taken across the
    batch dimension, the closed-form Gaussian entropy is applied (shifted by
    +1 inside the log so zero-variance features stay finite), and the result
    is averaged over features.
    """
    batch_size = x.shape[0]
    if batch_size <= 1:
        raise Exception("The batch entropy can only be calculated for |batch| > 1.")
    flat = torch.flatten(x, start_dim=1)
    per_feature_var = torch.var(flat, dim=0)
    per_feature_h = 0.5 * torch.log(np.pi * np.e * per_feature_var + 1)
    return torch.mean(per_feature_h)
class LBELoss(nn.Module):
    """Cross entropy plus the layerwise batch-entropy (LBE) regularizer.

    Per layer l the penalty is (H(a_l) - max(alpha_min, |alpha_l|))**2 * beta,
    where H is ``batch_entropy`` and the per-layer targets alpha_l are
    learnable (registered as a Parameter, so callers can pass
    ``criterion.parameters()`` to the optimizer).
    See also https://www.wolframalpha.com/input/?i=%28%28x-0.8%29*0.5%29**2+for+x+from+0+to+2+y+from+0+to+0.5
    """
    def __init__(self, num, lbe_alpha=0.5, lbe_alpha_min=0.2, lbe_beta=0.5):
        # num: number of regularized layers; one learnable target per layer.
        super(LBELoss, self).__init__()
        self.ce = nn.CrossEntropyLoss()
        lbe_alpha = torch.ones(num) * lbe_alpha
        self.lbe_alpha_p = torch.nn.Parameter(lbe_alpha, requires_grad=True)
        # NOTE(review): hard-coded "cuda" makes construction fail on CPU-only
        # machines -- consider deriving the device from the activations.
        self.lbe_alpha_min = torch.FloatTensor([lbe_alpha_min]).to("cuda")
        self.lbe_beta = lbe_beta
    def lbe_per_layer(self, a, i):
        # |alpha_i| keeps the learnable target non-negative; the alpha_min
        # floor stops it from collapsing toward zero entropy.
        lbe_alpha_l = torch.abs(self.lbe_alpha_p[i])
        lbe_l = (batch_entropy(a)-torch.maximum(self.lbe_alpha_min, lbe_alpha_l))**2
        return lbe_l * self.lbe_beta
    def __call__(self, output, target):
        """Return (ce + lbe, ce, lbe); ``output`` is (logits, activations)."""
        output, A = output
        ce = self.ce(output, target)
        if A is None:
            # No intermediate activations supplied: plain cross entropy.
            return ce, ce, torch.zeros(1)
        losses = [self.lbe_per_layer(a, i) for i, a in enumerate(A)]
        # The mean per-layer penalty is scaled by the current ce value.
        lbe = torch.mean(torch.stack(losses)) * ce
        return ce+lbe, ce, lbe
class CELoss(nn.Module):
    """Plain cross entropy exposed through the LBELoss-compatible interface.

    Returns the same (total, ce, lbe) triple as LBELoss so training code can
    swap criteria without branching; the zero-valued ``lbe_alpha_p`` and
    ``lbe_beta`` attributes exist only so logging code that reads them works.
    """
    def __init__(self):
        super(CELoss, self).__init__()
        self.ce = nn.CrossEntropyLoss()
        self.lbe_alpha_p = torch.zeros(1)
        self.lbe_beta = torch.zeros(1)
    def __call__(self, output, target):
        logits, _unused_activations = output
        ce_value = self.ce(logits, target)
        return ce_value, ce_value, 0.0
| 2,157 | 31.208955 | 113 | py |
layerwise-batch-entropy | layerwise-batch-entropy-main/experiment_info_flow/train.py | from __future__ import print_function
import argparse
import torch
import copy
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch_optimizer as optim_special
from torchvision import datasets, transforms
from torch.optim.lr_scheduler import StepLR
from batch_entropy import batch_entropy, LBELoss, CELoss
import time
import numpy as np
import matplotlib.pyplot as plt
# CLI and global run setup for the information-flow experiment (runs at
# import time). NOTE(review): the argparse description looks garbled
# ("bundl--deptes"); left untouched here because it is a runtime string.
parser = argparse.ArgumentParser(description='Conflicting bundl--deptes with PyTorch and MNIST')
parser.add_argument('--depth', type=int, default=5, metavar='S',
                    help='Depth of network')
parser.add_argument('--width', type=int, default=1000, metavar='S',
                    help='Width of network')
parser.add_argument('--num_classes', type=int, default=10,
                    help='Number of classes used for predictions')
parser.add_argument('--batch_size', type=int, default=512, metavar='N',
                    help='input batch size for training (default: 64)')
parser.add_argument('--test_batch_size', type=int, default=1024, metavar='N',
                    help='input batch size for testing (default: 1024)')
parser.add_argument('--epochs', type=int, default=2, metavar='N',
                    help='number of epochs to train (default: 14)')
parser.add_argument('--learning_rate', type=float, default=3e-5, metavar='Learning rate',
                    help='learning rate (default: 1.0)')
parser.add_argument('--lbe_alpha', type=float, default=0.8,
                    help='Desired entropy at the beginning of trainig.')
parser.add_argument('--lbe_beta', type=float, default=0.0,
                    help='Weight lbe loss.')
parser.add_argument('--no_cuda', action='store_true', default=True,
                    help='disables CUDA training')
parser.add_argument('--seed', type=int, default=42, metavar='S',
                    help='random seed (default: 1)')
parser.add_argument('--log_interval', type=int, default=5, metavar='N',
                    help='how many batches to wait before logging training status')
args = parser.parse_args()
# Get device
use_cuda = not args.no_cuda and torch.cuda.is_available()
torch.manual_seed(args.seed)
device = torch.device("cuda" if use_cuda else "cpu")
print(f"Using device {device}")
class FNN(nn.Module):
    """Uniform feed-forward stack: ``depth`` linear+ReLU layers plus a classifier.

    ``forward`` returns (logits, activations) with one post-ReLU tensor per
    layer in the stack (``depth`` entries).
    """
    def __init__(self, args):
        super(FNN, self).__init__()
        self.args = args
        self.width = args.width
        self.depth = args.depth
        # First layer maps the flattened 28x28 image; the rest are square.
        # Creation order is kept stable for seeded weight initialization.
        layers = [nn.Linear(28*28, self.width)]
        layers.extend(nn.Linear(self.width, self.width) for _ in range(self.depth-1))
        self.fcs = nn.ModuleList(layers)
        self.fc_classifier = nn.Linear(layers[-1].out_features, args.num_classes)
    def forward(self, x):
        activations = []
        h = torch.flatten(x, 1)
        for layer in self.fcs:
            h = F.relu(layer(h))
            activations.append(h)
        logits = self.fc_classifier(h)
        return logits, activations
def train(args, model, device, train_loader, optimizer, epoch, criterion, seen_samples, X, Y, Z):
    """One training epoch; extends X/Y/Z with per-layer entropy scatter data.

    Every ``args.log_interval`` batches: X gets layer indices, Y gets
    log(1 + H) of each layer's batch entropy, and Z gets the sample count in
    thousands (used as scatter color). Returns (seen_samples, X, Y, Z).
    """
    model.train()
    Hs = []  # NOTE(review): never written or read -- dead variable.
    loss = None
    for batch_idx, (data, target) in enumerate(train_loader):
        start = time.time()
        data, target = data.to(device), target.to(device)
        optimizer.zero_grad()
        output, A = model(data)
        # Add loss if necessary to optimize into the correct direction
        loss, _, _ = criterion((output, A), target)
        loss.backward()
        optimizer.step()
        end = time.time()
        seen_samples += output.shape[0]
        if batch_idx % args.log_interval == 0:
            entropies = [batch_entropy(a) for a in A]
            H_out = batch_entropy(A[-1])
            pred = output.argmax(dim=1, keepdim=True)
            correct = pred.eq(target.view_as(pred)).sum().item()
            train_acc = correct / output.shape[0]
            Y.extend([np.log(1+e.cpu().detach().numpy()) for e in entropies])
            X.extend([i for i in range(len(entropies))])
            Z.extend([seen_samples/1000 for _ in range(len(entropies))])
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tAccuracy: {:.4f}\tLoss: {:.4f}\tTime: {:.4f}\tH_last: {}\t'.format(
                epoch, batch_idx * len(data), len(train_loader.dataset),
                100. * batch_idx / len(train_loader), train_acc, loss.item(), end - start, H_out))
    return seen_samples, X, Y, Z
# Sliding window (last 5 runs) of test accuracies.
test_acc_sliding = []
def test(model, device, test_loader, criterion, seen_samples):
    """Evaluate on the test set; returns this run's raw accuracy.

    Maintains a module-global sliding window (last 5 runs) of accuracies.
    ``seen_samples`` is unused in this function body.
    """
    global test_acc_sliding
    model.eval()
    test_loss = 0
    correct = 0
    with torch.no_grad():
        for data, target in test_loader:
            data, target = data.to(device), target.to(device)
            output, A = model(data)
            # criterion returns (total, ce, lbe); accumulate the total loss.
            test_loss += criterion((output, A), target)[0]
            pred = output.argmax(dim=1, keepdim=True)
            correct += pred.eq(target.view_as(pred)).sum().item()
    test_loss = test_loss / len(test_loader)
    test_acc = correct / len(test_loader.dataset)
    # Sliding avg of test acc over the last epochs
    test_acc_sliding.append(test_acc)
    test_acc_sliding = test_acc_sliding[-5:]
    print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.3f}%)\n'.format(
        test_loss, correct, len(test_loader.dataset), test_acc * 100))
    return test_acc
def main():
    """Train on MNIST and save a per-layer batch-entropy scatter plot."""
    # Init dataset
    transform_train=transforms.Compose([
        transforms.RandomAffine(degrees=20, translate=(0.1,0.1), scale=(0.9, 1.1)),
        transforms.ColorJitter(brightness=0.2, contrast=0.2),
        transforms.ToTensor(),
        transforms.Normalize((0.1307,), (0.3081,))])
    ds_train = datasets.MNIST('.data', train=1, download=True, transform=transform_train)
    transform_test=transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.1307,), (0.3081,))])
    ds_test = datasets.MNIST('.data', train=False, transform=transform_test)
    train_kwargs = {'batch_size': args.batch_size, "shuffle": True}
    test_kwargs = {'batch_size': args.test_batch_size, "shuffle": False}
    if use_cuda:
        # NOTE(review): 'shuffle': True here also overrides the shuffle=False
        # intended for the test loader -- confirm this is deliberate.
        cuda_kwargs = {'num_workers': 1,
                       'pin_memory': True,
                       'shuffle': True}
        train_kwargs.update(cuda_kwargs)
        test_kwargs.update(cuda_kwargs)
    train_loader = torch.utils.data.DataLoader(ds_train,**train_kwargs)
    test_loader = torch.utils.data.DataLoader(ds_test, **test_kwargs)
    model = FNN(args=args).to(device)
    # LBE with one learnable target per layer when beta is set, plain CE otherwise.
    criterion = LBELoss(args.depth, lbe_alpha=args.lbe_alpha, lbe_beta=args.lbe_beta) if args.lbe_beta != 0.0 else CELoss()
    params = list(model.parameters()) + list(criterion.parameters())
    optimizer = optim.Adam(params, lr=args.learning_rate)
    # Note that pytorch calls kaiming per default via reset_parameters in __init__:
    # https://github.com/pytorch/pytorch/blob/master/torch/nn/modules/linear.py#L81
    seen_samples = 0
    epoch = 0
    X = []
    Y = []
    Z = []
    for epoch in range(args.epochs):
        seen_samples, X, Y, Z = train(args, model, device, train_loader, optimizer, epoch, criterion, seen_samples, X, Y, Z)
        accuracy = test(model, device, test_loader, criterion, seen_samples)
    # Scatter: x = layer index, y = log(1+H), color = thousands of samples seen.
    plt.title("L=%d, LBE_beta=%.1f, Accuracy=%.2f%%" % (args.depth, args.lbe_beta, accuracy*100.0))
    plt.ylabel("Batch Entropy")
    plt.xlabel("Layer")
    points = plt.scatter(X, Y, s=150, c=Z, cmap='jet')
    plt.colorbar(points)
    plt.tight_layout()
    plt.savefig(f"info_flow_{args.depth}_{args.lbe_beta}.png")
    return accuracy
# Script entry point (not executed on import).
if __name__ == '__main__':
    main()
layerwise-batch-entropy | layerwise-batch-entropy-main/experiment_normalization/batch_entropy.py | import torch
from torch import nn
import torch.nn.functional as F
import numpy as np
import scipy
import scipy.stats
import random
def batch_entropy(x):
    """Differential-entropy estimate of a mini-batch under a Gaussian assumption.

    Each sample is flattened, the per-feature variance is taken across the
    batch dimension, the closed-form Gaussian entropy is applied (shifted by
    +1 inside the log so zero-variance features stay finite), and the result
    is averaged over features.
    """
    batch_size = x.shape[0]
    if batch_size <= 1:
        raise Exception("The batch entropy can only be calculated for |batch| > 1.")
    flat = torch.flatten(x, start_dim=1)
    per_feature_var = torch.var(flat, dim=0)
    per_feature_h = 0.5 * torch.log(np.pi * np.e * per_feature_var + 1)
    return torch.mean(per_feature_h)
class LBELoss(nn.Module):
    """Cross entropy plus the layerwise batch-entropy (LBE) regularizer.

    Per layer l the penalty is (H(a_l) - max(alpha_min, |alpha_l|))**2 * beta,
    where H is ``batch_entropy`` and the per-layer targets alpha_l are
    learnable (registered as a Parameter, so callers can pass
    ``criterion.parameters()`` to the optimizer).
    See also https://www.wolframalpha.com/input/?i=%28%28x-0.8%29*0.5%29**2+for+x+from+0+to+2+y+from+0+to+0.5
    """
    def __init__(self, num, lbe_alpha=0.5, lbe_alpha_min=0.2, lbe_beta=0.5):
        # num: number of regularized layers; one learnable target per layer.
        super(LBELoss, self).__init__()
        self.ce = nn.CrossEntropyLoss()
        lbe_alpha = torch.ones(num) * lbe_alpha
        self.lbe_alpha_p = torch.nn.Parameter(lbe_alpha, requires_grad=True)
        # NOTE(review): hard-coded "cuda" makes construction fail on CPU-only
        # machines -- consider deriving the device from the activations.
        self.lbe_alpha_min = torch.FloatTensor([lbe_alpha_min]).to("cuda")
        self.lbe_beta = lbe_beta
    def lbe_per_layer(self, a, i):
        # |alpha_i| keeps the learnable target non-negative; the alpha_min
        # floor stops it from collapsing toward zero entropy.
        lbe_alpha_l = torch.abs(self.lbe_alpha_p[i])
        lbe_l = (batch_entropy(a)-torch.maximum(self.lbe_alpha_min, lbe_alpha_l))**2
        return lbe_l * self.lbe_beta
    def __call__(self, output, target):
        """Return (ce + lbe, ce, lbe); ``output`` is (logits, activations)."""
        output, A = output
        ce = self.ce(output, target)
        if A is None:
            # No intermediate activations supplied: plain cross entropy.
            return ce, ce, torch.zeros(1)
        losses = [self.lbe_per_layer(a, i) for i, a in enumerate(A)]
        # The mean per-layer penalty is scaled by the current ce value.
        lbe = torch.mean(torch.stack(losses)) * ce
        return ce+lbe, ce, lbe
class CELoss(nn.Module):
    """Plain cross entropy exposed through the LBELoss-compatible interface.

    Returns the same (total, ce, lbe) triple as LBELoss so training code can
    swap criteria without branching; the zero-valued ``lbe_alpha_p`` and
    ``lbe_beta`` attributes exist only so logging code that reads them works.
    """
    def __init__(self):
        super(CELoss, self).__init__()
        self.ce = nn.CrossEntropyLoss()
        self.lbe_alpha_p = torch.zeros(1)
        self.lbe_beta = torch.zeros(1)
    def __call__(self, output, target):
        logits, _unused_activations = output
        ce_value = self.ce(logits, target)
        return ce_value, ce_value, 0.0
| 2,157 | 31.208955 | 113 | py |
layerwise-batch-entropy | layerwise-batch-entropy-main/experiment_normalization/train.py | from __future__ import print_function
import os
# Pin this process to GPU 0; must run before torch initializes CUDA.
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
import argparse
from typing import Dict
import torch
import copy
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.utils.weight_norm import weight_norm
import torch.optim as optim
import torch_optimizer as optim_special
from torchvision import datasets, transforms
from torch.optim.lr_scheduler import StepLR
from batch_entropy import batch_entropy, LBELoss, CELoss
import time
import numpy as np
import wandb
# ---------------------------------------------------------------------------
# Command-line interface for the normalization experiment: network shape
# (depth/width), the normalization variant under test, and optimization
# hyper-parameters.
# NOTE(review): the description text "Conflicting bundl--deptes" looks like a
# copy/paste artifact - confirm the intended wording upstream.
# ---------------------------------------------------------------------------
parser = argparse.ArgumentParser(description='Conflicting bundl--deptes with PyTorch and MNIST')
parser.add_argument('--arch', type=str, default="FNN", metavar='S',
                    help='Architecture - For WanDB filtering')
parser.add_argument('--depth', type=int, default=500, metavar='S',
                    help='Depth of network')
parser.add_argument('--width', type=int, default=1000, metavar='S',
                    help='Width of network')
# "None" below is the literal string choice; FNN compares args.norm against
# these strings, so the Python None default also disables normalization.
parser.add_argument("--norm", type=str, default=None, choices=["BatchNorm", "LayerNorm", "WeightNorm", "SELU", "None"], help="Type of normalization to use in each hidden layer")
parser.add_argument('--num_classes', type=int, default=10,
                    help='Number of classes used for predictions')
parser.add_argument('--batch_size', type=int, default=128, metavar='N',
                    help='input batch size for training (default: 64)')
parser.add_argument('--test_batch_size', type=int, default=1024, metavar='N',
                    help='input batch size for testing (default: 1024)')
parser.add_argument('--epochs', type=int, default=300, metavar='N',
                    help='number of epochs to train (default: 14)')
parser.add_argument('--learning_rate', type=float, default=2e-6, metavar='Learning rate',
                    help='learning rate (default: 1.0)')
parser.add_argument('--no_cuda', action='store_true', default=False,
                    help='disables CUDA training')
parser.add_argument('--seed', type=int, default=42, metavar='S',
                    help='random seed (default: 1)')
parser.add_argument('--log_interval', type=int, default=50, metavar='N',
                    help='how many batches to wait before logging training status')
parser.add_argument('--save_model', action='store_true', default=False,
                    help='For Saving the current Model')
args = parser.parse_args()
# Get device
# CUDA_VISIBLE_DEVICES is pinned to GPU 0 at the top of the file, so "cuda"
# here always refers to that single visible device.
use_cuda = not args.no_cuda and torch.cuda.is_available()
torch.manual_seed(args.seed)
device = torch.device("cuda" if use_cuda else "cpu")
print(f"Using device {device}")
class FNN(nn.Module):
    """Deep fully-connected MNIST classifier with configurable normalization.

    Each hidden block is Linear -> (optional norm) -> activation, where the
    normalization flavor (BatchNorm / LayerNorm / WeightNorm / SELU / none)
    is selected by ``args.norm``.  ``forward`` also returns the
    post-activation tensors and, where a norm layer exists, the
    pre-normalization tensors of every block so callers can compute
    batch-entropy statistics.
    """

    def __init__(self, args):
        super(FNN, self).__init__()
        self.args = args
        self.width = args.width
        self.depth = args.depth
        self.norm = args.norm
        # Kept for parity with the original implementation; forward() relies
        # on the per-block nn.SELU/nn.ReLU modules instead.
        self.activation_fn = F.selu if self.norm == "SELU" else F.relu
        sizes = [28 * 28] + [self.width] * (self.depth - 1)
        blocks = []
        for n_in, n_out in zip(sizes[:-1], sizes[1:]):
            block: Dict[str, nn.Module] = {"fc": nn.Linear(n_in, n_out)}
            if self.norm == "BatchNorm":
                block["norm"] = nn.BatchNorm1d(n_out)
            elif self.norm == "LayerNorm":
                block["norm"] = nn.LayerNorm(n_out)
            elif self.norm == "WeightNorm":
                # Weight norm re-parameterizes the linear layer itself, so it
                # replaces "fc" rather than adding a separate "norm" entry.
                block["fc"] = weight_norm(nn.Linear(n_in, n_out))
            block["activation"] = nn.SELU() if self.norm == "SELU" else nn.ReLU()
            blocks.append(nn.ModuleDict(block))
        self.layers = nn.ModuleList(blocks)
        self.fc_classifier = nn.Linear(self.width, args.num_classes)

    def forward(self, x):
        post_act = []
        pre_norm = []
        h = torch.flatten(x, 1)
        for block in self.layers:
            h = block["fc"](h)
            if "norm" in block:
                pre_norm.append(h)  # activation recorded before normalization
                h = block["norm"](h)
            h = block["activation"](h)
            post_act.append(h)
        return self.fc_classifier(h), post_act, pre_norm
def train(args, model, device, train_loader, optimizer, epoch, criterion, seen_samples):
    """Run one training epoch, periodically logging entropy stats to wandb.

    Returns the updated running count of samples seen, which is used as the
    global wandb step across epochs.
    """
    model.train()
    Hs = []  # NOTE(review): never populated or read - candidate for removal
    loss = None
    for batch_idx, (data, target) in enumerate(train_loader):
        start = time.time()
        data, target = data.to(device), target.to(device)
        optimizer.zero_grad()
        # Model returns logits plus post-activation and pre-norm activations.
        output, A, A_pre_norm = model(data)
        # Add loss if necessary to optimize into the correct direction
        loss, ce_loss, lbe_loss = criterion((output, A), target)
        loss.backward()
        optimizer.step()
        end = time.time()
        seen_samples += output.shape[0]
        if batch_idx % args.log_interval == 0:
            # Batch entropy of every hidden activation (post-activation).
            entropies = [batch_entropy(a) for a in A]
            H_out = entropies[-1]
            H_avg = torch.mean(torch.stack(entropies))
            # A_pre_norm is only non-empty when the model uses a norm layer.
            if len(A_pre_norm) > 0:
                entropies_pre_norm = [batch_entropy(a_pre_norm) for a_pre_norm in A_pre_norm]
                H_out_pre_norm = entropies_pre_norm[-1]
                H_avg_pre_norm = torch.mean(torch.stack(entropies_pre_norm))
            pred = output.argmax(dim=1, keepdim=True)
            correct = pred.eq(target.view_as(pred)).sum().item()
            train_acc = correct / output.shape[0]
            # Summary statistics of the learnable per-layer entropy targets.
            lbe_alpha_mean = torch.mean(criterion.lbe_alpha_p)
            lbe_alpha_min = torch.min(criterion.lbe_alpha_p)
            lbe_alpha_max = torch.max(criterion.lbe_alpha_p)
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tAccuracy: {:.4f}\tLoss: {:.4f}\tTime: {:.4f}\tH_last: {}\t'.format(
                epoch, batch_idx * len(data), len(train_loader.dataset),
                100. * batch_idx / len(train_loader), train_acc, loss.item(), end - start, H_out))
            log = {
                "train/h_out": H_out,
                "train/h_avg": H_avg,
                "train/loss": loss,
                "train/loss_lbe": lbe_loss,
                "train/loss_ce": ce_loss,
                "train/accuracy": train_acc,
                "train/lbe_alpha_p": lbe_alpha_mean,
                "train/lbe_alpha_p_min": lbe_alpha_min,
                "train/lbe_alpha_p_max": lbe_alpha_max,
            }
            if len(A_pre_norm) > 0:
                log["train/h_out_pre_norm"] = H_out_pre_norm
                log["train/h_avg_pre_norm"] = H_avg_pre_norm
            wandb.log(log, step=seen_samples)
    return seen_samples
# Per-split history of recent accuracies; test() keeps the last 5 entries so
# the logged accuracy is a sliding average rather than a single-epoch value.
test_acc_sliding = {}
def test(name, model, device, test_loader, criterion, seen_samples):
    """Evaluate the model on one split and log a 5-point sliding accuracy.

    ``name`` ("eval"/"test") selects the wandb metric prefix and the sliding
    window bucket.  Returns the raw (non-smoothed) accuracy of this pass.
    """
    global test_acc_sliding
    model.eval()
    test_loss = 0
    correct = 0
    test_acc_sliding[name] = [] if name not in test_acc_sliding else test_acc_sliding[name]
    with torch.no_grad():
        for data, target in test_loader:
            data, target = data.to(device), target.to(device)
            output, A, _ = model(data)
            # criterion returns (total, ce, lbe); accumulate the total loss.
            test_loss += criterion((output, A), target)[0]
            pred = output.argmax(dim=1, keepdim=True)
            correct += pred.eq(target.view_as(pred)).sum().item()
    test_loss = test_loss / len(test_loader)
    test_acc = correct / len(test_loader.dataset)
    # Sliding avg of test acc over the last epochs
    test_acc_sliding[name].append(test_acc)
    test_acc_sliding[name] = test_acc_sliding[name][-5:]
    print('\n{} set: Average loss: {:.4f}, Accuracy: {}/{} ({:.3f}%)\n'.format(name,
        test_loss, correct, len(test_loader.dataset), test_acc * 100))
    wandb.log({
        f"{name}/accuracy": np.mean(test_acc_sliding[name]),
        f"{name}/loss_ce": test_loss
    }, step=seen_samples)
    return test_acc
def main():
    """Train the normalization-experiment FNN on MNIST and report accuracy."""
    # Init dataset
    transform_train=transforms.Compose([
        transforms.RandomAffine(degrees=20, translate=(0.1,0.1), scale=(0.9, 1.1)),
        transforms.ColorJitter(brightness=0.2, contrast=0.2),
        transforms.ToTensor(),
        transforms.Normalize((0.1307,), (0.3081,))])
    ds_train = datasets.MNIST('.data', train=1, download=True, transform=transform_train)
    ds_train, ds_eval = torch.utils.data.random_split(ds_train, [50000, 10000])
    transform_test=transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.1307,), (0.3081,))])
    ds_test = datasets.MNIST('.data', train=False, transform=transform_test)
    wandb.init(config=args)
    train_kwargs = {'batch_size': args.batch_size, "shuffle": True}
    test_kwargs = {'batch_size': args.test_batch_size, "shuffle": False}
    if use_cuda:
        # NOTE(review): cuda_kwargs re-sets shuffle=True and is applied to
        # test_kwargs too, silently overriding shuffle=False for the eval and
        # test loaders - likely unintended, confirm before relying on order.
        cuda_kwargs = {'num_workers': 1,
                       'pin_memory': True,
                       'shuffle': True}
        train_kwargs.update(cuda_kwargs)
        test_kwargs.update(cuda_kwargs)
    train_loader = torch.utils.data.DataLoader(ds_train,**train_kwargs)
    test_loader = torch.utils.data.DataLoader(ds_test, **test_kwargs)
    eval_loader = torch.utils.data.DataLoader(ds_eval, **test_kwargs)
    model = FNN(args=args).to(device)
    # This experiment trains with plain CE only; the LBE criterion is left
    # here commented out for reference.
    # criterion = LBELoss(args.depth-2, lbe_alpha=args.lbe_alpha, lbe_beta=args.lbe_beta) if args.lbe_beta != 0.0 else CELoss()
    criterion = CELoss()
    params = list(model.parameters()) + list(criterion.parameters())
    optimizer = optim.Adam(params, lr=args.learning_rate)
    # Note that pytorch calls kaiming per default via reset_parameters in __init__:
    # https://github.com/pytorch/pytorch/blob/master/torch/nn/modules/linear.py#L81
    seen_samples = 0
    epoch = 0
    for epoch in range(args.epochs):
        seen_samples = train(args, model, device, train_loader, optimizer, epoch, criterion, seen_samples)
        # Evaluate every 5th epoch; "accuracy" ends up holding the latest
        # test-split accuracy, which is what main() returns.
        if(epoch % 5 == 0):
            accuracy = test("eval", model, device, eval_loader, criterion, seen_samples)
            accuracy = test("test", model, device, test_loader, criterion, seen_samples)
    return accuracy
if __name__ == '__main__':
main() | 9,979 | 38.760956 | 177 | py |
layerwise-batch-entropy | layerwise-batch-entropy-main/experiment_deep_vanilla_fnn/batch_entropy.py | import torch
from torch import nn
import torch.nn.functional as F
import numpy as np
import scipy
import scipy.stats
import random
def batch_entropy(x):
    """Differential entropy of a mini-batch, estimated per unit and averaged.

    The values each unit takes across the batch dimension are treated as
    Gaussian, giving the closed-form entropy 0.5*log(pi*e*sigma^2 + 1);
    the "+1" keeps the logarithm finite for collapsed (zero-variance) units.

    Raises:
        Exception: if the batch holds fewer than two samples, since the
            standard deviation over the batch is undefined in that case.
    """
    if x.shape[0] <= 1:
        raise Exception("The batch entropy can only be calculated for |batch| > 1.")
    flat = torch.flatten(x, start_dim=1)
    sigma = flat.std(dim=0)
    per_unit = 0.5 * torch.log(np.pi * np.e * sigma ** 2 + 1)
    return per_unit.mean()
class LBELoss(nn.Module):
    """Layer-wise Batch Entropy (LBE) loss combined with cross entropy.

    Each intermediate activation's batch entropy is pushed towards a
    learnable per-layer target ``lbe_alpha_p`` (clamped from below by
    ``lbe_alpha_min``); the penalty is scaled by ``lbe_beta`` and by the
    current CE value.
    See also https://www.wolframalpha.com/input/?i=%28%28x-0.8%29*0.5%29**2+for+x+from+0+to+2+y+from+0+to+0.5
    """
    def __init__(self, num, lbe_alpha=0.5, lbe_alpha_min=0.5, lbe_beta=0.5):
        super(LBELoss, self).__init__()
        self.ce = nn.CrossEntropyLoss()
        lbe_alpha = torch.ones(num) * lbe_alpha
        self.lbe_alpha_p = torch.nn.Parameter(lbe_alpha, requires_grad=True)
        # Fix: the original unconditionally called .to("cuda"), which crashes
        # on CPU-only machines. Move eagerly only when CUDA exists; otherwise
        # lbe_per_layer() re-homes the bound lazily to match the parameter.
        self.lbe_alpha_min = torch.FloatTensor([lbe_alpha_min])
        if torch.cuda.is_available():
            self.lbe_alpha_min = self.lbe_alpha_min.to("cuda")
        self.lbe_beta = lbe_beta
    def lbe_per_layer(self, a, i):
        """Squared deviation of layer i's batch entropy from its target."""
        lbe_alpha_l = torch.abs(self.lbe_alpha_p[i])
        # Keep the bound on the same device as the (possibly moved) parameter.
        floor = torch.maximum(self.lbe_alpha_min.to(lbe_alpha_l.device), lbe_alpha_l)
        lbe_l = (batch_entropy(a) - floor) ** 2
        return lbe_l * self.lbe_beta
    def __call__(self, output, target):
        """Return (ce + lbe, ce, lbe); lbe is zero when no activations given."""
        output, A = output
        ce = self.ce(output, target)
        if A is None:
            return ce, ce, torch.zeros(1)
        losses = [self.lbe_per_layer(a, i) for i, a in enumerate(A)]
        # Scaling by ce keeps the two terms on comparable magnitudes.
        lbe = torch.mean(torch.stack(losses)) * ce
        return ce + lbe, ce, lbe
class CELoss(nn.Module):
    """Plain cross-entropy criterion with the LBELoss call signature.

    The model's ``(logits, activations)`` tuple is accepted but only the
    logits are used; a zero LBE component is reported so callers can treat
    both criteria uniformly.
    """
    def __init__(self):
        super(CELoss, self).__init__()
        self.ce = nn.CrossEntropyLoss()
        # Dummy LBE state so logging code that inspects lbe_alpha_p works.
        self.lbe_alpha_p = torch.zeros(1)
        self.lbe_beta = torch.zeros(1)
    def __call__(self, output, target):
        logits, _activations = output
        ce_value = self.ce(logits, target)
        return ce_value, ce_value, 0.0
| 2,157 | 31.208955 | 113 | py |
layerwise-batch-entropy | layerwise-batch-entropy-main/experiment_deep_vanilla_fnn/train.py | from __future__ import print_function
import argparse
import torch
import copy
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch_optimizer as optim_special
from torchvision import datasets, transforms
from torch.optim.lr_scheduler import StepLR
from batch_entropy import batch_entropy, LBELoss, CELoss
import time
import numpy as np
import wandb
# ---------------------------------------------------------------------------
# Command-line interface for the deep vanilla FNN experiment: network shape,
# LBE hyper-parameters (alpha/beta) and optimization settings.
# NOTE(review): the description text "Conflicting bundl--deptes" looks like a
# copy/paste artifact - confirm the intended wording upstream.
# ---------------------------------------------------------------------------
parser = argparse.ArgumentParser(description='Conflicting bundl--deptes with PyTorch and MNIST')
parser.add_argument('--arch', type=str, default="FNN", metavar='S',
                    help='Architecture - For WanDB filtering')
parser.add_argument('--depth', type=int, default=500, metavar='S',
                    help='Depth of network')
parser.add_argument('--width', type=int, default=1000, metavar='S',
                    help='Width of network')
parser.add_argument('--num_classes', type=int, default=10,
                    help='Number of classes used for predictions')
parser.add_argument('--batch_size', type=int, default=128, metavar='N',
                    help='input batch size for training (default: 64)')
parser.add_argument('--test_batch_size', type=int, default=128, metavar='N',
                    help='input batch size for testing (default: 1024)')
parser.add_argument('--epochs', type=int, default=100, metavar='N',
                    help='number of epochs to train (default: 14)')
parser.add_argument('--learning_rate', type=float, default=1e-5, metavar='Learning rate',
                    help='learning rate (default: 1.0)')
parser.add_argument('--lbe_alpha', type=float, default=2.6,
                    help='Desired entropy at the beginning of trainig.')
parser.add_argument('--lbe_alpha_min', type=float, default=0.2,
                    help='Desired entropy at the beginning of trainig.')
parser.add_argument('--lbe_beta', type=float, default=1e-2,
                    help='Weight lbe loss.')
parser.add_argument('--no_cuda', action='store_true', default=False,
                    help='disables CUDA training')
parser.add_argument('--seed', type=int, default=42, metavar='S',
                    help='random seed (default: 1)')
parser.add_argument('--log_interval', type=int, default=50, metavar='N',
                    help='how many batches to wait before logging training status')
parser.add_argument('--save_model', action='store_true', default=False,
                    help='For Saving the current Model')
args = parser.parse_args()
# Skip inconsistent hyper-parameter sweeps early: LBE only makes sense when
# alpha and beta are either both zero (pure CE) or both non-zero (CE + LBE).
# NOTE(review): wandb.finish() is invoked before any wandb.init() - presumably
# a no-op that just marks the sweep run as finished; confirm intended behavior.
if((args.lbe_alpha == 0 and args.lbe_beta != 0) or (args.lbe_alpha != 0 and args.lbe_beta == 0)):
    wandb.finish(exit_code=0)
    exit()
# Get device
use_cuda = not args.no_cuda and torch.cuda.is_available()
torch.manual_seed(args.seed)
device = torch.device("cuda" if use_cuda else "cpu")
print(f"Using device {device}")
class FNN(nn.Module):
    """Vanilla deep fully-connected MNIST classifier.

    Layout: input Linear (784 -> width), ``depth - 2`` hidden Linear layers,
    an embedding Linear and a final classifier head; every layer but the
    classifier is followed by ReLU.  ``forward`` also returns the hidden-layer
    activations so the caller can compute batch-entropy statistics.
    """
    def __init__(self, args):
        super(FNN, self).__init__()
        self.args = args
        self.width = args.width
        self.depth = args.depth
        self.fc_in = nn.Linear(28*28, self.width)
        fcs = [nn.Linear(self.width, self.width) for i in range(self.depth-2)]
        self.fcs = nn.ModuleList(fcs)
        self.fc_embeddings = nn.Linear(self.width, self.width)
        # Fix: previously `fcs[-1].out_features`, which raises IndexError when
        # depth == 2 (no hidden layers). Every hidden layer has out_features
        # == self.width anyway, so this is the same value when fcs is non-empty.
        self.fc_classifier = nn.Linear(self.width, args.num_classes)
    def forward(self, x):
        """Return (logits, list of hidden-layer post-ReLU activations)."""
        a = []
        x = torch.flatten(x, 1)
        x = F.relu(self.fc_in(x))
        for fc in self.fcs:
            x = F.relu(fc(x))
            a.append(x)
        x = F.relu(self.fc_embeddings(x))
        x = self.fc_classifier(x)
        return x, a
def train(args, model, device, train_loader, optimizer, epoch, criterion, seen_samples):
    """Run one training epoch, periodically logging entropy stats to wandb.

    Returns the updated running count of samples seen, used as the global
    wandb step across epochs.
    """
    model.train()
    Hs = []  # NOTE(review): never populated or read - candidate for removal
    loss = None
    for batch_idx, (data, target) in enumerate(train_loader):
        start = time.time()
        data, target = data.to(device), target.to(device)
        optimizer.zero_grad()
        # Model returns logits plus the hidden-layer activations.
        output, A = model(data)
        # Add loss if necessary to optimize into the correct direction
        loss, ce_loss, lbe_loss = criterion((output, A), target)
        loss.backward()
        optimizer.step()
        end = time.time()
        seen_samples += output.shape[0]
        if batch_idx % args.log_interval == 0:
            entropies = [batch_entropy(a) for a in A]
            H_out = entropies[-1]
            H_avg = torch.mean(torch.stack(entropies))
            # Number of layers whose batch entropy is still positive, i.e.
            # layers whose activations have not collapsed.
            H_pos_count = np.sum([1 if e > 0 else 0 for e in entropies])
            pred = output.argmax(dim=1, keepdim=True)
            correct = pred.eq(target.view_as(pred)).sum().item()
            train_acc = correct / output.shape[0]
            # Summary statistics of the learnable per-layer entropy targets.
            lbe_alpha_mean = torch.mean(criterion.lbe_alpha_p)
            lbe_alpha_min = torch.min(criterion.lbe_alpha_p)
            lbe_alpha_max = torch.max(criterion.lbe_alpha_p)
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tAccuracy: {:.4f}\tLoss: {:.4f}\tTime: {:.4f}\tH_last: {}\t H_avg: {} \t H_pos_count {}'.format(
                epoch, batch_idx * len(data), len(train_loader.dataset),
                100. * batch_idx / len(train_loader), train_acc, loss.item(), end - start, H_out, H_avg, H_pos_count))
            wandb.log({
                "train/h_out": H_out,
                "train/h_avg": H_avg,
                "train/h_pos_count": H_pos_count,
                "train/loss": loss,
                "train/loss_lbe": lbe_loss,
                "train/loss_ce": ce_loss,
                "train/accuracy": train_acc,
                "train/lbe_alpha_p": lbe_alpha_mean,
                "train/lbe_alpha_p_min": lbe_alpha_min,
                "train/lbe_alpha_p_max": lbe_alpha_max,
            }, step=seen_samples)
    return seen_samples
# Per-split history of recent accuracies; test() keeps the last 5 entries so
# the logged accuracy is a sliding average rather than a single-epoch value.
test_acc_sliding = {}
def test(name, model, device, test_loader, criterion, seen_samples):
    """Evaluate on one split and log a 5-point sliding-average accuracy.

    ``name`` ("eval"/"test") selects the wandb metric prefix and the sliding
    window bucket.  Returns the raw (non-smoothed) accuracy of this pass.
    """
    global test_acc_sliding
    model.eval()
    test_loss = 0
    correct = 0
    test_acc_sliding[name] = [] if name not in test_acc_sliding else test_acc_sliding[name]
    with torch.no_grad():
        for data, target in test_loader:
            data, target = data.to(device), target.to(device)
            output, A = model(data)
            # criterion returns (total, ce, lbe); accumulate the total loss.
            test_loss += criterion((output, A), target)[0]
            pred = output.argmax(dim=1, keepdim=True)
            correct += pred.eq(target.view_as(pred)).sum().item()
    test_loss = test_loss / len(test_loader)
    test_acc = correct / len(test_loader.dataset)
    # Sliding avg of test acc over the last epochs
    test_acc_sliding[name].append(test_acc)
    test_acc_sliding[name] = test_acc_sliding[name][-5:]
    print('\n{} set: Average loss: {:.4f}, Accuracy: {}/{} ({:.3f}%)\n'.format(name,
        test_loss, correct, len(test_loader.dataset), test_acc * 100))
    wandb.log({
        f"{name}/accuracy": np.mean(test_acc_sliding[name]),
        f"{name}/loss_ce": test_loss
    }, step=seen_samples)
    return test_acc
def main():
    """Train the deep vanilla FNN on MNIST with LBE+CE (or plain CE)."""
    # Init dataset
    transform_train=transforms.Compose([
        transforms.RandomAffine(degrees=20, translate=(0.1,0.1), scale=(0.9, 1.1)),
        transforms.ColorJitter(brightness=0.2, contrast=0.2),
        transforms.ToTensor(),
        transforms.Normalize((0.1307,), (0.3081,))])
    ds_train = datasets.MNIST('.data', train=1, download=True, transform=transform_train)
    ds_train, ds_eval = torch.utils.data.random_split(ds_train, [50000, 10000])
    transform_test=transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.1307,), (0.3081,))])
    ds_test = datasets.MNIST('.data', train=False, transform=transform_test)
    wandb.init(config=args)
    train_kwargs = {'batch_size': args.batch_size, "shuffle": True}
    test_kwargs = {'batch_size': args.test_batch_size, "shuffle": False}
    if use_cuda:
        # NOTE(review): cuda_kwargs re-sets shuffle=True and is applied to
        # test_kwargs too, silently overriding shuffle=False for the eval and
        # test loaders - likely unintended, confirm before relying on order.
        cuda_kwargs = {'num_workers': 1,
                       'pin_memory': True,
                       'shuffle': True}
        train_kwargs.update(cuda_kwargs)
        test_kwargs.update(cuda_kwargs)
    train_loader = torch.utils.data.DataLoader(ds_train,**train_kwargs)
    test_loader = torch.utils.data.DataLoader(ds_test, **test_kwargs)
    eval_loader = torch.utils.data.DataLoader(ds_eval, **test_kwargs)
    model = FNN(args=args).to(device)
    # LBE regularizes the depth-1 intermediate activations; lbe_beta == 0
    # falls back to a plain CE criterion with the same call signature.
    criterion = LBELoss(args.depth-1, lbe_alpha=args.lbe_alpha, lbe_alpha_min=args.lbe_alpha_min, lbe_beta=args.lbe_beta) if args.lbe_beta != 0.0 else CELoss()
    params = list(model.parameters()) + list(criterion.parameters())
    optimizer = optim.Adam(params, lr=args.learning_rate)
    # Note that pytorch calls kaiming per default via reset_parameters in __init__:
    # https://github.com/pytorch/pytorch/blob/master/torch/nn/modules/linear.py#L81
    seen_samples = 0
    epoch = 0
    for epoch in range(args.epochs):
        seen_samples = train(args, model, device, train_loader, optimizer, epoch, criterion, seen_samples)
        # Evaluate every 5th epoch; "accuracy" ends up holding the latest
        # test-split accuracy, which is what main() returns.
        if(epoch % 5 == 0):
            accuracy = test("eval", model, device, eval_loader, criterion, seen_samples)
            accuracy = test("test", model, device, test_loader, criterion, seen_samples)
    return accuracy
if __name__ == '__main__':
main()
| 9,103 | 39.283186 | 159 | py |
layerwise-batch-entropy | layerwise-batch-entropy-main/experiment_transformer/batch_entropy.py | import torch
from torch import nn
import torch.nn.functional as F
import numpy as np
import scipy
import scipy.stats
import random
def batch_entropy(x):
    """Average per-unit differential entropy of a mini-batch.

    Assumes each unit's values are Gaussian across the batch dimension and
    applies the closed-form entropy 0.5*log(pi*e*sigma^2 + 1); the "+1"
    keeps the log finite when a unit's variance collapses to zero.

    Raises:
        Exception: for batches with fewer than two samples, where the
            batch standard deviation is undefined.
    """
    if x.shape[0] <= 1:
        raise Exception("The batch entropy can only be calculated for |batch| > 1.")
    flattened = torch.flatten(x, start_dim=1)
    std_per_unit = torch.std(flattened, dim=0)
    unit_entropies = 0.5 * torch.log(np.pi * np.e * std_per_unit ** 2 + 1)
    return torch.mean(unit_entropies)
class LBELoss(nn.Module):
    """Layer-wise Batch Entropy (LBE) loss combined with cross entropy.

    Each hidden state's batch entropy is pushed towards a learnable
    per-layer target ``lbe_alpha_p`` (clamped from below by
    ``lbe_alpha_min``); the penalty is scaled by ``lbe_beta`` and by the
    current CE value.
    See also https://www.wolframalpha.com/input/?i=%28%28x-0.8%29*0.5%29**2+for+x+from+0+to+2+y+from+0+to+0.5
    """
    def __init__(self, num, lbe_alpha=0.5, lbe_alpha_min=0.2, lbe_beta=0.5):
        super(LBELoss, self).__init__()
        self.ce = nn.CrossEntropyLoss()
        lbe_alpha = torch.ones(num) * lbe_alpha
        self.lbe_alpha_p = torch.nn.Parameter(lbe_alpha, requires_grad=True)
        # Fix: the original unconditionally called .to("cuda"), which crashes
        # on CPU-only machines. Move eagerly only when CUDA exists; otherwise
        # lbe_per_layer() re-homes the bound lazily to match the parameter.
        self.lbe_alpha_min = torch.FloatTensor([lbe_alpha_min])
        if torch.cuda.is_available():
            self.lbe_alpha_min = self.lbe_alpha_min.to("cuda")
        self.lbe_beta = lbe_beta
    def lbe_per_layer(self, a, i):
        """Squared deviation of layer i's batch entropy from its target."""
        lbe_alpha_l = torch.abs(self.lbe_alpha_p[i])
        # Keep the bound on the same device as the (possibly moved) parameter.
        floor = torch.maximum(self.lbe_alpha_min.to(lbe_alpha_l.device), lbe_alpha_l)
        lbe_l = (batch_entropy(a) - floor) ** 2
        return lbe_l * self.lbe_beta
    def __call__(self, output, target):
        """Return (ce + lbe, ce, lbe); lbe is zero when no activations given."""
        output, A = output
        ce = self.ce(output, target)
        if A is None:
            return ce, ce, torch.zeros(1)
        losses = [self.lbe_per_layer(a, i) for i, a in enumerate(A)]
        # Scaling by ce keeps the two terms on comparable magnitudes.
        lbe = torch.mean(torch.stack(losses)) * ce
        return ce + lbe, ce, lbe
class CELoss(nn.Module):
    """Cross-entropy-only criterion matching the LBELoss call signature.

    Accepts the model's ``(logits, hidden_states)`` tuple, discards the
    hidden states, and reports a zero LBE term so trainer code can handle
    either criterion interchangeably.
    """
    def __init__(self):
        super(CELoss, self).__init__()
        self.ce = nn.CrossEntropyLoss()
        # Placeholder LBE state so code that logs lbe_alpha_p still works.
        self.lbe_alpha_p = torch.zeros(1)
        self.lbe_beta = torch.zeros(1)
    def __call__(self, output, target):
        logits, _hidden = output
        ce_value = self.ce(logits, target)
        return ce_value, ce_value, 0.0
| 2,157 | 31.208955 | 113 | py |
layerwise-batch-entropy | layerwise-batch-entropy-main/experiment_transformer/run_glue.py | #!/usr/bin/env python
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Finetuning the library models for sequence classification on GLUE."""
# You can also adapt this script on your own text classification task. Pointers for this are left as comments.
from gc import callbacks
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
from sqlalchemy import null
import wandb
from transformers.integrations import WandbCallback
import torch
import datasets
import numpy as np
from datasets import load_dataset, load_metric
import transformers
from transformers import (
AutoConfig,
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
PretrainedConfig,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from torch.nn import CrossEntropyLoss
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
from batch_entropy import batch_entropy, CELoss, LBELoss
from models import model_factory
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
# check_min_version("4.16.0.dev0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt")
# Mapping from GLUE task name to the raw-text column name(s) of its dataset;
# single-sentence tasks carry None as the second key.
task_to_keys = {
    "cola": ("sentence", None),
    "mnli": ("premise", "hypothesis"),
    "mrpc": ("sentence1", "sentence2"),
    "qnli": ("question", "sentence"),
    "qqp": ("question1", "question2"),
    "rte": ("sentence1", "sentence2"),
    "sst2": ("sentence", None),
    "stsb": ("sentence1", "sentence2"),
    "wnli": ("sentence1", "sentence2"),
}
logger = logging.getLogger(__name__)
class CustomLossTrainer(Trainer):
    """HuggingFace Trainer variant that replaces the default loss with LBE+CE.

    When ``lbe_beta`` is non-zero an LBELoss over every hidden state (one per
    transformer layer plus the embedding output) is used; otherwise a plain
    CELoss with the same call signature.  Loss components and batch-entropy
    statistics are logged to wandb every ``eval_steps`` during training.
    """
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # NOTE(review): requires `model` and `args` to be passed as keyword
        # arguments; purely positional Trainer(...) calls would raise KeyError.
        model = kwargs["model"]
        self.num_labels = model.config.num_labels
        self.num_hidden_layers = model.config.num_hidden_layers
        self.args = kwargs["args"]
        if self.args.lbe_beta != 0.0:
            # One entropy target per hidden layer plus the embedding output.
            self.criterion = LBELoss(
                self.num_hidden_layers + 1,
                lbe_alpha=self.args.lbe_alpha,
                lbe_beta=self.args.lbe_beta)
        else:
            self.criterion = CELoss()
        # This way we add it to be optimized by the optimizer...
        model.lbe_alpha = self.criterion.lbe_alpha_p
        model.to("cuda")
    def compute_loss(self, model, inputs, return_outputs=False):
        """Compute LBE+CE; hidden states are only requested while training."""
        # Under DataParallel the underlying module lives at model.module.
        is_training = model.module.training if hasattr(model, "module") else model.training
        labels = inputs.get("labels").view(-1)
        outputs = model(**inputs, output_hidden_states=is_training)
        logits = outputs.get('logits').view(-1, self.num_labels)
        hidden_states = outputs.get('hidden_states') if is_training else None
        loss, ce_loss, lbe_loss = self.criterion((logits, hidden_states), labels)
        # Log loss information to wandb
        if self.state.global_step % self.args.eval_steps == 0 and is_training:
            entropies = [batch_entropy(a) for a in hidden_states]
            # for i, e in enumerate(entropies):
            #     print("Layer %d: %.2f" % (i, e.cpu()))
            H_out = entropies[-1]
            H_avg = torch.mean(torch.stack(entropies))
            # Summary statistics of the learnable per-layer entropy targets.
            lbe_alpha_mean = torch.mean(self.criterion.lbe_alpha_p)
            lbe_alpha_min = torch.min(self.criterion.lbe_alpha_p)
            lbe_alpha_max = torch.max(self.criterion.lbe_alpha_p)
            wandb.log({
                "train/loss_ce": ce_loss,
                "train/loss_lbe": lbe_loss,
                "train/h_out": H_out,
                "train/h_avg": H_avg,
                "train/loss": loss,
                "train/lbe_alpha_p": lbe_alpha_mean,
                "train/lbe_alpha_p_min": lbe_alpha_min,
                "train/lbe_alpha_p_max": lbe_alpha_max,
            }, step=self.state.global_step)
        return (loss, outputs) if return_outputs else loss
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    Using `HfArgumentParser` we can turn this class
    into argparse arguments to be able to specify them on
    the command line.
    """
    # Exactly one of task_name, dataset_name, or train_file/validation_file
    # must be supplied; __post_init__ enforces this.
    task_name: Optional[str] = field(
        default=None,
        metadata={"help": "The name of the task to train on: " + ", ".join(task_to_keys.keys())},
    )
    dataset_name: Optional[str] = field(
        default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": "The maximum total input sequence length after tokenization. Sequences longer "
            "than this will be truncated, sequences shorter will be padded."
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    pad_to_max_length: bool = field(
        default=True,
        metadata={
            "help": "Whether to pad all samples to `max_seq_length`. "
            "If False, will pad the samples dynamically when batching to the maximum length in the batch."
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": "For debugging purposes or quicker training, truncate the number of training examples to this "
            "value if set."
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
            "value if set."
        },
    )
    max_predict_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": "For debugging purposes or quicker training, truncate the number of prediction examples to this "
            "value if set."
        },
    )
    train_file: Optional[str] = field(
        default=None, metadata={"help": "A csv or a json file containing the training data."}
    )
    validation_file: Optional[str] = field(
        default=None, metadata={"help": "A csv or a json file containing the validation data."}
    )
    test_file: Optional[str] = field(default=None, metadata={"help": "A csv or a json file containing the test data."})
    def __post_init__(self):
        # Validate the mutually exclusive data sources: a GLUE task name, a
        # hub dataset, or local csv/json files with matching extensions.
        if self.task_name is not None:
            self.task_name = self.task_name.lower()
            if self.task_name not in task_to_keys.keys():
                raise ValueError("Unknown task, you should pick one in " + ",".join(task_to_keys.keys()))
        elif self.dataset_name is not None:
            pass
        elif self.train_file is None or self.validation_file is None:
            raise ValueError("Need either a GLUE task, a training/validation file or a dataset name.")
        else:
            train_extension = self.train_file.split(".")[-1]
            assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file."
            validation_extension = self.validation_file.split(".")[-1]
            assert (
                validation_extension == train_extension
            ), "`validation_file` should have the same extension (csv or json) as `train_file`."
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """
    # Only model_name_or_path is required; config/tokenizer default to it.
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": "Will use the token generated when running `transformers-cli login` (necessary to use this script "
            "with private models)."
        },
    )
@dataclass
class CustomTrainingArguments(TrainingArguments):
    # Extends the stock HuggingFace TrainingArguments with the LBE
    # hyper-parameters consumed by CustomLossTrainer.
    lbe_beta: float = field(
        default=0.0,
        metadata={
            "help": "LBE_beta value to control beta loss."
        },
    )
    lbe_alpha: float = field(
        default=1.0,
        metadata={
            "help": "How LBE alpha values are initialized."
        },
    )
    # NOTE(review): "lather" in the help text below is likely a typo for
    # "later" - left untouched since it is runtime metadata.
    is_hpo: bool = field(
        default=False,
        metadata={
            "help": "Informational for lather evaluation and reporting in wandb."
        },
    )
def main():
    """End-to-end GLUE / text-classification fine-tuning driver.

    Parses (ModelArguments, DataTrainingArguments, CustomTrainingArguments),
    loads the dataset, tokenizer and model, trains with CustomLossTrainer,
    evaluates on a 20% split held out from the training set, and finally
    reports on the official validation split (used here as the test set),
    logging everything to wandb.
    """
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()
    # Log on each process the small summary.
    # (fixed: the two f-strings were previously joined with no separator,
    # producing e.g. "n_gpu: 1distributed training: ...")
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")
    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )
    # Set seed before initializing model.
    set_seed(training_args.seed)
    # Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
    # or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
    #
    # For CSV/JSON files, this script will use as labels the column called 'label' and as pair of sentences the
    # sentences in columns called 'sentence1' and 'sentence2' if such column exists or the first two columns not named
    # label if at least two columns are provided.
    #
    # If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
    # single column. You can easily tweak this behavior (see below)
    #
    # In distributed training, the load_dataset function guarantee that only one local process can concurrently
    # download the dataset.
    if data_args.task_name is not None:
        # Downloading and loading a dataset from the hub.
        raw_datasets = load_dataset("glue", data_args.task_name, cache_dir=model_args.cache_dir)
    elif data_args.dataset_name is not None:
        # Downloading and loading a dataset from the hub.
        raw_datasets = load_dataset(
            data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir
        )
    else:
        # Loading a dataset from your local files.
        # CSV/JSON training and evaluation files are needed.
        data_files = {"train": data_args.train_file, "validation": data_args.validation_file}
        # Get the test dataset: you can provide your own CSV/JSON test file (see below)
        # when you use `do_predict` without specifying a GLUE benchmark task.
        if training_args.do_predict:
            if data_args.test_file is not None:
                train_extension = data_args.train_file.split(".")[-1]
                test_extension = data_args.test_file.split(".")[-1]
                assert (
                    test_extension == train_extension
                ), "`test_file` should have the same extension (csv or json) as `train_file`."
                data_files["test"] = data_args.test_file
            else:
                raise ValueError("Need either a GLUE task or a test file for `do_predict`.")
        for key in data_files.keys():
            logger.info(f"load a local file for {key}: {data_files[key]}")
        if data_args.train_file.endswith(".csv"):
            # Loading a dataset from local csv files
            raw_datasets = load_dataset("csv", data_files=data_files, cache_dir=model_args.cache_dir)
        else:
            # Loading a dataset from local json files
            raw_datasets = load_dataset("json", data_files=data_files, cache_dir=model_args.cache_dir)
    # See more about loading any type of standard or custom dataset at
    # https://huggingface.co/docs/datasets/loading_datasets.html.
    # Labels
    if data_args.task_name is not None:
        is_regression = data_args.task_name == "stsb"
        if not is_regression:
            label_list = raw_datasets["train"].features["label"].names
            num_labels = len(label_list)
        else:
            num_labels = 1
    else:
        # Trying to have good defaults here, don't hesitate to tweak to your needs.
        is_regression = raw_datasets["train"].features["label"].dtype in ["float32", "float64"]
        if is_regression:
            num_labels = 1
        else:
            # A useful fast method:
            # https://huggingface.co/docs/datasets/package_reference/main_classes.html#datasets.Dataset.unique
            label_list = raw_datasets["train"].unique("label")
            label_list.sort()  # Let's sort it for determinism
            num_labels = len(label_list)
    # Load pretrained model and tokenizer
    #
    # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        finetuning_task=data_args.task_name,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast_tokenizer,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    # model_factory builds the custom (batch-entropy-aware) model instead of
    # AutoModelForSequenceClassification.
    model = model_factory.from_pretrained(model_args, config)
    # Preprocessing the raw_datasets
    if data_args.task_name is not None:
        sentence1_key, sentence2_key = task_to_keys[data_args.task_name]
    else:
        # Again, we try to have some nice defaults but don't hesitate to tweak to your use case.
        non_label_column_names = [name for name in raw_datasets["train"].column_names if name != "label"]
        if "sentence1" in non_label_column_names and "sentence2" in non_label_column_names:
            sentence1_key, sentence2_key = "sentence1", "sentence2"
        else:
            if len(non_label_column_names) >= 2:
                sentence1_key, sentence2_key = non_label_column_names[:2]
            else:
                sentence1_key, sentence2_key = non_label_column_names[0], None
    # Padding strategy
    if data_args.pad_to_max_length:
        padding = "max_length"
    else:
        # We will pad later, dynamically at batch creation, to the max sequence length in each batch
        padding = False
    # Some models have set the order of the labels to use, so let's make sure we do use it.
    label_to_id = None
    if (
        model.config.label2id != PretrainedConfig(num_labels=num_labels).label2id
        and data_args.task_name is not None
        and not is_regression
    ):
        # Some have all caps in their config, some don't.
        label_name_to_id = {k.lower(): v for k, v in model.config.label2id.items()}
        if list(sorted(label_name_to_id.keys())) == list(sorted(label_list)):
            label_to_id = {i: int(label_name_to_id[label_list[i]]) for i in range(num_labels)}
        else:
            # (fixed: this message was previously passed as two positional args to
            # logger.warning, so the detailed part was silently dropped)
            logger.warning(
                "Your model seems to have been trained with labels, but they don't match the dataset: "
                f"model labels: {list(sorted(label_name_to_id.keys()))}, dataset labels: {list(sorted(label_list))}."
                "\nIgnoring the model labels as a result."
            )
    elif data_args.task_name is None and not is_regression:
        label_to_id = {v: i for i, v in enumerate(label_list)}
    if label_to_id is not None:
        model.config.label2id = label_to_id
        model.config.id2label = {id: label for label, id in config.label2id.items()}
    elif data_args.task_name is not None and not is_regression:
        model.config.label2id = {l: i for i, l in enumerate(label_list)}
        model.config.id2label = {id: label for label, id in config.label2id.items()}
    if data_args.max_seq_length > tokenizer.model_max_length:
        # (fixed: missing space between "the" and "model" in the message)
        logger.warning(
            f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the "
            f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
        )
    max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)

    def preprocess_function(examples):
        # Tokenize the texts
        args = (
            (examples[sentence1_key],) if sentence2_key is None else (examples[sentence1_key], examples[sentence2_key])
        )
        result = tokenizer(*args, padding=padding, max_length=max_seq_length, truncation=True)
        # Map labels to IDs (not necessary for GLUE tasks)
        if label_to_id is not None and "label" in examples:
            result["label"] = [(label_to_id[l] if l != -1 else -1) for l in examples["label"]]
        return result

    with training_args.main_process_first(desc="dataset map pre-processing"):
        raw_datasets = raw_datasets.map(
            preprocess_function,
            batched=True,
            load_from_cache_file=not data_args.overwrite_cache,
            desc="Running tokenizer on dataset",
        )
    #
    # Create train/eval/test split
    #
    if "train" not in raw_datasets:
        raise ValueError("--do_train requires a train dataset")
    train_dataset = raw_datasets["train"].shuffle(seed=training_args.seed)
    # Split into train and eval (hold out 20% of the shuffled training set)
    eval_size = int(len(train_dataset) * 0.2)
    eval_dataset = train_dataset.select(range(0, eval_size))
    train_dataset = train_dataset.select(range(eval_size, len(train_dataset)))
    # Create test set (the official validation split is repurposed as "test")
    test_dataset = raw_datasets["validation_matched" if data_args.task_name == "mnli" else "validation"]
    # Get the metric function
    if data_args.task_name is not None:
        metric = load_metric("glue", data_args.task_name)
    else:
        metric = load_metric("accuracy")

    # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
    # predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p: EvalPrediction):
        preds = p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions
        preds = np.squeeze(preds) if is_regression else np.argmax(preds, axis=1)
        if data_args.task_name is not None:
            result = metric.compute(predictions=preds, references=p.label_ids)
            if len(result) > 1:
                result["combined_score"] = np.mean(list(result.values())).item()
            return result
        elif is_regression:
            return {"mse": ((preds - p.label_ids) ** 2).mean().item()}
        else:
            return {"accuracy": (preds == p.label_ids).astype(np.float32).mean().item()}

    # Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
    if data_args.pad_to_max_length:
        data_collator = default_data_collator
    elif training_args.fp16:
        data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8)
    else:
        data_collator = None

    # Initialize our Trainer
    class WandbInit(WandbCallback):
        """WandbCallback that aborts invalid LBE configurations and records run metadata."""

        def __init__(self):
            super().__init__()

        def setup(self, args, state, model, **kwargs):
            # If hyperparams are invalid we stop - this also speeds up the hparam search.
            # alpha and beta must be both zero or both non-zero.
            if (training_args.lbe_alpha == 0 and training_args.lbe_beta != 0) or (
                training_args.lbe_alpha != 0 and training_args.lbe_beta == 0
            ):
                wandb.finish(exit_code=0)
                exit()
            super().setup(args, state, model, **kwargs)
            if state.is_world_process_zero:
                custom_dict = {
                    "lbe_beta": training_args.lbe_beta,
                    "lbe_alpha": training_args.lbe_alpha,
                    "dataset": data_args.task_name,
                    "arch": "NLP",
                    "model": model_args.model_name_or_path,
                }
                self._wandb.config.update(custom_dict, allow_val_change=True)
                self._wandb.Api().flush()

    wandb_cb = WandbInit()
    trainer = CustomLossTrainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
        tokenizer=tokenizer,
        data_collator=data_collator,
        callbacks=[wandb_cb],
    )
    #
    # Training
    #
    checkpoint = None
    if training_args.resume_from_checkpoint is not None:
        checkpoint = training_args.resume_from_checkpoint
    elif last_checkpoint is not None:
        checkpoint = last_checkpoint
    train_result = trainer.train(resume_from_checkpoint=checkpoint)
    metrics_eval = train_result.metrics
    max_train_samples = (
        data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
    )
    metrics_eval["train_samples"] = min(max_train_samples, len(train_dataset))
    trainer.save_model()  # Saves the tokenizer too for easy upload
    trainer.log_metrics("train", metrics_eval)
    trainer.save_state()
    #
    # Eval at last step
    #
    trainer.evaluate(eval_dataset=eval_dataset, metric_key_prefix="eval")
    #
    # Test
    #
    logger.info("*** Test ***")
    # Loop to handle MNLI double evaluation (matched, mis-matched)
    tasks = [data_args.task_name]
    test_datasets = [test_dataset]
    if data_args.task_name == "mnli":
        tasks.append("mnli-mm")
        test_datasets.append(raw_datasets["validation_mismatched"])
    for test_dataset, task in zip(test_datasets, tasks):
        # Now we disable automatic logging of the evaluation to ensure
        # that our test results do not overwrite the eval results...
        metrics_test = trainer.evaluate(eval_dataset=test_dataset, metric_key_prefix="test")
        metrics_test["test_samples"] = len(test_dataset)
        # We used the official eval set as test set, because we split
        # an eval set from the training set... therefore we rename it
        # to be consistent across all experiments.
        # (fixed: the loop variable was previously named `metric`, which rebound
        # the closure variable used by compute_metrics and broke the second
        # (mnli-mm) evaluation pass with an AttributeError)
        log_dict = {}
        for key in metrics_test:
            metric_name = key.replace("test_", "test/")
            log_dict[metric_name] = metrics_test[key]
        wandb.log(log_dict)
    wandb.Api().flush()
    kwargs = {"finetuned_from": model_args.model_name_or_path, "tasks": "text-classification"}
    if data_args.task_name is not None:
        kwargs["language"] = "en"
        kwargs["dataset_tags"] = "glue"
        kwargs["dataset_args"] = data_args.task_name
        kwargs["dataset"] = f"GLUE {data_args.task_name.upper()}"
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
def _mp_fn(index):
    # For xla_spawn (TPUs)
    # ``index`` is the process ordinal supplied by xla_spawn; it is unused
    # because main() reads its configuration from sys.argv.
    main()
if __name__ == "__main__":
    # Script entry point. (Removed dataset-extraction residue that had been
    # fused onto this line: "| 27,224 | 40.063348 | 145 | py |".)
    main()
# source: layerwise-batch-entropy / experiment_transformer/models/bert/modeling_bert.py
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch BERT model. """
import math
import os
import warnings
from dataclasses import dataclass
from typing import Optional, Tuple
import torch
import torch.utils.checkpoint
from packaging import version
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from transformers.activations import ACT2FN
from transformers.file_utils import (
ModelOutput,
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
replace_return_docstrings,
)
from transformers.modeling_outputs import (
BaseModelOutputWithPastAndCrossAttentions,
BaseModelOutputWithPoolingAndCrossAttentions,
CausalLMOutputWithCrossAttentions,
MaskedLMOutput,
MultipleChoiceModelOutput,
NextSentencePredictorOutput,
QuestionAnsweringModelOutput,
SequenceClassifierOutput,
TokenClassifierOutput,
)
from transformers.modeling_utils import (
PreTrainedModel,
apply_chunking_to_forward,
find_pruneable_heads_and_indices,
prune_linear_layer,
)
from transformers.utils import logging
from transformers.models.bert.configuration_bert import BertConfig
# Module-level logger for this file.
logger = logging.get_logger(__name__)
# Defaults referenced by the docstring decorators used throughout this module.
_CHECKPOINT_FOR_DOC = "bert-base-uncased"
_CONFIG_FOR_DOC = "BertConfig"
_TOKENIZER_FOR_DOC = "BertTokenizer"
# Canonical BERT checkpoints published on the Hugging Face hub that this
# architecture can load.
BERT_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "bert-base-uncased",
    "bert-large-uncased",
    "bert-base-cased",
    "bert-large-cased",
    "bert-base-multilingual-uncased",
    "bert-base-multilingual-cased",
    "bert-base-chinese",
    "bert-base-german-cased",
    "bert-large-uncased-whole-word-masking",
    "bert-large-cased-whole-word-masking",
    "bert-large-uncased-whole-word-masking-finetuned-squad",
    "bert-large-cased-whole-word-masking-finetuned-squad",
    "bert-base-cased-finetuned-mrpc",
    "bert-base-german-dbmdz-cased",
    "bert-base-german-dbmdz-uncased",
    "cl-tohoku/bert-base-japanese",
    "cl-tohoku/bert-base-japanese-whole-word-masking",
    "cl-tohoku/bert-base-japanese-char",
    "cl-tohoku/bert-base-japanese-char-whole-word-masking",
    "TurkuNLP/bert-base-finnish-cased-v1",
    "TurkuNLP/bert-base-finnish-uncased-v1",
    "wietsedv/bert-base-dutch-cased",
    # See all BERT models at https://huggingface.co/models?filter=bert
]
def load_tf_weights_in_bert(model, config, tf_checkpoint_path):
    """Load tf checkpoints in a pytorch model.

    Walks every variable in the TF checkpoint, maps its slash-separated scope
    name onto attributes of ``model``, and copies the weights in place.
    Returns the same ``model`` instance.
    """
    try:
        # Imported lazily so torch-only users don't need tensorflow installed.
        import re
        import numpy as np
        import tensorflow as tf
    except ImportError:
        logger.error(
            "Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see "
            "https://www.tensorflow.org/install/ for installation instructions."
        )
        raise
    tf_path = os.path.abspath(tf_checkpoint_path)
    logger.info(f"Converting TensorFlow checkpoint from {tf_path}")
    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_path)
    names = []
    arrays = []
    for name, shape in init_vars:
        logger.info(f"Loading TF weight {name} with shape {shape}")
        array = tf.train.load_variable(tf_path, name)
        names.append(name)
        arrays.append(array)
    for name, array in zip(names, arrays):
        name = name.split("/")
        # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculated m and v
        # which are not required for using pretrained model
        if any(
            n in ["adam_v", "adam_m", "AdamWeightDecayOptimizer", "AdamWeightDecayOptimizer_1", "global_step"]
            for n in name
        ):
            logger.info(f"Skipping {'/'.join(name)}")
            continue
        pointer = model
        # Descend through the model one scope component at a time; components
        # like "layer_3" are split into ("layer", 3) for list indexing.
        for m_name in name:
            if re.fullmatch(r"[A-Za-z]+_\d+", m_name):
                scope_names = re.split(r"_(\d+)", m_name)
            else:
                scope_names = [m_name]
            # TF naming -> PyTorch attribute translation.
            if scope_names[0] == "kernel" or scope_names[0] == "gamma":
                pointer = getattr(pointer, "weight")
            elif scope_names[0] == "output_bias" or scope_names[0] == "beta":
                pointer = getattr(pointer, "bias")
            elif scope_names[0] == "output_weights":
                pointer = getattr(pointer, "weight")
            elif scope_names[0] == "squad":
                pointer = getattr(pointer, "classifier")
            else:
                try:
                    pointer = getattr(pointer, scope_names[0])
                except AttributeError:
                    # NOTE(review): this `continue` only advances the inner
                    # (scope-component) loop, not the outer variable loop —
                    # same quirk as upstream transformers.
                    logger.info(f"Skipping {'/'.join(name)}")
                    continue
            if len(scope_names) >= 2:
                num = int(scope_names[1])
                pointer = pointer[num]
        if m_name[-11:] == "_embeddings":
            # Embedding matrices live in the module's `weight` attribute.
            pointer = getattr(pointer, "weight")
        elif m_name == "kernel":
            # TF stores dense kernels transposed relative to torch Linear.
            array = np.transpose(array)
        try:
            if pointer.shape != array.shape:
                raise ValueError(f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched")
        except AssertionError as e:
            # NOTE(review): unreachable — the guarded code raises ValueError,
            # not AssertionError; likely left over from an earlier `assert`.
            e.args += (pointer.shape, array.shape)
            raise
        logger.info(f"Initialize PyTorch weight {name}")
        pointer.data = torch.from_numpy(array)
    return model
class BertEmbeddings(nn.Module):
    """Construct the embeddings from word, position and token_type embeddings."""

    def __init__(self, config):
        super().__init__()
        self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
        self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
        self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
        # Deliberately named LayerNorm (not snake_case) so TensorFlow
        # checkpoints still map onto this attribute.
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
        # position_ids (1, max_position_embeddings) is contiguous in memory
        # and exported when the model is serialized.
        self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)))
        if version.parse(torch.__version__) > version.parse("1.6.0"):
            self.register_buffer(
                "token_type_ids",
                torch.zeros(self.position_ids.size(), dtype=torch.long),
                persistent=False,
            )

    def forward(
        self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None, past_key_values_length=0
    ):
        # Shape (batch, seq) regardless of whether ids or embeddings were given.
        shape = input_ids.size() if input_ids is not None else inputs_embeds.size()[:-1]
        seq_length = shape[1]

        if position_ids is None:
            position_ids = self.position_ids[:, past_key_values_length : seq_length + past_key_values_length]

        # Default token_type_ids to all zeros. Prefer the registered buffer so
        # tracing works without the caller passing token_type_ids (issue #5664).
        if token_type_ids is None:
            if hasattr(self, "token_type_ids"):
                token_type_ids = self.token_type_ids[:, :seq_length].expand(shape[0], seq_length)
            else:
                token_type_ids = torch.zeros(shape, dtype=torch.long, device=self.position_ids.device)

        if inputs_embeds is None:
            inputs_embeds = self.word_embeddings(input_ids)

        embeddings = inputs_embeds + self.token_type_embeddings(token_type_ids)
        if self.position_embedding_type == "absolute":
            embeddings = embeddings + self.position_embeddings(position_ids)
        return self.dropout(self.LayerNorm(embeddings))
class BertSelfAttention(nn.Module):
    """Multi-head scaled dot-product attention (self- or cross-attention).

    Projects hidden states to query/key/value, computes attention, and for
    decoder use caches key/value states across generation steps. Supports
    absolute or relative position embedding variants.
    """

    def __init__(self, config, position_embedding_type=None):
        super().__init__()
        # hidden_size must split evenly across heads unless the config
        # declares a separate embedding_size.
        if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
            raise ValueError(
                f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
                f"heads ({config.num_attention_heads})"
            )
        self.num_attention_heads = config.num_attention_heads
        self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
        self.all_head_size = self.num_attention_heads * self.attention_head_size
        self.query = nn.Linear(config.hidden_size, self.all_head_size)
        self.key = nn.Linear(config.hidden_size, self.all_head_size)
        self.value = nn.Linear(config.hidden_size, self.all_head_size)
        self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
        # Explicit argument wins over the config value.
        self.position_embedding_type = position_embedding_type or getattr(
            config, "position_embedding_type", "absolute"
        )
        if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
            self.max_position_embeddings = config.max_position_embeddings
            # One embedding per possible signed distance in [-(L-1), L-1].
            self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size)
        self.is_decoder = config.is_decoder

    def transpose_for_scores(self, x):
        # (batch, seq, all_head) -> (batch, heads, seq, head_size)
        new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
        x = x.view(*new_x_shape)
        return x.permute(0, 2, 1, 3)

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        head_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        past_key_value=None,
        output_attentions=False,
    ):
        mixed_query_layer = self.query(hidden_states)
        # If this is instantiated as a cross-attention module, the keys
        # and values come from an encoder; the attention mask needs to be
        # such that the encoder's padding tokens are not attended to.
        is_cross_attention = encoder_hidden_states is not None
        if is_cross_attention and past_key_value is not None:
            # reuse k,v, cross_attentions
            key_layer = past_key_value[0]
            value_layer = past_key_value[1]
            attention_mask = encoder_attention_mask
        elif is_cross_attention:
            key_layer = self.transpose_for_scores(self.key(encoder_hidden_states))
            value_layer = self.transpose_for_scores(self.value(encoder_hidden_states))
            attention_mask = encoder_attention_mask
        elif past_key_value is not None:
            # Uni-directional self-attention: append current step's k/v to the cache.
            key_layer = self.transpose_for_scores(self.key(hidden_states))
            value_layer = self.transpose_for_scores(self.value(hidden_states))
            key_layer = torch.cat([past_key_value[0], key_layer], dim=2)
            value_layer = torch.cat([past_key_value[1], value_layer], dim=2)
        else:
            key_layer = self.transpose_for_scores(self.key(hidden_states))
            value_layer = self.transpose_for_scores(self.value(hidden_states))
        query_layer = self.transpose_for_scores(mixed_query_layer)
        if self.is_decoder:
            # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
            # Further calls to cross_attention layer can then reuse all cross-attention
            # key/value_states (first "if" case)
            # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
            # all previous decoder key/value_states. Further calls to uni-directional self-attention
            # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
            # if encoder bi-directional self-attention `past_key_value` is always `None`
            past_key_value = (key_layer, value_layer)
        # Take the dot product between "query" and "key" to get the raw attention scores.
        attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
        if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
            # Add content->position (and optionally position->content) terms
            # based on the learned distance embeddings.
            seq_length = hidden_states.size()[1]
            position_ids_l = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(-1, 1)
            position_ids_r = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(1, -1)
            distance = position_ids_l - position_ids_r
            positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1)
            positional_embedding = positional_embedding.to(dtype=query_layer.dtype)  # fp16 compatibility
            if self.position_embedding_type == "relative_key":
                relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
                attention_scores = attention_scores + relative_position_scores
            elif self.position_embedding_type == "relative_key_query":
                relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
                relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding)
                attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key
        attention_scores = attention_scores / math.sqrt(self.attention_head_size)
        if attention_mask is not None:
            # Apply the attention mask is (precomputed for all layers in BertModel forward() function)
            attention_scores = attention_scores + attention_mask
        # Normalize the attention scores to probabilities.
        attention_probs = nn.Softmax(dim=-1)(attention_scores)
        # This is actually dropping out entire tokens to attend to, which might
        # seem a bit unusual, but is taken from the original Transformer paper.
        attention_probs = self.dropout(attention_probs)
        # Mask heads if we want to
        if head_mask is not None:
            attention_probs = attention_probs * head_mask
        context_layer = torch.matmul(attention_probs, value_layer)
        # (batch, heads, seq, head_size) -> (batch, seq, all_head)
        context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
        new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
        context_layer = context_layer.view(*new_context_layer_shape)
        outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
        if self.is_decoder:
            outputs = outputs + (past_key_value,)
        return outputs
class BertSelfOutput(nn.Module):
    """Output projection of the attention block: Linear -> dropout -> residual -> LayerNorm.

    Unlike stock transformers this forward additionally returns the raw linear
    projection taken before dropout and the residual ("before_skip") —
    presumably consumed by the layer-wise batch-entropy loss; confirm against
    the training code.
    """

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states, input_tensor):
        projected = self.dense(hidden_states)
        # Keep the pre-dropout projection for entropy bookkeeping upstream.
        normalized = self.LayerNorm(self.dropout(projected) + input_tensor)
        return normalized, projected
class BertAttention(nn.Module):
    """Combines BertSelfAttention with its output projection (BertSelfOutput).

    NOTE(review): unlike stock transformers, forward() returns a second value
    (`before_skip`, the pre-dropout attention output projection) — presumably
    for the layer-wise batch-entropy loss; confirm against BertLayer usage.
    """

    def __init__(self, config, position_embedding_type=None):
        super().__init__()
        self.self = BertSelfAttention(config, position_embedding_type=position_embedding_type)
        self.output = BertSelfOutput(config)
        self.pruned_heads = set()

    def prune_heads(self, heads):
        """Remove the given attention heads and shrink the q/k/v/output projections accordingly."""
        if len(heads) == 0:
            return
        heads, index = find_pruneable_heads_and_indices(
            heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
        )
        # Prune linear layers
        self.self.query = prune_linear_layer(self.self.query, index)
        self.self.key = prune_linear_layer(self.self.key, index)
        self.self.value = prune_linear_layer(self.self.value, index)
        self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
        # Update hyper params and store pruned heads
        self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
        self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
        self.pruned_heads = self.pruned_heads.union(heads)

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        head_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        past_key_value=None,
        output_attentions=False,
    ):
        self_outputs = self.self(
            hidden_states,
            attention_mask,
            head_mask,
            encoder_hidden_states,
            encoder_attention_mask,
            past_key_value,
            output_attentions,
        )
        # BertSelfOutput returns (normalized_output, pre-dropout projection).
        attention_output, before_skip = self.output(self_outputs[0], hidden_states)
        outputs = (attention_output,) + self_outputs[1:]  # add attentions if we output them
        return outputs, before_skip
class BertIntermediate(nn.Module):
    """Expansion half of the transformer feed-forward block: Linear -> activation."""

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
        # config.hidden_act may be a name (looked up in ACT2FN) or a callable.
        if isinstance(config.hidden_act, str):
            self.intermediate_act_fn = ACT2FN[config.hidden_act]
        else:
            self.intermediate_act_fn = config.hidden_act

    def forward(self, hidden_states):
        return self.intermediate_act_fn(self.dense(hidden_states))
class BertOutput(nn.Module):
    """Contraction half of the feed-forward block: Linear -> dropout -> residual -> LayerNorm.

    Also returns the pre-dropout linear projection — presumably for the
    layer-wise batch-entropy bookkeeping done by the caller (confirm against
    BertLayer/training code).
    """

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states, input_tensor):
        projected = self.dense(hidden_states)
        # Residual add + LayerNorm after dropout, as in stock BERT.
        out = self.LayerNorm(self.dropout(projected) + input_tensor)
        return out, projected
class BertLayer(nn.Module):
    """One transformer block: self-attention, optional cross-attention (decoder only), and
    the feed-forward sublayer.

    In this file the attention and feed-forward output modules additionally return their
    pre-residual ("before skip") activations; ``forward`` propagates them as
    ``before_skip_1`` (attention) and ``before_skip_2`` (feed-forward).
    """

    def __init__(self, config):
        super().__init__()
        # Feed-forward chunking trades compute for memory; 0 disables it.
        self.chunk_size_feed_forward = config.chunk_size_feed_forward
        self.seq_len_dim = 1
        self.attention = BertAttention(config)
        self.is_decoder = config.is_decoder
        self.add_cross_attention = config.add_cross_attention
        if self.add_cross_attention:
            if not self.is_decoder:
                raise ValueError(f"{self} should be used as a decoder model if cross attention is added")
            self.crossattention = BertAttention(config, position_embedding_type="absolute")
        self.intermediate = BertIntermediate(config)
        self.output = BertOutput(config)

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        head_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        past_key_value=None,
        output_attentions=False,
    ):
        """Run the block. Returns ``(outputs, before_skip_1, before_skip_2)`` where
        ``outputs`` follows the usual BERT layer convention:
        ``(layer_output, [attentions...], [present_key_value])``.
        """
        # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
        self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
        self_attention_outputs, before_skip_1 = self.attention(
            hidden_states,
            attention_mask,
            head_mask,
            output_attentions=output_attentions,
            past_key_value=self_attn_past_key_value,
        )
        attention_output = self_attention_outputs[0]
        # if decoder, the last output is tuple of self-attn cache
        if self.is_decoder:
            outputs = self_attention_outputs[1:-1]
            present_key_value = self_attention_outputs[-1]
        else:
            outputs = self_attention_outputs[1:]  # add self attentions if we output attention weights
        cross_attn_present_key_value = None
        if self.is_decoder and encoder_hidden_states is not None:
            if not hasattr(self, "crossattention"):
                raise ValueError(
                    f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers by setting `config.add_cross_attention=True`"
                )
            # cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple
            cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
            # NOTE: cross-attention's `before_skip` is discarded here; only the two tensors
            # returned below are surfaced.
            cross_attention_outputs = self.crossattention(
                attention_output,
                attention_mask,
                head_mask,
                encoder_hidden_states,
                encoder_attention_mask,
                cross_attn_past_key_value,
                output_attentions,
            )
            attention_output = cross_attention_outputs[0]
            outputs = outputs + cross_attention_outputs[1:-1]  # add cross attentions if we output attention weights
            # add cross-attn cache to positions 3,4 of present_key_value tuple
            cross_attn_present_key_value = cross_attention_outputs[-1]
            present_key_value = present_key_value + cross_attn_present_key_value
        # Feed-forward runs (possibly chunked along the sequence dim) via feed_forward_chunk,
        # which also yields the pre-residual feed-forward activation.
        layer_output, before_skip_2 = apply_chunking_to_forward(
            self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output
        )
        outputs = (layer_output,) + outputs
        # if decoder, return the attn key/values as the last output
        if self.is_decoder:
            outputs = outputs + (present_key_value,)
        return outputs, before_skip_1, before_skip_2

    def feed_forward_chunk(self, attention_output):
        """Intermediate + output sublayers; returns (layer_output, pre-residual activation)."""
        intermediate_output = self.intermediate(attention_output)
        layer_output, before_skip = self.output(intermediate_output, attention_output)
        return layer_output, before_skip
class BertEncoder(nn.Module):
    """Stack of ``BertLayer`` blocks.

    Modified behavior: when ``output_hidden_states=True``, the ``hidden_states`` field of the
    returned output collects each layer's pre-residual feed-forward activation
    (``before_skip_2``) rather than the per-layer hidden states — see the intentionally
    commented-out stock accumulation lines below.
    """

    def __init__(self, config):
        super().__init__()
        self.config = config
        self.layer = nn.ModuleList([BertLayer(config) for _ in range(config.num_hidden_layers)])
        self.gradient_checkpointing = False

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        head_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        past_key_values=None,
        use_cache=None,
        output_attentions=False,
        output_hidden_states=False,
        return_dict=True,
    ):
        all_hidden_states = () if output_hidden_states else None
        all_self_attentions = () if output_attentions else None
        all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None
        next_decoder_cache = () if use_cache else None
        for i, layer_module in enumerate(self.layer):
            layer_head_mask = head_mask[i] if head_mask is not None else None
            past_key_value = past_key_values[i] if past_key_values is not None else None
            if self.gradient_checkpointing and self.training:
                if use_cache:
                    # Cached key/values cannot be recomputed under checkpointing.
                    logger.warning(
                        "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
                    )
                    use_cache = False
                def create_custom_forward(module):
                    # Close over the non-tensor args; checkpoint() only forwards tensors.
                    def custom_forward(*inputs):
                        return module(*inputs, past_key_value, output_attentions)
                    return custom_forward
                layer_outputs, before_skip_1, before_skip_2 = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(layer_module),
                    hidden_states,
                    attention_mask,
                    layer_head_mask,
                    encoder_hidden_states,
                    encoder_attention_mask,
                )
            else:
                layer_outputs, before_skip_1, before_skip_2 = layer_module(
                    hidden_states,
                    attention_mask,
                    layer_head_mask,
                    encoder_hidden_states,
                    encoder_attention_mask,
                    past_key_value,
                    output_attentions,
                )
            if output_hidden_states:
                # Collect the pre-residual feed-forward activation for this layer
                # (the attention-side variant is available but disabled).
                # all_hidden_states += (before_skip_1,)
                all_hidden_states += (before_skip_2,)
            hidden_states = layer_outputs[0]
            if use_cache:
                next_decoder_cache += (layer_outputs[-1],)
            if output_attentions:
                all_self_attentions = all_self_attentions + (layer_outputs[1],)
                if self.config.add_cross_attention:
                    all_cross_attentions = all_cross_attentions + (layer_outputs[2],)
        # Stock accumulation of the final hidden state is intentionally disabled.
        # if output_hidden_states:
        #     all_hidden_states = all_hidden_states + (hidden_states,)
        if not return_dict:
            return tuple(
                v
                for v in [
                    hidden_states,
                    next_decoder_cache,
                    all_hidden_states,
                    all_self_attentions,
                    all_cross_attentions,
                ]
                if v is not None
            )
        return BaseModelOutputWithPastAndCrossAttentions(
            last_hidden_state=hidden_states,
            past_key_values=next_decoder_cache,
            hidden_states=all_hidden_states,
            attentions=all_self_attentions,
            cross_attentions=all_cross_attentions,
        )
class BertPooler(nn.Module):
    """Pools the sequence into a single vector via the first ([CLS]) token's hidden state,
    passed through a tanh-activated dense layer."""

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.activation = nn.Tanh()

    def forward(self, hidden_states):
        # "Pooling" here is simply selecting the first token of each sequence.
        cls_state = hidden_states[:, 0]
        return self.activation(self.dense(cls_state))
class BertPredictionHeadTransform(nn.Module):
    """Dense -> activation -> LayerNorm transform applied before the LM decoder head."""

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        # String activation names are resolved through ACT2FN; callables pass through.
        if isinstance(config.hidden_act, str):
            self.transform_act_fn = ACT2FN[config.hidden_act]
        else:
            self.transform_act_fn = config.hidden_act
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)

    def forward(self, hidden_states):
        return self.LayerNorm(self.transform_act_fn(self.dense(hidden_states)))
class BertLMPredictionHead(nn.Module):
    """Language-modeling head: transform hidden states, then decode to vocabulary logits."""

    def __init__(self, config):
        super().__init__()
        self.transform = BertPredictionHeadTransform(config)
        # The decoder weight is tied to the input embeddings elsewhere; only the
        # per-token output bias is a head-local parameter.
        self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
        self.bias = nn.Parameter(torch.zeros(config.vocab_size))
        # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
        self.decoder.bias = self.bias

    def forward(self, hidden_states):
        return self.decoder(self.transform(hidden_states))
class BertOnlyMLMHead(nn.Module):
    """Wraps the LM prediction head for models with only a masked-LM objective."""

    def __init__(self, config):
        super().__init__()
        self.predictions = BertLMPredictionHead(config)

    def forward(self, sequence_output):
        return self.predictions(sequence_output)
class BertOnlyNSPHead(nn.Module):
    """Next-sentence-prediction head: a binary classifier over the pooled output."""

    def __init__(self, config):
        super().__init__()
        self.seq_relationship = nn.Linear(config.hidden_size, 2)

    def forward(self, pooled_output):
        return self.seq_relationship(pooled_output)
class BertPreTrainingHeads(nn.Module):
    """Joint pre-training heads: MLM vocabulary logits plus the NSP binary classifier."""

    def __init__(self, config):
        super().__init__()
        self.predictions = BertLMPredictionHead(config)
        self.seq_relationship = nn.Linear(config.hidden_size, 2)

    def forward(self, sequence_output, pooled_output):
        # MLM scores come from the per-token sequence output; NSP from the pooled vector.
        return self.predictions(sequence_output), self.seq_relationship(pooled_output)
class BertPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """
    config_class = BertConfig
    load_tf_weights = load_tf_weights_in_bert
    base_model_prefix = "bert"
    supports_gradient_checkpointing = True
    _keys_to_ignore_on_load_missing = [r"position_ids"]

    def _init_weights(self, module):
        """Initialize the weights"""
        std = self.config.initializer_range
        if isinstance(module, nn.Linear):
            # Slightly different from the TF version which uses truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            module.weight.data.normal_(mean=0.0, std=std)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.padding_idx is not None:
                # Keep the padding embedding at zero.
                module.weight.data[module.padding_idx].zero_()
        elif isinstance(module, nn.LayerNorm):
            module.weight.data.fill_(1.0)
            module.bias.data.zero_()

    def _set_gradient_checkpointing(self, module, value=False):
        # Only the encoder stack supports gradient checkpointing.
        if isinstance(module, BertEncoder):
            module.gradient_checkpointing = value
@dataclass
class BertForPreTrainingOutput(ModelOutput):
    """
    Output type of :class:`~transformers.BertForPreTraining`.
    Args:
        loss (`optional`, returned when ``labels`` is provided, ``torch.FloatTensor`` of shape :obj:`(1,)`):
            Total loss as the sum of the masked language modeling loss and the next sequence prediction
            (classification) loss.
        prediction_logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, config.vocab_size)`):
            Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
        seq_relationship_logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, 2)`):
            Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation
            before SoftMax).
        hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
            Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
            of shape :obj:`(batch_size, sequence_length, hidden_size)`.
            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
            Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
            sequence_length, sequence_length)`.
            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
    """
    # NOTE(review): in this modified file, BertEncoder fills `hidden_states` with per-layer
    # pre-residual feed-forward activations rather than the stock per-layer hidden states.
    loss: Optional[torch.FloatTensor] = None
    prediction_logits: torch.FloatTensor = None
    seq_relationship_logits: torch.FloatTensor = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None
BERT_START_DOCSTRING = r"""
This model inherits from :class:`~transformers.PreTrainedModel`. Check the superclass documentation for the generic
methods the library implements for all its model (such as downloading or saving, resizing the input embeddings,
pruning heads etc.)
This model is also a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`__
subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to
general usage and behavior.
Parameters:
config (:class:`~transformers.BertConfig`): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model
weights.
"""
BERT_INPUTS_DOCSTRING = r"""
Args:
input_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using :class:`~transformers.BertTokenizer`. See
:meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__` for
details.
`What are input IDs? <../glossary.html#input-ids>`__
attention_mask (:obj:`torch.FloatTensor` of shape :obj:`({0})`, `optional`):
Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
`What are attention masks? <../glossary.html#attention-mask>`__
token_type_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`, `optional`):
Segment token indices to indicate first and second portions of the inputs. Indices are selected in ``[0,
1]``:
- 0 corresponds to a `sentence A` token,
- 1 corresponds to a `sentence B` token.
`What are token type IDs? <../glossary.html#token-type-ids>`_
position_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`, `optional`):
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range ``[0,
config.max_position_embeddings - 1]``.
`What are position IDs? <../glossary.html#position-ids>`_
head_mask (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`):
Mask to nullify selected heads of the self-attention modules. Mask values selected in ``[0, 1]``:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`({0}, hidden_size)`, `optional`):
Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert :obj:`input_ids` indices into associated
vectors than the model's internal embedding lookup matrix.
output_attentions (:obj:`bool`, `optional`):
Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned
tensors for more detail.
output_hidden_states (:obj:`bool`, `optional`):
Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for
more detail.
return_dict (:obj:`bool`, `optional`):
Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.
"""
@add_start_docstrings(
    "The bare Bert Model transformer outputting raw hidden-states without any specific head on top.",
    BERT_START_DOCSTRING,
)
class BertModel(BertPreTrainedModel):
    """
    The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of
    cross-attention is added between the self-attention layers, following the architecture described in `Attention is
    all you need <https://arxiv.org/abs/1706.03762>`__ by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit,
    Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin.
    To behave as an decoder the model needs to be initialized with the :obj:`is_decoder` argument of the configuration
    set to :obj:`True`. To be used in a Seq2Seq model, the model needs to initialized with both :obj:`is_decoder`
    argument and :obj:`add_cross_attention` set to :obj:`True`; an :obj:`encoder_hidden_states` is then expected as an
    input to the forward pass.

    Note: the encoder used here is this file's modified ``BertEncoder``, whose
    ``hidden_states`` output collects pre-residual feed-forward activations.
    """

    def __init__(self, config, add_pooling_layer=True):
        super().__init__(config)
        self.config = config
        self.embeddings = BertEmbeddings(config)
        self.encoder = BertEncoder(config)
        # The pooler is optional so heads that don't need it can skip the extra parameters.
        self.pooler = BertPooler(config) if add_pooling_layer else None
        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.embeddings.word_embeddings

    def set_input_embeddings(self, value):
        self.embeddings.word_embeddings = value

    def _prune_heads(self, heads_to_prune):
        """
        Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
        class PreTrainedModel
        """
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads)

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        processor_class=_TOKENIZER_FOR_DOC,
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndCrossAttentions,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        past_key_values=None,
        use_cache=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        r"""
        encoder_hidden_states  (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
            Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
            the model is configured as a decoder.
        encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
            Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
            the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``:
            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.
        past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
            Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
            If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids`
            (those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)`
            instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`.
        use_cache (:obj:`bool`, `optional`):
            If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up
            decoding (see :obj:`past_key_values`).
        """
        # Resolve per-call flags against config defaults.
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        if self.config.is_decoder:
            use_cache = use_cache if use_cache is not None else self.config.use_cache
        else:
            # Caching only makes sense for uni-directional (decoder) attention.
            use_cache = False
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")
        batch_size, seq_length = input_shape
        device = input_ids.device if input_ids is not None else inputs_embeds.device
        # past_key_values_length
        past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0
        if attention_mask is None:
            # Default: attend to everything, including cached positions.
            attention_mask = torch.ones(((batch_size, seq_length + past_key_values_length)), device=device)
        if token_type_ids is None:
            if hasattr(self.embeddings, "token_type_ids"):
                # Reuse the registered all-zero buffer to avoid allocating a new tensor.
                buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length]
                buffered_token_type_ids_expanded = buffered_token_type_ids.expand(batch_size, seq_length)
                token_type_ids = buffered_token_type_ids_expanded
            else:
                token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape, device)
        # If a 2D or 3D attention mask is provided for the cross-attention
        # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if self.config.is_decoder and encoder_hidden_states is not None:
            encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
            encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
            if encoder_attention_mask is None:
                encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
            encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
        else:
            encoder_extended_attention_mask = None
        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
        embedding_output = self.embeddings(
            input_ids=input_ids,
            position_ids=position_ids,
            token_type_ids=token_type_ids,
            inputs_embeds=inputs_embeds,
            past_key_values_length=past_key_values_length,
        )
        encoder_outputs = self.encoder(
            embedding_output,
            attention_mask=extended_attention_mask,
            head_mask=head_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_extended_attention_mask,
            past_key_values=past_key_values,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = encoder_outputs[0]
        pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
        if not return_dict:
            return (sequence_output, pooled_output) + encoder_outputs[1:]
        return BaseModelOutputWithPoolingAndCrossAttentions(
            last_hidden_state=sequence_output,
            pooler_output=pooled_output,
            past_key_values=encoder_outputs.past_key_values,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
            cross_attentions=encoder_outputs.cross_attentions,
        )
@add_start_docstrings(
    """
    Bert Model with two heads on top as done during the pretraining: a `masked language modeling` head and a `next
    sentence prediction (classification)` head.
    """,
    BERT_START_DOCSTRING,
)
class BertForPreTraining(BertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.bert = BertModel(config)
        self.cls = BertPreTrainingHeads(config)
        # Initialize weights and apply final processing
        self.post_init()

    def get_output_embeddings(self):
        # The MLM decoder projection doubles as the output embedding matrix (weight tying).
        return self.cls.predictions.decoder

    def set_output_embeddings(self, new_embeddings):
        self.cls.predictions.decoder = new_embeddings

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @replace_return_docstrings(output_type=BertForPreTrainingOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        next_sentence_label=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        r"""
        labels (:obj:`torch.LongTensor` of shape ``(batch_size, sequence_length)``, `optional`):
            Labels for computing the masked language modeling loss. Indices should be in ``[-100, 0, ...,
            config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are ignored
            (masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``
        next_sentence_label (``torch.LongTensor`` of shape ``(batch_size,)``, `optional`):
            Labels for computing the next sequence prediction (classification) loss. Input should be a sequence pair
            (see :obj:`input_ids` docstring) Indices should be in ``[0, 1]``:
            - 0 indicates sequence B is a continuation of sequence A,
            - 1 indicates sequence B is a random sequence.
        kwargs (:obj:`Dict[str, any]`, optional, defaults to `{}`):
            Used to hide legacy arguments that have been deprecated.
        Returns:
        Example::
            >>> from transformers import BertTokenizer, BertForPreTraining
            >>> import torch
            >>> tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
            >>> model = BertForPreTraining.from_pretrained('bert-base-uncased')
            >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
            >>> outputs = model(**inputs)
            >>> prediction_logits = outputs.prediction_logits
            >>> seq_relationship_logits = outputs.seq_relationship_logits
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.bert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output, pooled_output = outputs[:2]
        prediction_scores, seq_relationship_score = self.cls(sequence_output, pooled_output)
        total_loss = None
        # Both label sets must be supplied to compute the combined pre-training loss.
        if labels is not None and next_sentence_label is not None:
            loss_fct = CrossEntropyLoss()
            masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
            next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1))
            total_loss = masked_lm_loss + next_sentence_loss
        if not return_dict:
            output = (prediction_scores, seq_relationship_score) + outputs[2:]
            return ((total_loss,) + output) if total_loss is not None else output
        return BertForPreTrainingOutput(
            loss=total_loss,
            prediction_logits=prediction_scores,
            seq_relationship_logits=seq_relationship_score,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
@add_start_docstrings(
    """Bert Model with a `language modeling` head on top for CLM fine-tuning. """, BERT_START_DOCSTRING
)
class BertLMHeadModel(BertPreTrainedModel):
    # The pooler is never used for causal LM; missing decoder bias is re-created on load.
    _keys_to_ignore_on_load_unexpected = [r"pooler"]
    _keys_to_ignore_on_load_missing = [r"position_ids", r"predictions.decoder.bias"]

    def __init__(self, config):
        super().__init__(config)
        if not config.is_decoder:
            logger.warning("If you want to use `BertLMHeadModel` as a standalone, add `is_decoder=True.`")
        self.bert = BertModel(config, add_pooling_layer=False)
        self.cls = BertOnlyMLMHead(config)
        # Initialize weights and apply final processing
        self.post_init()

    def get_output_embeddings(self):
        # The LM decoder projection doubles as the output embedding matrix (weight tying).
        return self.cls.predictions.decoder

    def set_output_embeddings(self, new_embeddings):
        self.cls.predictions.decoder = new_embeddings

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @replace_return_docstrings(output_type=CausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        labels=None,
        past_key_values=None,
        use_cache=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        r"""
        encoder_hidden_states  (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
            Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
            the model is configured as a decoder.
        encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
            Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
            the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``:
            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.
        labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
            Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in
            ``[-100, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are
            ignored (masked), the loss is only computed for the tokens with labels n ``[0, ..., config.vocab_size]``
        past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
            Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
            If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids`
            (those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)`
            instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`.
        use_cache (:obj:`bool`, `optional`):
            If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up
            decoding (see :obj:`past_key_values`).
        Returns:
        Example::
            >>> from transformers import BertTokenizer, BertLMHeadModel, BertConfig
            >>> import torch
            >>> tokenizer = BertTokenizer.from_pretrained('bert-base-cased')
            >>> config = BertConfig.from_pretrained("bert-base-cased")
            >>> config.is_decoder = True
            >>> model = BertLMHeadModel.from_pretrained('bert-base-cased', config=config)
            >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
            >>> outputs = model(**inputs)
            >>> prediction_logits = outputs.logits
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        if labels is not None:
            # Training with labels never benefits from a decoding cache.
            use_cache = False
        outputs = self.bert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = outputs[0]
        prediction_scores = self.cls(sequence_output)
        lm_loss = None
        if labels is not None:
            # we are doing next-token prediction; shift prediction scores and input ids by one
            shifted_prediction_scores = prediction_scores[:, :-1, :].contiguous()
            labels = labels[:, 1:].contiguous()
            loss_fct = CrossEntropyLoss()
            lm_loss = loss_fct(shifted_prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
        if not return_dict:
            output = (prediction_scores,) + outputs[2:]
            return ((lm_loss,) + output) if lm_loss is not None else output
        return CausalLMOutputWithCrossAttentions(
            loss=lm_loss,
            logits=prediction_scores,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
            cross_attentions=outputs.cross_attentions,
        )

    def prepare_inputs_for_generation(self, input_ids, past=None, attention_mask=None, **model_kwargs):
        """Trim inputs for incremental decoding during generation."""
        input_shape = input_ids.shape
        # if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly
        if attention_mask is None:
            attention_mask = input_ids.new_ones(input_shape)
        # cut decoder_input_ids if past is used
        if past is not None:
            input_ids = input_ids[:, -1:]
        return {"input_ids": input_ids, "attention_mask": attention_mask, "past_key_values": past}

    def _reorder_cache(self, past, beam_idx):
        """Reorder cached key/values along the batch dim to follow beam-search reordering."""
        reordered_past = ()
        for layer_past in past:
            reordered_past += (tuple(past_state.index_select(0, beam_idx) for past_state in layer_past),)
        return reordered_past
@add_start_docstrings("""Bert Model with a `language modeling` head on top. """, BERT_START_DOCSTRING)
class BertForMaskedLM(BertPreTrainedModel):
_keys_to_ignore_on_load_unexpected = [r"pooler"]
_keys_to_ignore_on_load_missing = [r"position_ids", r"predictions.decoder.bias"]
    def __init__(self, config):
        """Build the MLM-only model: a pooler-free BERT encoder plus the MLM head."""
        super().__init__(config)
        if config.is_decoder:
            logger.warning(
                "If you want to use `BertForMaskedLM` make sure `config.is_decoder=False` for "
                "bi-directional self-attention."
            )
        self.bert = BertModel(config, add_pooling_layer=False)
        self.cls = BertOnlyMLMHead(config)
        # Initialize weights and apply final processing
        self.post_init()
    def get_output_embeddings(self):
        # The MLM decoder projection doubles as the output embedding matrix (weight tying).
        return self.cls.predictions.decoder
    def set_output_embeddings(self, new_embeddings):
        # Replace the decoder projection, e.g. after resizing the vocabulary.
        self.cls.predictions.decoder = new_embeddings
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        processor_class=_TOKENIZER_FOR_DOC,
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=MaskedLMOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        labels=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        r"""
        labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
            Labels for computing the masked language modeling loss. Indices should be in ``[-100, 0, ...,
            config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are ignored
            (masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.bert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = outputs[0]
        prediction_scores = self.cls(sequence_output)
        masked_lm_loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()  # -100 index = padding token
            masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
        if not return_dict:
            output = (prediction_scores,) + outputs[2:]
            return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
        return MaskedLMOutput(
            loss=masked_lm_loss,
            logits=prediction_scores,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
def prepare_inputs_for_generation(self, input_ids, attention_mask=None, **model_kwargs):
input_shape = input_ids.shape
effective_batch_size = input_shape[0]
# add a dummy token
if self.config.pad_token_id is None:
raise ValueError("The PAD token should be defined for generation")
attention_mask = torch.cat([attention_mask, attention_mask.new_zeros((attention_mask.shape[0], 1))], dim=-1)
dummy_token = torch.full(
(effective_batch_size, 1), self.config.pad_token_id, dtype=torch.long, device=input_ids.device
)
input_ids = torch.cat([input_ids, dummy_token], dim=1)
return {"input_ids": input_ids, "attention_mask": attention_mask}
@add_start_docstrings(
    """Bert Model with a `next sentence prediction (classification)` head on top. """,
    BERT_START_DOCSTRING,
)
class BertForNextSentencePrediction(BertPreTrainedModel):
    """BERT with the binary next-sentence-prediction head on the pooled output."""
    def __init__(self, config):
        super().__init__(config)
        self.bert = BertModel(config)
        self.cls = BertOnlyNSPHead(config)
        # Initialize weights and apply final processing
        self.post_init()
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @replace_return_docstrings(output_type=NextSentencePredictorOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
        **kwargs,
    ):
        r"""
        labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
            Labels for computing the next sequence prediction (classification) loss. Input should be a sequence pair
            (see ``input_ids`` docstring). Indices should be in ``[0, 1]``:
            - 0 indicates sequence B is a continuation of sequence A,
            - 1 indicates sequence B is a random sequence.
        Returns:
        Example::
            >>> from transformers import BertTokenizer, BertForNextSentencePrediction
            >>> import torch
            >>> tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
            >>> model = BertForNextSentencePrediction.from_pretrained('bert-base-uncased')
            >>> prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced."
            >>> next_sentence = "The sky is blue due to the shorter wavelength of blue light."
            >>> encoding = tokenizer(prompt, next_sentence, return_tensors='pt')
            >>> outputs = model(**encoding, labels=torch.LongTensor([1]))
            >>> logits = outputs.logits
            >>> assert logits[0, 0] < logits[0, 1] # next sentence was random
        """
        # Backwards compatibility: the old keyword is accepted but deprecated.
        if "next_sentence_label" in kwargs:
            warnings.warn(
                "The `next_sentence_label` argument is deprecated and will be removed in a future version, use `labels` instead.",
                FutureWarning,
            )
            labels = kwargs.pop("next_sentence_label")
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.bert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        # Classification uses the pooled [CLS] representation (output index 1).
        pooled_output = outputs[1]
        seq_relationship_scores = self.cls(pooled_output)
        next_sentence_loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()
            next_sentence_loss = loss_fct(seq_relationship_scores.view(-1, 2), labels.view(-1))
        if not return_dict:
            output = (seq_relationship_scores,) + outputs[2:]
            return ((next_sentence_loss,) + output) if next_sentence_loss is not None else output
        return NextSentencePredictorOutput(
            loss=next_sentence_loss,
            logits=seq_relationship_scores,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
@add_start_docstrings(
    """
    Bert Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled
    output) e.g. for GLUE tasks.
    """,
    BERT_START_DOCSTRING,
)
class BertForSequenceClassification(BertPreTrainedModel):
    """BERT with dropout + a linear classifier on the pooled [CLS] output."""
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.config = config
        self.bert = BertModel(config)
        classifier_dropout = (
            config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
        )
        self.dropout = nn.Dropout(classifier_dropout)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)
        # Initialize weights and apply final processing
        self.post_init()
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        processor_class=_TOKENIZER_FOR_DOC,
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=SequenceClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        r"""
        labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
            Labels for computing the sequence classification/regression loss. Indices should be in :obj:`[0, ...,
            config.num_labels - 1]`. If :obj:`config.num_labels == 1` a regression loss is computed (Mean-Square loss),
            If :obj:`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.bert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        pooled_output = outputs[1]
        if(output_hidden_states):
            # NOTE(review): non-standard local modification — appends the (pre-dropout)
            # pooled output to `hidden_states`. This attribute access only works when
            # `outputs` is a ModelOutput (return_dict=True) and relies on the caller
            # passing `output_hidden_states` explicitly (a None falls through even if
            # config.output_hidden_states is set) — confirm both assumptions.
            outputs.hidden_states += (pooled_output, )
        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)
        loss = None
        if labels is not None:
            # Infer the problem type once and cache it on the config.
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"
            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)
        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output
        return SequenceClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
@add_start_docstrings(
    """
    Bert Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a
    softmax) e.g. for RocStories/SWAG tasks.
    """,
    BERT_START_DOCSTRING,
)
class BertForMultipleChoice(BertPreTrainedModel):
    """BERT with a single-logit head per choice; choices are flattened into the batch."""
    def __init__(self, config):
        super().__init__(config)
        self.bert = BertModel(config)
        classifier_dropout = (
            config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
        )
        self.dropout = nn.Dropout(classifier_dropout)
        self.classifier = nn.Linear(config.hidden_size, 1)
        # Initialize weights and apply final processing
        self.post_init()
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length"))
    @add_code_sample_docstrings(
        processor_class=_TOKENIZER_FOR_DOC,
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=MultipleChoiceModelOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        r"""
        labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
            Labels for computing the multiple choice classification loss. Indices should be in ``[0, ...,
            num_choices-1]`` where :obj:`num_choices` is the size of the second dimension of the input tensors. (See
            :obj:`input_ids` above)
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]
        # Flatten (batch, num_choices, seq) -> (batch * num_choices, seq) so the
        # encoder processes every choice as an independent sequence.
        input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None
        attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
        token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None
        position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None
        inputs_embeds = (
            inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1))
            if inputs_embeds is not None
            else None
        )
        outputs = self.bert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        pooled_output = outputs[1]
        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)
        # Un-flatten back to one logit per choice for the softmax over choices.
        reshaped_logits = logits.view(-1, num_choices)
        loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(reshaped_logits, labels)
        if not return_dict:
            output = (reshaped_logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output
        return MultipleChoiceModelOutput(
            loss=loss,
            logits=reshaped_logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
@add_start_docstrings(
    """
    Bert Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for
    Named-Entity-Recognition (NER) tasks.
    """,
    BERT_START_DOCSTRING,
)
class BertForTokenClassification(BertPreTrainedModel):
    """BERT (no pooling layer) with a per-token linear classification head."""
    _keys_to_ignore_on_load_unexpected = [r"pooler"]
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.bert = BertModel(config, add_pooling_layer=False)
        classifier_dropout = (
            config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
        )
        self.dropout = nn.Dropout(classifier_dropout)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)
        # Initialize weights and apply final processing
        self.post_init()
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        processor_class=_TOKENIZER_FOR_DOC,
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TokenClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        r"""
        labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
            Labels for computing the token classification loss. Indices should be in ``[0, ..., config.num_labels -
            1]``.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.bert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = outputs[0]
        sequence_output = self.dropout(sequence_output)
        logits = self.classifier(sequence_output)
        loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()
            # Only keep active parts of the loss
            if attention_mask is not None:
                active_loss = attention_mask.view(-1) == 1
                active_logits = logits.view(-1, self.num_labels)
                # Padded positions get the loss's ignore_index so they drop out of the mean.
                active_labels = torch.where(
                    active_loss, labels.view(-1), torch.tensor(loss_fct.ignore_index).type_as(labels)
                )
                loss = loss_fct(active_logits, active_labels)
            else:
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output
        return TokenClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
@add_start_docstrings(
    """
    Bert Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear
    layers on top of the hidden-states output to compute `span start logits` and `span end logits`).
    """,
    BERT_START_DOCSTRING,
)
class BertForQuestionAnswering(BertPreTrainedModel):
    """BERT (no pooling layer) with a 2-logit-per-token span head (start/end)."""
    _keys_to_ignore_on_load_unexpected = [r"pooler"]
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.bert = BertModel(config, add_pooling_layer=False)
        self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
        # Initialize weights and apply final processing
        self.post_init()
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        processor_class=_TOKENIZER_FOR_DOC,
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=QuestionAnsweringModelOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        start_positions=None,
        end_positions=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        r"""
        start_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
            Labels for position (index) of the start of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (:obj:`sequence_length`). Position outside of the
            sequence are not taken into account for computing the loss.
        end_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
            Labels for position (index) of the end of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (:obj:`sequence_length`). Position outside of the
            sequence are not taken into account for computing the loss.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.bert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = outputs[0]
        # Split the 2-channel projection into per-token start and end logits.
        logits = self.qa_outputs(sequence_output)
        start_logits, end_logits = logits.split(1, dim=-1)
        start_logits = start_logits.squeeze(-1).contiguous()
        end_logits = end_logits.squeeze(-1).contiguous()
        total_loss = None
        if start_positions is not None and end_positions is not None:
            # If we are on multi-GPU, split add a dimension
            if len(start_positions.size()) > 1:
                start_positions = start_positions.squeeze(-1)
            if len(end_positions.size()) > 1:
                end_positions = end_positions.squeeze(-1)
            # sometimes the start/end positions are outside our model inputs, we ignore these terms
            ignored_index = start_logits.size(1)
            start_positions = start_positions.clamp(0, ignored_index)
            end_positions = end_positions.clamp(0, ignored_index)
            loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
            start_loss = loss_fct(start_logits, start_positions)
            end_loss = loss_fct(end_logits, end_positions)
            total_loss = (start_loss + end_loss) / 2
        if not return_dict:
            output = (start_logits, end_logits) + outputs[2:]
            return ((total_loss,) + output) if total_loss is not None else output
        return QuestionAnsweringModelOutput(
            loss=total_loss,
            start_logits=start_logits,
            end_logits=end_logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
| 80,584 | 41.682733 | 213 | py |
layerwise-batch-entropy | layerwise-batch-entropy-main/experiment_loss_surface/batch_entropy.py | import torch
from torch import nn
import torch.nn.functional as F
import numpy as np
import scipy
import scipy.stats
import random
def batch_entropy(x):
    """Differential-entropy estimate of a mini-batch of activations.

    Every flattened feature is modelled as a Gaussian over the batch
    dimension; the per-feature entropies are averaged into one scalar.
    The `+ 1` inside the log keeps the value finite for zero variance.
    """
    if x.shape[0] <= 1:
        raise Exception("The batch entropy can only be calculated for |batch| > 1.")
    flat = torch.flatten(x, start_dim=1)
    std_per_feature = torch.std(flat, dim=0)
    per_feature_entropy = 0.5 * torch.log(np.pi * np.e * std_per_feature ** 2 + 1)
    return torch.mean(per_feature_entropy)
class LBELoss(nn.Module):
    """Cross-entropy loss augmented with the layer-wise batch-entropy (LBE) term.

    Each regularized layer ``i`` has a learnable entropy target ``|lbe_alpha_p[i]|``
    bounded from below by ``lbe_alpha_min``; the squared gap between the layer's
    batch entropy and its target is averaged over layers, weighted by ``lbe_beta``
    and scaled by the current CE loss.
    See also https://www.wolframalpha.com/input/?i=%28%28x-0.8%29*0.5%29**2+for+x+from+0+to+2+y+from+0+to+0.5
    """
    def __init__(self, num, lbe_alpha=0.5, lbe_alpha_min=0.2, lbe_beta=0.5):
        """Args:
            num: number of intermediate layers whose activations are regularized.
            lbe_alpha: initial value of the learnable per-layer entropy targets.
            lbe_alpha_min: lower bound on the entropy targets.
            lbe_beta: weight of the LBE term relative to the CE loss.
        """
        super(LBELoss, self).__init__()
        self.ce = nn.CrossEntropyLoss()
        lbe_alpha = torch.ones(num) * lbe_alpha
        self.lbe_alpha_p = torch.nn.Parameter(lbe_alpha, requires_grad=True)
        # BUGFIX: was hard-coded `.to("cuda")`, which crashes on CPU-only
        # machines. A registered buffer follows `.to(device)` with the rest of
        # the module and is additionally aligned to the activations' device below.
        self.register_buffer("lbe_alpha_min", torch.FloatTensor([lbe_alpha_min]))
        self.lbe_beta = lbe_beta
    def lbe_per_layer(self, a, i):
        """Weighted squared gap between layer i's batch entropy and its target."""
        lbe_alpha_l = torch.abs(self.lbe_alpha_p[i])
        lbe_alpha_min = self.lbe_alpha_min.to(a.device)
        lbe_l = (batch_entropy(a)-torch.maximum(lbe_alpha_min, lbe_alpha_l))**2
        return lbe_l * self.lbe_beta
    def __call__(self, output, target):
        """`output` is `(logits, activations)`; returns `(total, ce, lbe)`."""
        output, A = output
        ce = self.ce(output, target)
        if A is None:
            return ce, ce, torch.zeros(1)
        losses = [self.lbe_per_layer(a, i) for i, a in enumerate(A)]
        lbe = torch.mean(torch.stack(losses)) * ce
        return ce+lbe, ce, lbe
class CELoss(nn.Module):
    """Cross-entropy wrapper for models that also return intermediate activations.

    Mirrors LBELoss's interface: the `(total, ce, lbe)` triple is returned with
    a zero LBE term, and dummy `lbe_alpha_p` / `lbe_beta` attributes are exposed
    so logging code can treat both criteria uniformly.
    """
    def __init__(self):
        super(CELoss, self).__init__()
        self.ce = nn.CrossEntropyLoss()
        # Placeholders so callers can read the LBE hyper-parameters uniformly.
        self.lbe_alpha_p = torch.zeros(1)
        self.lbe_beta = torch.zeros(1)
    def __call__(self, output, target):
        logits, _activations = output
        ce_loss = self.ce(logits, target)
        return ce_loss, ce_loss, 0.0
| 2,157 | 31.208955 | 113 | py |
layerwise-batch-entropy | layerwise-batch-entropy-main/experiment_loss_surface/utils.py | import torch
import numpy as np
import copy
# Thanks to https://gitlab.com/qbeer/loss-landscape/-/blob/main/loss_landscape/landscape_utils.py
def init_directions(model):
    """Draw two random filter-normalized directions `(delta, nu)` per parameter.

    Each random direction is rescaled so its norm equals the norm of the
    corresponding parameter (the normalization used for loss-surface plots).
    Returns a list with one `(delta, nu)` tuple per named parameter.
    """
    noises = []
    n_params = 0
    for name, param in model.named_parameters():
        delta = torch.normal(0.0, 1, size=param.size())
        nu = torch.normal(0.0, 1, size=param.size())
        # Scale both directions to the parameter's norm.
        scale = torch.norm(param)
        delta = delta / torch.norm(delta) * scale
        nu = nu / torch.norm(nu) * scale
        noises.append((delta, nu))
        n_params += np.prod(param.size())
    print(f'A total of {n_params:,} parameters.')
    return noises
def init_network(model, all_noises, alpha, beta):
    """Shift every parameter in place to `param + alpha * delta + beta * nu`.

    `all_noises` must align with `model.parameters()` (as produced by
    `init_directions`). Returns the mutated model for chaining.
    """
    with torch.no_grad():
        for param, (delta, nu) in zip(model.parameters(), all_noises):
            param.copy_(param + alpha * delta + beta * nu)
    return model
| 1,013 | 23.731707 | 97 | py |
layerwise-batch-entropy | layerwise-batch-entropy-main/experiment_loss_surface/create_loss_surface.py | from __future__ import print_function
import argparse
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
from torch.optim.lr_scheduler import StepLR
from batch_entropy import batch_entropy, LBELoss, CELoss
import time
import utils
import copy
from tqdm import tqdm
import numpy as np
import random
from plot import generate_plots
# Very good FAQ for loss surface plots:
# https://losslandscape.com/faq/
class FNN(nn.Module):
    """Plain fully connected ReLU network for flattened 28x28 inputs.

    `forward` returns the logits together with the list of post-activation
    tensors of the hidden blocks (consumed by the layer-wise batch-entropy loss).
    `args` must provide `width`, `depth` (>= 2) and `num_classes`.
    """
    def __init__(self, args):
        super(FNN, self).__init__()
        self.args = args
        self.width = args.width
        self.depth = args.depth
        self.fc_in = nn.Linear(28*28, self.width)
        # depth - 2 hidden blocks (input and embedding layers account for the rest).
        fcs = [nn.Linear(self.width, self.width) for i in range(self.depth-2)]
        self.fcs = nn.ModuleList(fcs)
        self.fc_embeddings = nn.Linear(self.width, self.width)
        # All hidden layers share `self.width`, so use it directly instead of
        # `fcs[-1].out_features` — equivalent for depth > 2 and additionally
        # fixes the IndexError when depth == 2 leaves `fcs` empty.
        self.fc_classifier = nn.Linear(self.width, args.num_classes)
    def forward(self, x):
        """Return `(logits, activations)` for input `x` of shape (batch, ..., 784 total)."""
        a = []
        x = torch.flatten(x, 1)
        x = F.relu(self.fc_in(x))
        for fc in self.fcs:
            x = F.relu(fc(x))
            a.append(x)
        x = F.relu(self.fc_embeddings(x))
        x = self.fc_classifier(x)
        return x, a
def train_epoch(args, model, criterion, device, train_loader, optimizer, steps):
    """Optimize for (at most) one epoch and return the updated global step count.

    Stops early as soon as more than `args.steps` total steps have been taken.
    Logs the loss and the last hidden layer's batch entropy every
    `args.log_interval` batches.
    """
    model.train()
    for batch_idx, (data, target) in enumerate(train_loader):
        steps += 1
        if steps > args.steps:
            return steps
        data = data.to(device)
        target = target.to(device)
        optimizer.zero_grad()
        logits, activations = model(data)
        loss, _, _ = criterion((logits, activations), target)
        loss.backward()
        optimizer.step()
        if batch_idx % args.log_interval == 0:
            h_a = batch_entropy(activations[-1])
            print('Step: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f} H: {:.6f}'.format(
                steps, batch_idx * len(data), len(train_loader.dataset),
                100. * batch_idx / len(train_loader), loss.item(), h_a))
    return steps
def evaluate(args, model, criterion, device, dataset_loader):
    """Average CE, accuracy and LBE over a small sample of `dataset_loader`.

    Returns `(ce, acc, lbe, ce + lbe)`, each averaged over at most
    `eval_size + 1` mini-batches. `args` is accepted for signature symmetry
    with `train_epoch` but is not used here.
    """
    # NOTE(review): the model is deliberately left in train() mode during
    # evaluation — confirm this is intended (the FNN here has no dropout/BN,
    # so it does not change results for this model).
    model.train()
    ces = []
    acc = []
    lbes = []
    losses = []
    # Cap on the number of evaluated batches to keep the surface scan cheap.
    eval_size = 20
    # Evaluate test acc / loss
    with torch.no_grad():
        for batch_idx, (data, target) in enumerate(dataset_loader):
            data, target = data.to(device), target.to(device)
            output, A = model(data)
            loss, ce, lbe = criterion((output, A), target)
            losses.append(loss)
            ces.append(ce)
            lbes.append(lbe)
            pred = output.argmax(dim=1, keepdim=True)
            acc.append(pred.eq(target.view_as(pred)).sum().item() / len(data))
            if len(lbes) > eval_size:
                break
    ces = np.mean(ces)
    acc = np.mean(acc)
    lbes = np.mean(lbes)
    # NOTE(review): the collected `losses` list is discarded here; the returned
    # total is recomputed as ce + lbe instead.
    losses = ces+lbes
    return ces, acc, lbes, losses
def main():
    """Train a deep FNN on MNIST and periodically dump loss-surface scans.

    Before each training epoch the loss landscape around the current weights
    is sampled on a (resolution x resolution) grid along two random
    filter-normalized directions, saved as .npy files and plotted.
    """
    # Training settings
    parser = argparse.ArgumentParser(description='Batch Entropy with PyTorch and MNIST')
    parser.add_argument('--batch-size', type=int, default=256, metavar='N',
                        help='input batch size for training (default: 64)')
    parser.add_argument('--test-batch-size', type=int, default=1024, metavar='N',
                        help='input batch size for testing (default: 1024)')
    parser.add_argument('--steps', type=int, default=10, metavar='N',
                        help='number of steps to train (default: 14)')
    parser.add_argument('--lr', type=float, default=0.0003, metavar='LR',
                        help='learning rate (default: 1.0)')
    parser.add_argument('--no-cuda', action='store_true', default=True,
                        help='disables CUDA training')
    parser.add_argument('--seed', type=int, default=1, metavar='S',
                        help='random seed (default: 1)')
    parser.add_argument('--log-interval', type=int, default=10, metavar='N',
                        help='how many batches to wait before logging training status')
    parser.add_argument('--lbe_alpha', type=float, default=0.0,
                        help='Desired entropy at the beginning of trainig.')
    parser.add_argument('--lbe_beta', type=float, default=0.0,
                        help='Weight lbe loss.')
    parser.add_argument('--depth', type=int, default=30,
                        help='Depth of the model')
    parser.add_argument('--width', type=int, default=500,
                        help='Width of the model')
    parser.add_argument('--num_classes', type=int, default=10,
                        help='Number of classes of the dataset')
    parser.add_argument('--resolution', type=int, default=10, metavar='N',
                        help='Resolution of loss plot')
    args = parser.parse_args()
    use_cuda = not args.no_cuda and torch.cuda.is_available()
    torch.manual_seed(args.seed)
    random.seed(args.seed)
    device = torch.device("cuda" if use_cuda else "cpu")
    print("Using device %s" % device)
    train_kwargs = {'batch_size': args.batch_size}
    test_kwargs = {'batch_size': args.test_batch_size}
    if use_cuda:
        cuda_kwargs = {'num_workers': 1,
                       'pin_memory': True,
                       'shuffle': True}
        train_kwargs.update(cuda_kwargs)
        test_kwargs.update(cuda_kwargs)
    transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.1307,), (0.3081,))])
    ds_train = datasets.MNIST('.data', train=True, download=True, transform=transform)
    train_loader = torch.utils.data.DataLoader(ds_train, **train_kwargs)
    steps = 0
    model = FNN(args).to(device)
    # LBE regularization only when its weight is non-zero, otherwise plain CE.
    criterion = LBELoss(args.depth-2, lbe_alpha=args.lbe_alpha, lbe_beta=args.lbe_beta) if args.lbe_beta != 0.0 else CELoss()
    noises = utils.init_directions(model)
    optimizer = optim.Adam(model.parameters(), lr=args.lr)
    epoch = 0
    while steps < args.steps:
        #
        # Plot loss surface
        #
        # NOTE: `epoch % 1 == 0` is always true; raise the modulus to scan less often.
        if epoch % 1 == 0:
            def load_model():
                # Fresh copy per grid point so the perturbations never accumulate.
                return copy.deepcopy(model)
            RESOLUTION = args.resolution
            A, B = np.meshgrid(np.linspace(-1, 1, RESOLUTION),
                               np.linspace(-1, 1, RESOLUTION), indexing='ij')
            ces = np.empty_like(A)
            accs = np.empty_like(A)
            lbes = np.empty_like(A)
            losses = np.empty_like(A)
            for i in range(RESOLUTION):
                for j in range(RESOLUTION):
                    alpha = A[i, j]
                    beta = B[i, j]
                    net = utils.init_network(load_model(), noises, alpha, beta).to(device)
                    ce, acc, lbe, loss = evaluate(args, net, criterion, device, train_loader)
                    lbes[i, j] = lbe
                    ces[i, j] = ce
                    accs[i, j] = acc
                    losses[i, j] = loss
                    del net
                    print(f'alpha : {alpha:.2f}, beta : {beta:.2f}, ce : {ce:.2f}, lbe : {lbe:.2f}')
                    torch.cuda.empty_cache()
            path = f"./generated/lbe_{args.lbe_beta}/depth_{args.depth}/steps_{steps}"
            if not os.path.exists(path):
                os.makedirs(path)
            np.save(f"{path}/ce.npy", ces)
            np.save(f"{path}/lbe.npy", lbes)
            np.save(f"{path}/loss.npy", losses)
            np.save(f"{path}/acc.npy", accs)
            np.save(f"{path}/X.npy", A)
            np.save(f"{path}/Y.npy", B)
            args.path = path
            print("Generate plots...")
            generate_plots(args)
        #
        # Train one epoch
        #
        # BUGFIX: train_epoch() receives `steps` and returns the *cumulative*
        # step count, so the previous `steps += train_epoch(...)` double-counted
        # every step already taken; assign the returned value instead.
        steps = train_epoch(args, model, criterion, device, train_loader, optimizer, steps)
        ce, acc, lbe, loss = evaluate(args, model, criterion, device, train_loader)
        print(f"steps={steps} | loss={loss} | lbe={lbe} | ce={ce} | acc={acc}")
        epoch += 1
# Script entry point: run training only when executed directly, not on import.
if __name__ == '__main__':
    main()
| 7,982 | 35.122172 | 125 | py |
layerwise-batch-entropy | layerwise-batch-entropy-main/experiment_deep_vanilla_cnn/batch_entropy.py | import torch
from torch import nn
import torch.nn.functional as F
import numpy as np
import scipy
import scipy.stats
import random
def batch_entropy(x):
    """Gaussian differential-entropy estimate over the batch dimension.

    Activations are flattened per sample; each feature's entropy is derived
    from its standard deviation across the batch (the `+ 1` keeps log finite
    at zero variance), then averaged to a single scalar.
    """
    if x.shape[0] <= 1:
        raise Exception("The batch entropy can only be calculated for |batch| > 1.")
    features = torch.flatten(x, start_dim=1)
    feature_std = torch.std(features, dim=0)
    entropy_per_feature = 0.5 * torch.log(np.pi * np.e * feature_std ** 2 + 1)
    return torch.mean(entropy_per_feature)
class LBELoss(nn.Module):
    """Cross-entropy loss plus the layer-wise batch-entropy (LBE) penalty.

    Each regularized layer ``i`` has a learnable entropy target ``|lbe_alpha_p[i]|``
    bounded from below by ``lbe_alpha_min``; the squared gap between the layer's
    batch entropy and its target is averaged over layers, weighted by ``lbe_beta``
    and scaled by the current CE loss.
    See also https://www.wolframalpha.com/input/?i=%28%28x-0.8%29*0.5%29**2+for+x+from+0+to+2+y+from+0+to+0.5
    """
    def __init__(self, num, lbe_alpha=0.5, lbe_alpha_min=0.5, lbe_beta=0.5):
        """Args:
            num: number of intermediate layers whose activations are regularized.
            lbe_alpha: initial value of the learnable per-layer entropy targets.
            lbe_alpha_min: lower bound on the entropy targets.
            lbe_beta: weight of the LBE term relative to the CE loss.
        """
        super(LBELoss, self).__init__()
        self.ce = nn.CrossEntropyLoss()
        lbe_alpha = torch.ones(num) * lbe_alpha
        self.lbe_alpha_p = torch.nn.Parameter(lbe_alpha, requires_grad=True)
        # BUGFIX: was hard-coded `.to("cuda")`, which crashes on CPU-only
        # machines. A registered buffer follows `.to(device)` with the rest of
        # the module and is additionally aligned to the activations' device below.
        self.register_buffer("lbe_alpha_min", torch.FloatTensor([lbe_alpha_min]))
        self.lbe_beta = lbe_beta
    def lbe_per_layer(self, a, i):
        """Weighted squared gap between layer i's batch entropy and its target."""
        lbe_alpha_l = torch.abs(self.lbe_alpha_p[i])
        lbe_alpha_min = self.lbe_alpha_min.to(a.device)
        lbe_l = (batch_entropy(a)-torch.maximum(lbe_alpha_min, lbe_alpha_l))**2
        return lbe_l * self.lbe_beta
    def __call__(self, output, target):
        """`output` is `(logits, activations)`; returns `(total, ce, lbe)`."""
        output, A = output
        ce = self.ce(output, target)
        if A is None:
            return ce, ce, torch.zeros(1)
        losses = [self.lbe_per_layer(a, i) for i, a in enumerate(A)]
        lbe = torch.mean(torch.stack(losses)) * ce
        return ce+lbe, ce, lbe
class CELoss(nn.Module):
    """Cross-entropy criterion for models that also emit intermediate activations.

    Provides the same `(total, ce, lbe)` return triple and the same
    `lbe_alpha_p` / `lbe_beta` attributes as LBELoss (with zero values) so the
    two criteria are interchangeable in the training loop.
    """
    def __init__(self):
        super(CELoss, self).__init__()
        self.ce = nn.CrossEntropyLoss()
        # Zero-valued stand-ins for the LBE hyper-parameters.
        self.lbe_alpha_p = torch.zeros(1)
        self.lbe_beta = torch.zeros(1)
    def __call__(self, output, target):
        logits, _activations = output
        ce_loss = self.ce(logits, target)
        return ce_loss, ce_loss, 0.0
| 2,157 | 31.208955 | 113 | py |
layerwise-batch-entropy | layerwise-batch-entropy-main/experiment_deep_vanilla_cnn/delta_orth.py | import math
import torch
""" The implementation below corresponds to Tensorflow implementation.
Refer https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/ops/init_ops.py for details.
From https://github.com/yl-1993/ConvDeltaOrthogonal-Init/
We tried this version as well as the version implemented in train.py, but failed.
"""
def init_delta_orthogonal_1(tensor, gain=1.):
    r"""Initializer that generates a delta orthogonal kernel for ConvNets.
    The shape of the tensor must have length 3, 4 or 5. The number of input
    filters must not exceed the number of output filters. The center pixels of the
    tensor form an orthogonal matrix. Other pixels are set to be zero. See
    algorithm 2 in [Xiao et al., 2018]: https://arxiv.org/abs/1806.05393
    Args:
      tensor: an n-dimensional `torch.Tensor`, where :math:`3 \leq n \leq 5`
      gain: Multiplicative factor to apply to the orthogonal matrix. Default is 1.
    Examples:
        >>> w = torch.empty(5, 4, 3, 3)
        >>> nn.init.conv_delta_orthogonal(w)
    """
    if tensor.ndimension() < 3 or tensor.ndimension() > 5:
        raise ValueError("The tensor to initialize must be at least "
                         "three-dimensional and at most five-dimensional")
    if tensor.size(1) > tensor.size(0):
        raise ValueError("In_channels cannot be greater than out_channels.")
    # Generate a random matrix
    a = tensor.new(tensor.size(0), tensor.size(0)).normal_(0, 1)
    # Compute the qr factorization.
    # FIX: torch.qr is deprecated (and removed in recent PyTorch); for a square
    # input, torch.linalg.qr in its default 'reduced' mode is equivalent.
    q, r = torch.linalg.qr(a)
    # Make Q uniform: flip column signs so the factorization is unique.
    d = torch.diag(r, 0)
    q *= d.sign()
    q = q[:, :tensor.size(1)]
    with torch.no_grad():
        tensor.zero_()
        # Place the orthogonal matrix at the spatial center; everything else stays 0.
        if tensor.ndimension() == 3:
            tensor[:, :, (tensor.size(2)-1)//2] = q
        elif tensor.ndimension() == 4:
            tensor[:, :, (tensor.size(2)-1)//2, (tensor.size(3)-1)//2] = q
        else:
            tensor[:, :, (tensor.size(2)-1)//2, (tensor.size(3)-1)//2, (tensor.size(4)-1)//2] = q
        tensor.mul_(math.sqrt(gain))
    return tensor
#
# Test also a second method with orthogonal initialization
#
def _gen_orthogonal(dim):
""" Thanks to https://github.com/JiJingYu/delta_orthogonal_init_pytorch/
"""
a = torch.zeros((dim, dim)).normal_(0, 1)
q, r = torch.qr(a)
d = torch.diag(r, 0).sign()
diag_size = d.size(0)
d_exp = d.view(1, diag_size).expand(diag_size, diag_size)
q.mul_(d_exp)
return q
def init_delta_orthogonal_2(weights, gain):
    """Delta-orthogonal init: zeros everywhere except an orthogonal center tap.

    Thanks to https://github.com/JiJingYu/delta_orthogonal_init_pytorch/
    """
    out_ch = weights.size(0)
    in_ch = weights.size(1)
    if out_ch > in_ch:
        print("In_filters should not be greater than out_filters.")
    weights.data.fill_(0)
    ortho = _gen_orthogonal(max(out_ch, in_ch))
    center_h = weights.size(2) // 2
    center_w = weights.size(3) // 2
    with torch.no_grad():
        # Only the spatial center carries the (scaled) orthogonal slice.
        weights[:, :, center_h, center_w] = ortho[:out_ch, :in_ch]
        weights.mul_(gain)
| 2,997 | 35.120482 | 109 | py |
layerwise-batch-entropy | layerwise-batch-entropy-main/experiment_deep_vanilla_cnn/train.py | from __future__ import print_function
import os
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from delta_orth import init_delta_orthogonal_1, init_delta_orthogonal_2
from torchvision import datasets, transforms
from batch_entropy import batch_entropy, LBELoss, CELoss
import time
import numpy as np
import wandb
# ---- command-line interface -------------------------------------------------
# NOTE(review): the description string looks garbled ("Conflicting
# bundl--deptes"); confirm the intended wording.
parser = argparse.ArgumentParser(description='Conflicting bundl--deptes with PyTorch and MNIST')
parser.add_argument('--arch', type=str, default="CNN", metavar='S',
                    help='Architecture - For WanDB filtering')
parser.add_argument('--dataset', type=str, default="mnist", metavar='S',
                    help='mnist, cifar')
parser.add_argument('--name', type=str, default=None, metavar='S',
                    help='WanDB run name')
parser.add_argument('--init_method', type=str, default="delta_orthogonal", metavar='S',
                    help='random, delta_orthogonal')
parser.add_argument('--depth', type=int, default=500, metavar='S',
                    help='Depth of network')
parser.add_argument('--filters', type=int, default=8, metavar='S',
                    help='Number of filters')
# NOTE(review): several help strings below cite defaults that differ from the
# actual ``default=`` values (e.g. batch_size 256 vs "default: 64").
parser.add_argument('--batch_size', type=int, default=256, metavar='N',
                    help='input batch size for training (default: 64)')
parser.add_argument('--test_batch_size', type=int, default=128, metavar='N',
                    help='input batch size for testing (default: 1024)')
parser.add_argument('--epochs', type=int, default=1000, metavar='N',
                    help='number of epochs to train (default: 14)')
parser.add_argument('--learning_rate', type=float, default=1e-4, metavar='Learning rate',
                    help='learning rate (default: 1.0)')
# Layer-wise batch-entropy (LBE) regularization hyper-parameters.
parser.add_argument('--lbe_alpha', type=float, default=2.0,
                    help='Desired entropy at the beginning of trainig.')
parser.add_argument('--lbe_alpha_min', type=float, default=2.0,
                    help='Desired entropy at the beginning of trainig.')
parser.add_argument('--lbe_beta', type=float, default=1e-1,
                    help='Weight lbe loss.')
parser.add_argument('--no_cuda', action='store_true', default=False,
                    help='disables CUDA training')
parser.add_argument('--seed', type=int, default=42, metavar='S',
                    help='random seed (default: 1)')
parser.add_argument('--log_interval', type=int, default=50, metavar='N',
                    help='how many batches to wait before logging training status')
parser.add_argument('--save_model', action='store_true', default=False,
                    help='For Saving the current Model')
args = parser.parse_args()
# lbe_alpha and lbe_beta must be zero/non-zero together; otherwise the run
# (e.g. within a wandb sweep) is aborted cleanly with exit code 0.
if((args.lbe_alpha == 0 and args.lbe_beta != 0) or (args.lbe_alpha != 0 and args.lbe_beta == 0)):
    wandb.finish(exit_code=0)
    exit()
# Get device
use_cuda = not args.no_cuda and torch.cuda.is_available()
torch.manual_seed(args.seed)
device = torch.device("cuda" if use_cuda else "cpu")
print(f"Using device {device}")
class CNN(nn.Module):
    """Deep vanilla CNN: ``args.depth`` stacked 3x3 convs of constant width.

    Spatial size is preserved (stride 1, padding 1) so the flatten size is
    predictable.  With ``init_method == "delta_orthogonal"`` the network uses
    tanh activations and delta-orthogonal initialization (following
    https://arxiv.org/abs/1806.05393); otherwise relu with PyTorch defaults.
    ``forward`` also returns the list of hidden activations so a layer-wise
    batch-entropy loss can inspect every layer.
    """
    def __init__(self, args):
        super(CNN, self).__init__()
        self.args = args
        self.filters = args.filters
        self.depth = args.depth
        self.size = args.size  # spatial input size (kept constant by padding)
        self.dim = args.dim    # number of input channels
        # torch.tanh replaces the deprecated F.tanh (numerically identical).
        self.act_fn = torch.tanh if args.init_method == "delta_orthogonal" else F.relu
        self.conv_in = nn.Conv2d(self.dim, self.filters, 3, stride=1, padding=1)
        conv_layer = [nn.Conv2d(self.filters, self.filters, 3, stride=1, padding=1) for _ in range(self.depth-2)]
        self.conv_layer = nn.ModuleList(conv_layer)
        self.fc = nn.Linear(self.filters*self.size*self.size, self.size*self.size*2)
        self.out = nn.Linear(self.size*self.size*2, args.num_classes)
        # For comparison against delta orthogonal initialization
        # as proposed by https://arxiv.org/abs/1806.05393
        if args.init_method == "delta_orthogonal":
            self._initialize_delta_orthogonal()
    def _initialize_delta_orthogonal(self):
        """Apply delta-orthogonal init to every conv layer.

        (Typo in the original method name fixed; the method is private and
        only called from __init__, so no external callers break.)
        We tested both, init_delta_orthogonal_1 as well as init_delta_orthogonal_2.
        """
        init_delta_orthogonal_1(self.conv_in.weight)
        for conv in self.conv_layer:
            init_delta_orthogonal_1(conv.weight)
        # init_delta_orthogonal_1(self.conv_in.weight, init.calculate_gain('relu'))
        # for conv in self.conv_layer:
        #     init_delta_orthogonal_1(conv.weight, init.calculate_gain('relu'))
    def forward(self, x):
        """Return ``(logits, activations)``.

        ``activations`` contains every post-activation hidden tensor: the
        ``depth-2`` inner conv outputs plus the fc output (the input conv's
        activation is intentionally not recorded, as in the original).
        """
        a = []
        x = self.act_fn(self.conv_in(x))
        for conv in self.conv_layer:
            x = self.act_fn(conv(x))
            a.append(x)
        x = torch.flatten(x, 1)
        x = self.act_fn(self.fc(x))
        a.append(x)
        x = self.out(x)
        return x, a
def train(args, model, device, train_loader, optimizer, epoch, criterion, seen_samples):
    """Run one training epoch.

    Every ``args.log_interval`` batches, logs accuracy, loss components and
    per-layer batch-entropy statistics to wandb, using ``seen_samples`` as
    the step.  Returns the updated running count of seen samples.
    (Removed two dead locals from the original: ``Hs = []`` and ``loss = None``
    were never read.)
    """
    model.train()
    for batch_idx, (data, target) in enumerate(train_loader):
        start = time.time()
        data, target = data.to(device), target.to(device)
        optimizer.zero_grad()
        output, A = model(data)
        # criterion consumes both the logits and the hidden activations A.
        loss, ce_loss, lbe_loss = criterion((output, A), target)
        loss.backward()
        optimizer.step()
        end = time.time()
        seen_samples += output.shape[0]
        if batch_idx % args.log_interval == 0:
            entropies = [batch_entropy(a) for a in A]
            H_out = entropies[-1]
            H_avg = torch.mean(torch.stack(entropies))
            H_pos_count = np.sum([1 if e > 0 else 0 for e in entropies])
            pred = output.argmax(dim=1, keepdim=True)
            correct = pred.eq(target.view_as(pred)).sum().item()
            train_acc = correct / output.shape[0]
            # NOTE(review): lbe_alpha_p presumably only exists on LBELoss;
            # confirm CELoss exposes the same attribute or guard this access.
            lbe_alpha_mean = torch.mean(criterion.lbe_alpha_p)
            lbe_alpha_min = torch.min(criterion.lbe_alpha_p)
            lbe_alpha_max = torch.max(criterion.lbe_alpha_p)
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tAccuracy: {:.4f}\tLoss: {:.4f}\tTime: {:.4f}\tH_last: {}\t H_avg: {} \t H_pos_count {}'.format(
                epoch, batch_idx * len(data), len(train_loader.dataset),
                100. * batch_idx / len(train_loader), train_acc, loss.item(), end - start, H_out, H_avg, H_pos_count))
            wandb.log({
                "train/h_out": H_out,
                "train/h_avg": H_avg,
                "train/h_pos_count": H_pos_count,
                "train/loss": loss,
                "train/loss_lbe": lbe_loss,
                "train/loss_ce": ce_loss,
                "train/accuracy": train_acc,
                "train/lbe_alpha_p": lbe_alpha_mean,
                "train/lbe_alpha_p_min": lbe_alpha_min,
                "train/lbe_alpha_p_max": lbe_alpha_max,
            }, step=seen_samples)
    return seen_samples
# Per-split history of recent test accuracies (keyed by split name); kept at
# module level so repeated calls to test() can average across epochs.
test_acc_sliding = {}
def test(name, model, device, test_loader, criterion, seen_samples):
    """Evaluate ``model`` on ``test_loader`` and log results to wandb.

    ``name`` labels the split (e.g. "test") both in the log output and in the
    global ``test_acc_sliding`` history.  Returns the raw accuracy of this
    evaluation (not the sliding average).
    """
    global test_acc_sliding
    model.eval()
    test_loss = 0
    correct = 0
    # Lazily create the history list for this split.
    test_acc_sliding[name] = [] if name not in test_acc_sliding else test_acc_sliding[name]
    with torch.no_grad():
        for data, target in test_loader:
            data, target = data.to(device), target.to(device)
            output, A = model(data)
            # criterion returns (total, ce, lbe); only the total is accumulated.
            test_loss += criterion((output, A), target)[0]
            pred = output.argmax(dim=1, keepdim=True)
            correct += pred.eq(target.view_as(pred)).sum().item()
    test_loss = test_loss / len(test_loader)
    test_acc = correct / len(test_loader.dataset)
    # Sliding avg of test acc over the last epochs
    # NOTE(review): the ``[-1:]`` truncation keeps only the newest entry, so
    # np.mean below is a no-op average of one value — confirm whether a longer
    # window was intended.
    test_acc_sliding[name].append(test_acc)
    test_acc_sliding[name] = test_acc_sliding[name][-1:]
    print('\n{} set: Average loss: {:.4f}, Accuracy: {}/{} ({:.3f}%)\n'.format(name,
        test_loss, correct, len(test_loader.dataset), test_acc * 100))
    wandb.log({
        f"{name}/accuracy": np.mean(test_acc_sliding[name]),
        f"{name}/loss_ce": test_loss
    }, step=seen_samples)
    return test_acc
def main():
    """Build the dataset/model/criterion from CLI args, train, and return
    the final test accuracy.

    Fixes over the original:
    - unknown ``--dataset`` now raises ValueError instead of crashing later
      with a NameError on ``ds_train``;
    - ``cuda_kwargs`` no longer contains ``shuffle``: the original silently
      re-enabled shuffling on the test loader (test_kwargs set it to False);
    - ``train=True`` instead of ``train=1`` for datasets.MNIST;
    - ``accuracy`` is initialized so the function is safe for ``--epochs 0``.
    """
    if args.dataset == "mnist":
        args.num_classes = 10
        args.dim = 1
        args.size = 28
        # Init dataset
        transform_train=transforms.Compose([
            transforms.RandomAffine(degrees=20, translate=(0.1,0.1), scale=(0.9, 1.1)),
            transforms.ColorJitter(brightness=0.2, contrast=0.2),
            transforms.ToTensor(),
            transforms.Normalize((0.1307,), (0.3081,))])
        ds_train = datasets.MNIST('.data', train=True, download=True, transform=transform_train)
        transform_test=transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.1307,), (0.3081,))])
        ds_test = datasets.MNIST('.data', train=False, transform=transform_test)
    elif args.dataset == "cifar10":
        args.num_classes = 10
        args.dim = 3
        args.size = 32
        transform_train = transforms.Compose([
            transforms.RandomCrop(32, padding=4),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
        ])
        transform_test = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
        ])
        ds_train = datasets.CIFAR10(
            root='./data', train=True, download=True, transform=transform_train)
        ds_test = datasets.CIFAR10(
            root='./data', train=False, download=True, transform=transform_test)
    else:
        raise ValueError(f"Unsupported dataset {args.dataset!r}; expected 'mnist' or 'cifar10'.")
    wandb.init(name=args.name, config=args)
    train_kwargs = {'batch_size': args.batch_size, "shuffle": True}
    test_kwargs = {'batch_size': args.test_batch_size, "shuffle": False}
    if use_cuda:
        cuda_kwargs = {'num_workers': 1,
                       'pin_memory': True}
        train_kwargs.update(cuda_kwargs)
        test_kwargs.update(cuda_kwargs)
    train_loader = torch.utils.data.DataLoader(ds_train,**train_kwargs)
    test_loader = torch.utils.data.DataLoader(ds_test, **test_kwargs)
    model = CNN(args=args).to(device)
    # LBE loss only when lbe_beta is active; plain cross entropy otherwise.
    criterion = LBELoss(args.depth-1, lbe_alpha=args.lbe_alpha, lbe_alpha_min=args.lbe_alpha_min, lbe_beta=args.lbe_beta) if args.lbe_beta != 0.0 else CELoss()
    # The criterion has learnable parameters (lbe_alpha_p) — optimize them too.
    params = list(model.parameters()) + list(criterion.parameters())
    optimizer = optim.Adam(params, lr=args.learning_rate)
    # Note that pytorch calls kaiming per default via reset_parameters in __init__:
    # https://github.com/pytorch/pytorch/blob/master/torch/nn/modules/linear.py#L81
    seen_samples = 0
    accuracy = None
    for epoch in range(args.epochs):
        seen_samples = train(args, model, device, train_loader, optimizer, epoch, criterion, seen_samples)
        if(epoch % 1 == 0):
            accuracy = test("test", model, device, test_loader, criterion, seen_samples)
    return accuracy
# Script entry point.
if __name__ == '__main__':
    main()
swav | swav-main/eval_linear.py | # Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import argparse
import os
import time
from logging import getLogger
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim
import torch.utils.data as data
import torchvision.transforms as transforms
import torchvision.datasets as datasets
from src.utils import (
bool_flag,
initialize_exp,
restart_from_checkpoint,
fix_random_seeds,
AverageMeter,
init_distributed_mode,
accuracy,
)
import src.resnet50 as resnet_models
logger = getLogger()
parser = argparse.ArgumentParser(description="Evaluate models: Linear classification on ImageNet")
#########################
#### main parameters ####
#########################
parser.add_argument("--dump_path", type=str, default=".",
help="experiment dump path for checkpoints and log")
parser.add_argument("--seed", type=int, default=31, help="seed")
parser.add_argument("--data_path", type=str, default="/path/to/imagenet",
help="path to dataset repository")
parser.add_argument("--workers", default=10, type=int,
help="number of data loading workers")
#########################
#### model parameters ###
#########################
parser.add_argument("--arch", default="resnet50", type=str, help="convnet architecture")
parser.add_argument("--pretrained", default="", type=str, help="path to pretrained weights")
parser.add_argument("--global_pooling", default=True, type=bool_flag,
help="if True, we use the resnet50 global average pooling")
parser.add_argument("--use_bn", default=False, type=bool_flag,
help="optionally add a batchnorm layer before the linear classifier")
#########################
#### optim parameters ###
#########################
parser.add_argument("--epochs", default=100, type=int,
help="number of total epochs to run")
parser.add_argument("--batch_size", default=32, type=int,
help="batch size per gpu, i.e. how many unique instances per gpu")
parser.add_argument("--lr", default=0.3, type=float, help="initial learning rate")
parser.add_argument("--wd", default=1e-6, type=float, help="weight decay")
parser.add_argument("--nesterov", default=False, type=bool_flag, help="nesterov momentum")
parser.add_argument("--scheduler_type", default="cosine", type=str, choices=["step", "cosine"])
# for multi-step learning rate decay
parser.add_argument("--decay_epochs", type=int, nargs="+", default=[60, 80],
help="Epochs at which to decay learning rate.")
parser.add_argument("--gamma", type=float, default=0.1, help="decay factor")
# for cosine learning rate schedule
parser.add_argument("--final_lr", type=float, default=0, help="final learning rate")
#########################
#### dist parameters ###
#########################
parser.add_argument("--dist_url", default="env://", type=str,
help="url used to set up distributed training")
parser.add_argument("--world_size", default=-1, type=int, help="""
number of processes: it is set automatically and
should not be passed as argument""")
parser.add_argument("--rank", default=0, type=int, help="""rank of this process:
it is set automatically and should not be passed as argument""")
parser.add_argument("--local_rank", default=0, type=int,
help="this argument is not used and should be ignored")
def main():
    """Train a linear classifier on top of frozen, pretrained features.

    Sets up distributed training, ImageNet data loaders, the frozen backbone
    plus a RegLog head, optimizer/scheduler, optional checkpoint resume, and
    runs the train/validate loop.  ``args`` and ``best_acc`` are module-level
    globals shared with train()/validate_network().
    """
    global args, best_acc
    args = parser.parse_args()
    init_distributed_mode(args)
    fix_random_seeds(args.seed)
    logger, training_stats = initialize_exp(
        args, "epoch", "loss", "prec1", "prec5", "loss_val", "prec1_val", "prec5_val"
    )
    # build data
    train_dataset = datasets.ImageFolder(os.path.join(args.data_path, "train"))
    val_dataset = datasets.ImageFolder(os.path.join(args.data_path, "val"))
    # NOTE(review): std[0] is 0.228 while the usual ImageNet value is 0.229 —
    # looks intentional in this codebase; confirm before "fixing".
    tr_normalize = transforms.Normalize(
        mean=[0.485, 0.456, 0.406], std=[0.228, 0.224, 0.225]
    )
    train_dataset.transform = transforms.Compose([
        transforms.RandomResizedCrop(224),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        tr_normalize,
    ])
    val_dataset.transform = transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        tr_normalize,
    ])
    sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)
    train_loader = torch.utils.data.DataLoader(
        train_dataset,
        sampler=sampler,
        batch_size=args.batch_size,
        num_workers=args.workers,
        pin_memory=True,
    )
    val_loader = torch.utils.data.DataLoader(
        val_dataset,
        batch_size=args.batch_size,
        num_workers=args.workers,
        pin_memory=True,
    )
    logger.info("Building data done")
    # build model: backbone emits raw feature maps (output_dim=0, eval_mode).
    model = resnet_models.__dict__[args.arch](output_dim=0, eval_mode=True)
    linear_classifier = RegLog(1000, args.arch, args.global_pooling, args.use_bn)
    # convert batch norm layers (if any)
    linear_classifier = nn.SyncBatchNorm.convert_sync_batchnorm(linear_classifier)
    # model to gpu; only the classifier is wrapped in DDP — the backbone is
    # frozen and never receives gradients.
    model = model.cuda()
    linear_classifier = linear_classifier.cuda()
    linear_classifier = nn.parallel.DistributedDataParallel(
        linear_classifier,
        device_ids=[args.gpu_to_work_on],
        find_unused_parameters=True,
    )
    model.eval()
    # load weights
    if os.path.isfile(args.pretrained):
        state_dict = torch.load(args.pretrained, map_location="cuda:" + str(args.gpu_to_work_on))
        if "state_dict" in state_dict:
            state_dict = state_dict["state_dict"]
        # remove prefixe "module."
        state_dict = {k.replace("module.", ""): v for k, v in state_dict.items()}
        # keep the model's own tensor for any missing/shape-mismatched key so
        # load_state_dict(strict=False) below cannot fail on them
        for k, v in model.state_dict().items():
            if k not in list(state_dict):
                logger.info('key "{}" could not be found in provided state dict'.format(k))
            elif state_dict[k].shape != v.shape:
                logger.info('key "{}" is of different shape in model and provided state dict'.format(k))
                state_dict[k] = v
        msg = model.load_state_dict(state_dict, strict=False)
        logger.info("Load pretrained model with msg: {}".format(msg))
    else:
        logger.info("No pretrained weights found => training with random weights")
    # set optimizer
    optimizer = torch.optim.SGD(
        linear_classifier.parameters(),
        lr=args.lr,
        nesterov=args.nesterov,
        momentum=0.9,
        weight_decay=args.wd,
    )
    # set scheduler
    if args.scheduler_type == "step":
        scheduler = torch.optim.lr_scheduler.MultiStepLR(
            optimizer, args.decay_epochs, gamma=args.gamma
        )
    elif args.scheduler_type == "cosine":
        scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
            optimizer, args.epochs, eta_min=args.final_lr
        )
    # Optionally resume from a checkpoint
    to_restore = {"epoch": 0, "best_acc": 0.}
    restart_from_checkpoint(
        os.path.join(args.dump_path, "checkpoint.pth.tar"),
        run_variables=to_restore,
        state_dict=linear_classifier,
        optimizer=optimizer,
        scheduler=scheduler,
    )
    start_epoch = to_restore["epoch"]
    best_acc = to_restore["best_acc"]
    cudnn.benchmark = True
    for epoch in range(start_epoch, args.epochs):
        # train the network for one epoch
        logger.info("============ Starting epoch %i ... ============" % epoch)
        # set samplers
        train_loader.sampler.set_epoch(epoch)
        scores = train(model, linear_classifier, optimizer, train_loader, epoch)
        scores_val = validate_network(val_loader, model, linear_classifier)
        training_stats.update(scores + scores_val)
        scheduler.step()
        # save checkpoint (rank 0 only)
        if args.rank == 0:
            save_dict = {
                "epoch": epoch + 1,
                "state_dict": linear_classifier.state_dict(),
                "optimizer": optimizer.state_dict(),
                "scheduler": scheduler.state_dict(),
                "best_acc": best_acc,
            }
            torch.save(save_dict, os.path.join(args.dump_path, "checkpoint.pth.tar"))
    logger.info("Training of the supervised linear classifier on frozen features completed.\n"
                "Top-1 test accuracy: {acc:.1f}".format(acc=best_acc))
class RegLog(nn.Module):
    """Logistic regression head on top of frozen conv features.

    With ``global_avg`` the feature map is globally average-pooled to
    (N, C, 1, 1); otherwise (resnet50 only) a 6x6 average pool reduces a
    7x7 map to 2x2, giving 2048*4 = 8192 flattened features.

    Args:
        num_labels: number of output classes.
        arch: backbone name ("resnet50", "resnet50w2", "resnet50w4").
        global_avg: use global average pooling instead of the 6x6 pool.
        use_bn: insert a BatchNorm2d before flattening.
    """
    def __init__(self, num_labels, arch="resnet50", global_avg=False, use_bn=True):
        super(RegLog, self).__init__()
        self.bn = None
        if global_avg:
            if arch == "resnet50":
                s = 2048
            elif arch == "resnet50w2":
                s = 4096
            elif arch == "resnet50w4":
                s = 8192
            self.av_pool = nn.AdaptiveAvgPool2d((1, 1))
            bn_channels = s  # pooled map keeps the backbone's channel count
        else:
            assert arch == "resnet50"
            s = 8192
            self.av_pool = nn.AvgPool2d(6, stride=1)
            bn_channels = 2048  # pooling changes spatial size, not channels
        if use_bn:
            # Bug fix: this was hard-coded to nn.BatchNorm2d(2048), which
            # crashed for resnet50w2/w4 with global_avg + use_bn
            # (4096/8192 channels reach the bn layer).
            self.bn = nn.BatchNorm2d(bn_channels)
        self.linear = nn.Linear(s, num_labels)
        self.linear.weight.data.normal_(mean=0.0, std=0.01)
        self.linear.bias.data.zero_()
    def forward(self, x):
        # average pool the final feature map
        x = self.av_pool(x)
        # optional BN
        if self.bn is not None:
            x = self.bn(x)
        # flatten
        x = x.view(x.size(0), -1)
        # linear layer
        return self.linear(x)
def train(model, reglog, optimizer, loader, epoch):
    """
    Train the linear classifier for one epoch on frozen backbone features.

    The backbone ``model`` stays in eval mode and runs under no_grad; only
    ``reglog`` receives gradient updates.  Returns
    (epoch, avg loss, avg top-1, avg top-5) for the training stats logger.
    """
    # running statistics
    batch_time = AverageMeter()
    data_time = AverageMeter()
    # training statistics
    top1 = AverageMeter()
    top5 = AverageMeter()
    losses = AverageMeter()
    end = time.perf_counter()
    model.eval()
    reglog.train()
    criterion = nn.CrossEntropyLoss().cuda()
    for iter_epoch, (inp, target) in enumerate(loader):
        # measure data loading time
        data_time.update(time.perf_counter() - end)
        # move to gpu
        inp = inp.cuda(non_blocking=True)
        target = target.cuda(non_blocking=True)
        # forward: backbone under no_grad (frozen), head with gradients
        with torch.no_grad():
            output = model(inp)
        output = reglog(output)
        # compute cross entropy loss
        loss = criterion(output, target)
        # compute the gradients
        optimizer.zero_grad()
        loss.backward()
        # step
        optimizer.step()
        # update stats
        acc1, acc5 = accuracy(output, target, topk=(1, 5))
        losses.update(loss.item(), inp.size(0))
        top1.update(acc1[0], inp.size(0))
        top5.update(acc5[0], inp.size(0))
        batch_time.update(time.perf_counter() - end)
        end = time.perf_counter()
        # verbose (rank 0 only, every 50 iterations)
        if args.rank == 0 and iter_epoch % 50 == 0:
            logger.info(
                "Epoch[{0}] - Iter: [{1}/{2}]\t"
                "Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t"
                "Data {data_time.val:.3f} ({data_time.avg:.3f})\t"
                "Loss {loss.val:.4f} ({loss.avg:.4f})\t"
                "Prec {top1.val:.3f} ({top1.avg:.3f})\t"
                "LR {lr}".format(
                    epoch,
                    iter_epoch,
                    len(loader),
                    batch_time=batch_time,
                    data_time=data_time,
                    loss=losses,
                    top1=top1,
                    lr=optimizer.param_groups[0]["lr"],
                )
            )
    return epoch, losses.avg, top1.avg.item(), top5.avg.item()
def validate_network(val_loader, model, linear_classifier):
    """Evaluate the frozen backbone + linear head on the validation set.

    Updates the module-level ``best_acc`` with the best top-1 seen so far and
    returns (avg loss, avg top-1, avg top-5).
    NOTE(review): each rank evaluates only its own batches and updates its own
    ``best_acc`` without any cross-rank reduction — confirm that is intended.
    """
    batch_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()
    global best_acc
    # switch to evaluate mode
    model.eval()
    linear_classifier.eval()
    criterion = nn.CrossEntropyLoss().cuda()
    with torch.no_grad():
        end = time.perf_counter()
        for i, (inp, target) in enumerate(val_loader):
            # move to gpu
            inp = inp.cuda(non_blocking=True)
            target = target.cuda(non_blocking=True)
            # compute output
            output = linear_classifier(model(inp))
            loss = criterion(output, target)
            acc1, acc5 = accuracy(output, target, topk=(1, 5))
            losses.update(loss.item(), inp.size(0))
            top1.update(acc1[0], inp.size(0))
            top5.update(acc5[0], inp.size(0))
            # measure elapsed time
            batch_time.update(time.perf_counter() - end)
            end = time.perf_counter()
    if top1.avg.item() > best_acc:
        best_acc = top1.avg.item()
    if args.rank == 0:
        logger.info(
            "Test:\t"
            "Time {batch_time.avg:.3f}\t"
            "Loss {loss.avg:.4f}\t"
            "Acc@1 {top1.avg:.3f}\t"
            "Best Acc@1 so far {acc:.1f}".format(
                batch_time=batch_time, loss=losses, top1=top1, acc=best_acc))
    return losses.avg, top1.avg.item(), top5.avg.item()
if __name__ == "__main__":
main()
| 13,429 | 33.260204 | 104 | py |
swav | swav-main/main_deepclusterv2.py | # Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import argparse
import math
import os
import shutil
import time
from logging import getLogger
import numpy as np
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.optim
import apex
from apex.parallel.LARC import LARC
from scipy.sparse import csr_matrix
from src.utils import (
bool_flag,
initialize_exp,
restart_from_checkpoint,
fix_random_seeds,
AverageMeter,
init_distributed_mode,
)
from src.multicropdataset import MultiCropDataset
import src.resnet50 as resnet_models
logger = getLogger()
parser = argparse.ArgumentParser(description="Implementation of DeepCluster-v2")
#########################
#### data parameters ####
#########################
parser.add_argument("--data_path", type=str, default="/path/to/imagenet",
help="path to dataset repository")
parser.add_argument("--nmb_crops", type=int, default=[2], nargs="+",
help="list of number of crops (example: [2, 6])")
parser.add_argument("--size_crops", type=int, default=[224], nargs="+",
help="crops resolutions (example: [224, 96])")
parser.add_argument("--min_scale_crops", type=float, default=[0.14], nargs="+",
help="argument in RandomResizedCrop (example: [0.14, 0.05])")
parser.add_argument("--max_scale_crops", type=float, default=[1], nargs="+",
help="argument in RandomResizedCrop (example: [1., 0.14])")
#########################
## dcv2 specific params #
#########################
parser.add_argument("--crops_for_assign", type=int, nargs="+", default=[0, 1],
help="list of crops id used for computing assignments")
parser.add_argument("--temperature", default=0.1, type=float,
help="temperature parameter in training loss")
parser.add_argument("--feat_dim", default=128, type=int,
help="feature dimension")
parser.add_argument("--nmb_prototypes", default=[3000, 3000, 3000], type=int, nargs="+",
help="number of prototypes - it can be multihead")
#########################
#### optim parameters ###
#########################
parser.add_argument("--epochs", default=100, type=int,
help="number of total epochs to run")
parser.add_argument("--batch_size", default=64, type=int,
help="batch size per gpu, i.e. how many unique instances per gpu")
parser.add_argument("--base_lr", default=4.8, type=float, help="base learning rate")
parser.add_argument("--final_lr", type=float, default=0, help="final learning rate")
parser.add_argument("--freeze_prototypes_niters", default=1e10, type=int,
help="freeze the prototypes during this many iterations from the start")
parser.add_argument("--wd", default=1e-6, type=float, help="weight decay")
parser.add_argument("--warmup_epochs", default=10, type=int, help="number of warmup epochs")
parser.add_argument("--start_warmup", default=0, type=float,
help="initial warmup learning rate")
#########################
#### dist parameters ###
#########################
parser.add_argument("--dist_url", default="env://", type=str, help="""url used to set up distributed
training; see https://pytorch.org/docs/stable/distributed.html""")
parser.add_argument("--world_size", default=-1, type=int, help="""
number of processes: it is set automatically and
should not be passed as argument""")
parser.add_argument("--rank", default=0, type=int, help="""rank of this process:
it is set automatically and should not be passed as argument""")
parser.add_argument("--local_rank", default=0, type=int,
help="this argument is not used and should be ignored")
#########################
#### other parameters ###
#########################
parser.add_argument("--arch", default="resnet50", type=str, help="convnet architecture")
parser.add_argument("--hidden_mlp", default=2048, type=int,
help="hidden layer dimension in projection head")
parser.add_argument("--workers", default=10, type=int,
help="number of data loading workers")
parser.add_argument("--checkpoint_freq", type=int, default=25,
help="Save the model periodically")
parser.add_argument("--sync_bn", type=str, default="pytorch", help="synchronize bn")
parser.add_argument("--syncbn_process_group_size", type=int, default=8, help=""" see
https://github.com/NVIDIA/apex/blob/master/apex/parallel/__init__.py#L58-L67""")
parser.add_argument("--dump_path", type=str, default=".",
help="experiment dump path for checkpoints and log")
parser.add_argument("--seed", type=int, default=31, help="seed")
def main():
    """Entry point for DeepCluster-v2 training.

    Builds the multi-crop dataset, the prototype-equipped backbone, a LARC-
    wrapped SGD optimizer with warmup + cosine LR schedule, restores any
    checkpoint and per-rank memory bank, then runs the epoch loop.  ``args``
    is a module-level global shared with train()/init_memory()/cluster_memory().
    """
    global args
    args = parser.parse_args()
    init_distributed_mode(args)
    fix_random_seeds(args.seed)
    logger, training_stats = initialize_exp(args, "epoch", "loss")
    # build data
    train_dataset = MultiCropDataset(
        args.data_path,
        args.size_crops,
        args.nmb_crops,
        args.min_scale_crops,
        args.max_scale_crops,
        return_index=True,
    )
    sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)
    train_loader = torch.utils.data.DataLoader(
        train_dataset,
        sampler=sampler,
        batch_size=args.batch_size,
        num_workers=args.workers,
        pin_memory=True,
        drop_last=True
    )
    logger.info("Building data done with {} images loaded.".format(len(train_dataset)))
    # build model
    model = resnet_models.__dict__[args.arch](
        normalize=True,
        hidden_mlp=args.hidden_mlp,
        output_dim=args.feat_dim,
        nmb_prototypes=args.nmb_prototypes,
    )
    # synchronize batch norm layers
    if args.sync_bn == "pytorch":
        model = nn.SyncBatchNorm.convert_sync_batchnorm(model)
    elif args.sync_bn == "apex":
        # with apex syncbn we sync bn per group because it speeds up computation
        # compared to global syncbn
        process_group = apex.parallel.create_syncbn_process_group(args.syncbn_process_group_size)
        model = apex.parallel.convert_syncbn_model(model, process_group=process_group)
    # copy model to GPU
    model = model.cuda()
    if args.rank == 0:
        logger.info(model)
    logger.info("Building model done.")
    # build optimizer (SGD wrapped in LARC for layer-wise LR adaptation)
    optimizer = torch.optim.SGD(
        model.parameters(),
        lr=args.base_lr,
        momentum=0.9,
        weight_decay=args.wd,
    )
    optimizer = LARC(optimizer=optimizer, trust_coefficient=0.001, clip=False)
    # per-iteration LR schedule: linear warmup followed by cosine decay
    warmup_lr_schedule = np.linspace(args.start_warmup, args.base_lr, len(train_loader) * args.warmup_epochs)
    iters = np.arange(len(train_loader) * (args.epochs - args.warmup_epochs))
    cosine_lr_schedule = np.array([args.final_lr + 0.5 * (args.base_lr - args.final_lr) * (1 + \
                         math.cos(math.pi * t / (len(train_loader) * (args.epochs - args.warmup_epochs)))) for t in iters])
    lr_schedule = np.concatenate((warmup_lr_schedule, cosine_lr_schedule))
    logger.info("Building optimizer done.")
    # wrap model
    model = nn.parallel.DistributedDataParallel(
        model,
        device_ids=[args.gpu_to_work_on],
        find_unused_parameters=True,
    )
    # optionally resume from a checkpoint
    to_restore = {"epoch": 0}
    restart_from_checkpoint(
        os.path.join(args.dump_path, "checkpoint.pth.tar"),
        run_variables=to_restore,
        state_dict=model,
        optimizer=optimizer,
    )
    start_epoch = to_restore["epoch"]
    # build the memory bank (one file per rank; restored if present)
    mb_path = os.path.join(args.dump_path, "mb" + str(args.rank) + ".pth")
    if os.path.isfile(mb_path):
        mb_ckp = torch.load(mb_path)
        local_memory_index = mb_ckp["local_memory_index"]
        local_memory_embeddings = mb_ckp["local_memory_embeddings"]
    else:
        local_memory_index, local_memory_embeddings = init_memory(train_loader, model)
    cudnn.benchmark = True
    for epoch in range(start_epoch, args.epochs):
        # train the network for one epoch
        logger.info("============ Starting epoch %i ... ============" % epoch)
        # set sampler
        train_loader.sampler.set_epoch(epoch)
        # train the network
        scores, local_memory_index, local_memory_embeddings = train(
            train_loader,
            model,
            optimizer,
            epoch,
            lr_schedule,
            local_memory_index,
            local_memory_embeddings,
        )
        training_stats.update(scores)
        # save checkpoints (rank 0 only; memory bank is saved per rank below)
        if args.rank == 0:
            save_dict = {
                "epoch": epoch + 1,
                "state_dict": model.state_dict(),
                "optimizer": optimizer.state_dict(),
            }
            torch.save(
                save_dict,
                os.path.join(args.dump_path, "checkpoint.pth.tar"),
            )
            if epoch % args.checkpoint_freq == 0 or epoch == args.epochs - 1:
                shutil.copyfile(
                    os.path.join(args.dump_path, "checkpoint.pth.tar"),
                    os.path.join(args.dump_checkpoints, "ckp-" + str(epoch) + ".pth"),
                )
        torch.save({"local_memory_embeddings": local_memory_embeddings,
                    "local_memory_index": local_memory_index}, mb_path)
def train(loader, model, optimizer, epoch, schedule, local_memory_index, local_memory_embeddings):
    """Run one DeepCluster-v2 epoch.

    First clusters the memory bank to obtain pseudo-label assignments, then
    trains with cross entropy against those assignments while refreshing the
    memory bank with the current batch embeddings.  Returns
    ((epoch, avg loss), updated memory index, updated memory embeddings).
    """
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    model.train()
    # ignore_index=-100 skips samples whose assignment was never filled in
    cross_entropy = nn.CrossEntropyLoss(ignore_index=-100)
    assignments = cluster_memory(model, local_memory_index, local_memory_embeddings, len(loader.dataset))
    logger.info('Clustering for epoch {} done.'.format(epoch))
    end = time.time()
    start_idx = 0
    for it, (idx, inputs) in enumerate(loader):
        # measure data loading time
        data_time.update(time.time() - end)
        # update learning rate (per-iteration schedule)
        iteration = epoch * len(loader) + it
        for param_group in optimizer.param_groups:
            param_group["lr"] = schedule[iteration]
        # ============ multi-res forward passes ... ============
        emb, output = model(inputs)
        emb = emb.detach()
        bs = inputs[0].size(0)
        # ============ deepcluster-v2 loss ... ============
        # averaged over all prototype heads; each head's targets are the
        # sample's cluster assignment repeated for every crop
        loss = 0
        for h in range(len(args.nmb_prototypes)):
            scores = output[h] / args.temperature
            targets = assignments[h][idx].repeat(sum(args.nmb_crops)).cuda(non_blocking=True)
            loss += cross_entropy(scores, targets)
        loss /= len(args.nmb_prototypes)
        # ============ backward and optim step ... ============
        optimizer.zero_grad()
        loss.backward()
        # cancel some gradients: keep prototypes frozen early in training
        if iteration < args.freeze_prototypes_niters:
            for name, p in model.named_parameters():
                if "prototypes" in name:
                    p.grad = None
        optimizer.step()
        # ============ update memory banks ... ============
        local_memory_index[start_idx : start_idx + bs] = idx
        for i, crop_idx in enumerate(args.crops_for_assign):
            local_memory_embeddings[i][start_idx : start_idx + bs] = \
                emb[crop_idx * bs : (crop_idx + 1) * bs]
        start_idx += bs
        # ============ misc ... ============
        losses.update(loss.item(), inputs[0].size(0))
        batch_time.update(time.time() - end)
        end = time.time()
        if args.rank ==0 and it % 50 == 0:
            logger.info(
                "Epoch: [{0}][{1}]\t"
                "Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t"
                "Data {data_time.val:.3f} ({data_time.avg:.3f})\t"
                "Loss {loss.val:.4f} ({loss.avg:.4f})\t"
                "Lr: {lr:.4f}".format(
                    epoch,
                    it,
                    batch_time=batch_time,
                    data_time=data_time,
                    loss=losses,
                    lr=optimizer.optim.param_groups[0]["lr"],
                )
            )
    return (epoch, losses.avg), local_memory_index, local_memory_embeddings
def init_memory(dataloader, model):
    """Build this rank's memory bank of embeddings used for k-means.

    Runs one full pass over ``dataloader`` under no_grad and stores, for each
    crop in ``args.crops_for_assign``, the embedding of every sample together
    with its dataset index, on the GPU.  Returns (indices, embeddings) where
    embeddings has shape (len(crops_for_assign), samples_per_process, feat_dim).
    """
    size_memory_per_process = len(dataloader) * args.batch_size
    local_memory_index = torch.zeros(size_memory_per_process).long().cuda()
    local_memory_embeddings = torch.zeros(len(args.crops_for_assign), size_memory_per_process, args.feat_dim).cuda()
    start_idx = 0
    with torch.no_grad():
        logger.info('Start initializing the memory banks')
        for index, inputs in dataloader:
            nmb_unique_idx = inputs[0].size(0)
            index = index.cuda(non_blocking=True)
            # get embeddings (model returns (embedding, prototype scores))
            outputs = []
            for crop_idx in args.crops_for_assign:
                inp = inputs[crop_idx].cuda(non_blocking=True)
                outputs.append(model(inp)[0])
            # fill the memory bank
            local_memory_index[start_idx : start_idx + nmb_unique_idx] = index
            for mb_idx, embeddings in enumerate(outputs):
                local_memory_embeddings[mb_idx][
                    start_idx : start_idx + nmb_unique_idx
                ] = embeddings
            start_idx += nmb_unique_idx
    logger.info('Initializion of the memory banks done.')
    return local_memory_index, local_memory_embeddings
def cluster_memory(model, local_memory_index, local_memory_embeddings, size_dataset, nmb_kmeans_iters=10):
    """Distributed spherical k-means over the memory banks.

    For each prototype head i_K (one per entry of ``args.nmb_prototypes``),
    clusters the memory-bank embeddings into K centroids, writes the
    L2-normalized centroids into the model's prototype weights, and records
    the cluster assignment of every dataset sample.

    Returns a (len(args.nmb_prototypes), size_dataset) long tensor of
    assignments; entries stay -100 for samples not seen by any rank.
    """
    j = 0
    assignments = -100 * torch.ones(len(args.nmb_prototypes), size_dataset).long()
    with torch.no_grad():
        for i_K, K in enumerate(args.nmb_prototypes):
            # run distributed k-means
            # init centroids with elements from memory bank of rank 0
            centroids = torch.empty(K, args.feat_dim).cuda(non_blocking=True)
            if args.rank == 0:
                random_idx = torch.randperm(len(local_memory_embeddings[j]))[:K]
                # randperm(N)[:K] has min(N, K) elements, so this enforces
                # that the memory bank holds at least K embeddings
                assert len(random_idx) >= K, "please reduce the number of centroids"
                centroids = local_memory_embeddings[j][random_idx]
            dist.broadcast(centroids, 0)
            # nmb_kmeans_iters E/M rounds, plus one extra E step to get the
            # final assignments for the converged centroids
            for n_iter in range(nmb_kmeans_iters + 1):
                # E step: assign each embedding to its closest centroid
                dot_products = torch.mm(local_memory_embeddings[j], centroids.t())
                _, local_assignments = dot_products.max(dim=1)
                # finish
                if n_iter == nmb_kmeans_iters:
                    break
                # M step: per-cluster sums and counts, reduced across ranks
                where_helper = get_indices_sparse(local_assignments.cpu().numpy())
                counts = torch.zeros(K).cuda(non_blocking=True).int()
                emb_sums = torch.zeros(K, args.feat_dim).cuda(non_blocking=True)
                for k in range(len(where_helper)):
                    if len(where_helper[k][0]) > 0:
                        emb_sums[k] = torch.sum(
                            local_memory_embeddings[j][where_helper[k][0]],
                            dim=0,
                        )
                        counts[k] = len(where_helper[k][0])
                dist.all_reduce(counts)
                mask = counts > 0
                dist.all_reduce(emb_sums)
                # empty clusters (mask False) keep their previous centroid
                centroids[mask] = emb_sums[mask] / counts[mask].unsqueeze(1)
                # normalize centroids so dot products act as cosine scores
                centroids = nn.functional.normalize(centroids, dim=1, p=2)
            getattr(model.module.prototypes, "prototypes" + str(i_K)).weight.copy_(centroids)
            # gather the assignments from all ranks
            assignments_all = torch.empty(args.world_size, local_assignments.size(0),
                                          dtype=local_assignments.dtype, device=local_assignments.device)
            assignments_all = list(assignments_all.unbind(0))
            dist_process = dist.all_gather(assignments_all, local_assignments, async_op=True)
            dist_process.wait()
            assignments_all = torch.cat(assignments_all).cpu()
            # gather the corresponding dataset indexes
            indexes_all = torch.empty(args.world_size, local_memory_index.size(0),
                                      dtype=local_memory_index.dtype, device=local_memory_index.device)
            indexes_all = list(indexes_all.unbind(0))
            dist_process = dist.all_gather(indexes_all, local_memory_index, async_op=True)
            dist_process.wait()
            indexes_all = torch.cat(indexes_all).cpu()
            # log assignments
            assignments[i_K][indexes_all] = assignments_all
            # next memory bank to use (round-robin over crops)
            j = (j + 1) % len(args.crops_for_assign)
    return assignments
def get_indices_sparse(data):
    """Return, for each value v in 0..data.max(), the indices where data == v.

    Equivalent to ``[np.where(data == v) for v in range(data.max() + 1)]``
    but computed in a single pass through a sparse matrix, which is much
    faster when the number of distinct values is large.
    """
    flat_positions = np.arange(data.size)
    # row = value, column = flat position, stored entry = flat position
    by_value = csr_matrix(
        (flat_positions, (data.ravel(), flat_positions)),
        shape=(int(data.max()) + 1, data.size),
    )
    grouped = []
    for sparse_row in by_value:
        grouped.append(np.unravel_index(sparse_row.data, data.shape))
    return grouped
# Run training only when executed as a script, not on import.
if __name__ == "__main__":
    main()
| 17,265 | 39.625882 | 123 | py |
swav | swav-main/hubconf.py | # Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import torch
from torchvision.models.resnet import resnet50 as _resnet50
from src.resnet50 import resnet50w2 as _resnet50w2
from src.resnet50 import resnet50w4 as _resnet50w4
from src.resnet50 import resnet50w5 as _resnet50w5
dependencies = ["torch", "torchvision"]
def resnet50(pretrained=True, **kwargs):
    """
    ResNet-50 pre-trained with SwAV.
    Note that `fc.weight` and `fc.bias` are randomly initialized.
    Achieves 75.3% top-1 accuracy on ImageNet when `fc` is trained.
    """
    model = _resnet50(pretrained=False, **kwargs)
    if not pretrained:
        return model
    checkpoint = torch.hub.load_state_dict_from_url(
        url="https://dl.fbaipublicfiles.com/deepcluster/swav_800ep_pretrain.pth.tar",
        map_location="cpu",
    )
    # strip the DistributedDataParallel "module." prefix
    cleaned = {key.replace("module.", ""): weight for key, weight in checkpoint.items()}
    # strict=False: the checkpoint carries no classifier (`fc`) weights
    model.load_state_dict(cleaned, strict=False)
    return model
def resnet50w2(pretrained=True, **kwargs):
    """
    ResNet-50-w2 pre-trained with SwAV.
    """
    model = _resnet50w2(**kwargs)
    if not pretrained:
        return model
    checkpoint = torch.hub.load_state_dict_from_url(
        url="https://dl.fbaipublicfiles.com/deepcluster/swav_RN50w2_400ep_pretrain.pth.tar",
        map_location="cpu",
    )
    # strip the DistributedDataParallel "module." prefix
    cleaned = {key.replace("module.", ""): weight for key, weight in checkpoint.items()}
    model.load_state_dict(cleaned, strict=False)
    return model
def resnet50w4(pretrained=True, **kwargs):
    """
    ResNet-50-w4 pre-trained with SwAV.
    """
    model = _resnet50w4(**kwargs)
    if not pretrained:
        return model
    checkpoint = torch.hub.load_state_dict_from_url(
        url="https://dl.fbaipublicfiles.com/deepcluster/swav_RN50w4_400ep_pretrain.pth.tar",
        map_location="cpu",
    )
    # strip the DistributedDataParallel "module." prefix
    cleaned = {key.replace("module.", ""): weight for key, weight in checkpoint.items()}
    model.load_state_dict(cleaned, strict=False)
    return model
def resnet50w5(pretrained=True, **kwargs):
    """
    ResNet-50-w5 pre-trained with SwAV.
    """
    model = _resnet50w5(**kwargs)
    if not pretrained:
        return model
    checkpoint = torch.hub.load_state_dict_from_url(
        url="https://dl.fbaipublicfiles.com/deepcluster/swav_RN50w5_400ep_pretrain.pth.tar",
        map_location="cpu",
    )
    # strip the DistributedDataParallel "module." prefix
    cleaned = {key.replace("module.", ""): weight for key, weight in checkpoint.items()}
    model.load_state_dict(cleaned, strict=False)
    return model
| 2,830 | 31.54023 | 96 | py |
swav | swav-main/eval_semisup.py | # Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import argparse
import os
import time
from logging import getLogger
import urllib
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim
import torch.utils.data as data
import torchvision.transforms as transforms
import torchvision.datasets as datasets
from src.utils import (
bool_flag,
initialize_exp,
restart_from_checkpoint,
fix_random_seeds,
AverageMeter,
init_distributed_mode,
accuracy,
)
import src.resnet50 as resnet_models
logger = getLogger()
parser = argparse.ArgumentParser(description="Evaluate models: Fine-tuning with 1% or 10% labels on ImageNet")
#########################
#### main parameters ####
#########################
parser.add_argument("--labels_perc", type=str, default="10", choices=["1", "10"],
help="fine-tune on either 1% or 10% of labels")
parser.add_argument("--dump_path", type=str, default=".",
help="experiment dump path for checkpoints and log")
parser.add_argument("--seed", type=int, default=31, help="seed")
parser.add_argument("--data_path", type=str, default="/path/to/imagenet",
help="path to imagenet")
parser.add_argument("--workers", default=10, type=int,
help="number of data loading workers")
#########################
#### model parameters ###
#########################
parser.add_argument("--arch", default="resnet50", type=str, help="convnet architecture")
parser.add_argument("--pretrained", default="", type=str, help="path to pretrained weights")
#########################
#### optim parameters ###
#########################
parser.add_argument("--epochs", default=20, type=int,
help="number of total epochs to run")
parser.add_argument("--batch_size", default=32, type=int,
help="batch size per gpu, i.e. how many unique instances per gpu")
parser.add_argument("--lr", default=0.01, type=float, help="initial learning rate - trunk")
parser.add_argument("--lr_last_layer", default=0.2, type=float, help="initial learning rate - head")
parser.add_argument("--decay_epochs", type=int, nargs="+", default=[12, 16],
help="Epochs at which to decay learning rate.")
parser.add_argument("--gamma", type=float, default=0.2, help="lr decay factor")
#########################
#### dist parameters ###
#########################
parser.add_argument("--dist_url", default="env://", type=str,
help="url used to set up distributed training")
parser.add_argument("--world_size", default=-1, type=int, help="""
number of processes: it is set automatically and
should not be passed as argument""")
parser.add_argument("--rank", default=0, type=int, help="""rank of this process:
it is set automatically and should not be passed as argument""")
parser.add_argument("--local_rank", default=0, type=int,
help="this argument is not used and should be ignored")
def main():
    """Entry point: fine-tune a model on 1% or 10% of ImageNet labels.

    Parses the module-level CLI, sets up distributed training, builds the
    semi-supervised training subset and val set, loads pretrained weights
    when provided, then alternates train/validation epochs with
    checkpointing on rank 0.
    """
    global args, best_acc
    args = parser.parse_args()
    init_distributed_mode(args)
    fix_random_seeds(args.seed)
    logger, training_stats = initialize_exp(
        args, "epoch", "loss", "prec1", "prec5", "loss_val", "prec1_val", "prec5_val"
    )
    # build data
    train_data_path = os.path.join(args.data_path, "train")
    train_dataset = datasets.ImageFolder(train_data_path)
    # take either 1% or 10% of images
    # NOTE(review): the subset list is fetched over the network from the
    # official SimCLR repository, so startup requires internet access.
    subset_file = urllib.request.urlopen("https://raw.githubusercontent.com/google-research/simclr/master/imagenet_subsets/" + str(args.labels_perc) + "percent.txt")
    list_imgs = [li.decode("utf-8").split('\n')[0] for li in subset_file]
    # file names look like "<wnid>_<num>.JPEG": the class folder is the
    # prefix before the first underscore
    train_dataset.samples = [(
        os.path.join(train_data_path, li.split('_')[0], li),
        train_dataset.class_to_idx[li.split('_')[0]]
    ) for li in list_imgs]
    val_dataset = datasets.ImageFolder(os.path.join(args.data_path, "val"))
    # NOTE(review): std[0] is 0.228 rather than the usual 0.229 — kept
    # as-is since the same constant is used throughout this repo.
    tr_normalize = transforms.Normalize(
        mean=[0.485, 0.456, 0.406], std=[0.228, 0.224, 0.225]
    )
    train_dataset.transform = transforms.Compose([
        transforms.RandomResizedCrop(224),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        tr_normalize,
    ])
    val_dataset.transform = transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        tr_normalize,
    ])
    sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)
    train_loader = torch.utils.data.DataLoader(
        train_dataset,
        sampler=sampler,
        batch_size=args.batch_size,
        num_workers=args.workers,
        pin_memory=True,
    )
    val_loader = torch.utils.data.DataLoader(
        val_dataset,
        batch_size=args.batch_size,
        num_workers=args.workers,
        pin_memory=True,
    )
    logger.info("Building data done with {} images loaded.".format(len(train_dataset)))
    # build model with a fresh 1000-way classification head
    model = resnet_models.__dict__[args.arch](output_dim=1000)
    # convert batch norm layers
    model = nn.SyncBatchNorm.convert_sync_batchnorm(model)
    # load weights
    if os.path.isfile(args.pretrained):
        state_dict = torch.load(args.pretrained, map_location="cuda:" + str(args.gpu_to_work_on))
        if "state_dict" in state_dict:
            state_dict = state_dict["state_dict"]
        # remove prefixe "module."
        state_dict = {k.replace("module.", ""): v for k, v in state_dict.items()}
        # keep the randomly initialized value for any key that is missing
        # from or shape-mismatched with the checkpoint (e.g. the new head)
        for k, v in model.state_dict().items():
            if k not in list(state_dict):
                logger.info('key "{}" could not be found in provided state dict'.format(k))
            elif state_dict[k].shape != v.shape:
                logger.info('key "{}" is of different shape in model and provided state dict'.format(k))
                state_dict[k] = v
        msg = model.load_state_dict(state_dict, strict=False)
        logger.info("Load pretrained model with msg: {}".format(msg))
    else:
        logger.info("No pretrained weights found => training from random weights")
    # model to gpu
    model = model.cuda()
    model = nn.parallel.DistributedDataParallel(
        model,
        device_ids=[args.gpu_to_work_on],
        find_unused_parameters=True,
    )
    # set optimizer: the head gets its own (larger) learning rate
    trunk_parameters = []
    head_parameters = []
    for name, param in model.named_parameters():
        if 'head' in name:
            head_parameters.append(param)
        else:
            trunk_parameters.append(param)
    optimizer = torch.optim.SGD(
        [{'params': trunk_parameters},
         {'params': head_parameters, 'lr': args.lr_last_layer}],
        lr=args.lr,
        momentum=0.9,
        weight_decay=0,
    )
    # set scheduler
    scheduler = torch.optim.lr_scheduler.MultiStepLR(
        optimizer, args.decay_epochs, gamma=args.gamma
    )
    # Optionally resume from a checkpoint
    to_restore = {"epoch": 0, "best_acc": (0., 0.)}
    restart_from_checkpoint(
        os.path.join(args.dump_path, "checkpoint.pth.tar"),
        run_variables=to_restore,
        state_dict=model,
        optimizer=optimizer,
        scheduler=scheduler,
    )
    start_epoch = to_restore["epoch"]
    best_acc = to_restore["best_acc"]
    cudnn.benchmark = True
    for epoch in range(start_epoch, args.epochs):
        # train the network for one epoch
        logger.info("============ Starting epoch %i ... ============" % epoch)
        # set samplers
        train_loader.sampler.set_epoch(epoch)
        scores = train(model, optimizer, train_loader, epoch)
        scores_val = validate_network(val_loader, model)
        training_stats.update(scores + scores_val)
        scheduler.step()
        # save checkpoint (rank 0 only)
        if args.rank == 0:
            save_dict = {
                "epoch": epoch + 1,
                "state_dict": model.state_dict(),
                "optimizer": optimizer.state_dict(),
                "scheduler": scheduler.state_dict(),
                "best_acc": best_acc,
            }
            torch.save(save_dict, os.path.join(args.dump_path, "checkpoint.pth.tar"))
    logger.info("Fine-tuning with {}% of labels completed.\n"
                "Test accuracies: top-1 {acc1:.1f}, top-5 {acc5:.1f}".format(
                args.labels_perc, acc1=best_acc[0], acc5=best_acc[1]))
def train(model, optimizer, loader, epoch):
    """
    Train the models on the dataset.

    Runs one epoch of supervised fine-tuning and returns
    (epoch, avg loss, avg top-1, avg top-5) for the training-stats logger.
    """
    # running statistics
    batch_time = AverageMeter()
    data_time = AverageMeter()
    # training statistics
    top1 = AverageMeter()
    top5 = AverageMeter()
    losses = AverageMeter()
    end = time.perf_counter()
    model.train()
    criterion = nn.CrossEntropyLoss().cuda()
    for iter_epoch, (inp, target) in enumerate(loader):
        # measure data loading time
        data_time.update(time.perf_counter() - end)
        # move to gpu
        inp = inp.cuda(non_blocking=True)
        target = target.cuda(non_blocking=True)
        # forward
        output = model(inp)
        # compute cross entropy loss
        loss = criterion(output, target)
        # compute the gradients
        optimizer.zero_grad()
        loss.backward()
        # step
        optimizer.step()
        # update stats
        acc1, acc5 = accuracy(output, target, topk=(1, 5))
        losses.update(loss.item(), inp.size(0))
        top1.update(acc1[0], inp.size(0))
        top5.update(acc5[0], inp.size(0))
        batch_time.update(time.perf_counter() - end)
        end = time.perf_counter()
        # verbose: log every 50 iterations on the main process only
        if args.rank == 0 and iter_epoch % 50 == 0:
            logger.info(
                "Epoch[{0}] - Iter: [{1}/{2}]\t"
                "Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t"
                "Data {data_time.val:.3f} ({data_time.avg:.3f})\t"
                "Loss {loss.val:.4f} ({loss.avg:.4f})\t"
                "Prec {top1.val:.3f} ({top1.avg:.3f})\t"
                "LR trunk {lr}\t"
                "LR head {lr_W}".format(
                    epoch,
                    iter_epoch,
                    len(loader),
                    batch_time=batch_time,
                    data_time=data_time,
                    loss=losses,
                    top1=top1,
                    lr=optimizer.param_groups[0]["lr"],
                    lr_W=optimizer.param_groups[1]["lr"],
                )
            )
    return epoch, losses.avg, top1.avg.item(), top5.avg.item()
def validate_network(val_loader, model):
    """Evaluate on the validation set and update the global ``best_acc``.

    Returns (avg loss, avg top-1, avg top-5).
    NOTE(review): statistics are computed per process; assumes val_loader
    is not sharded across ranks — confirm against the caller.
    """
    batch_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()
    global best_acc
    # switch to evaluate mode
    model.eval()
    criterion = nn.CrossEntropyLoss().cuda()
    with torch.no_grad():
        end = time.perf_counter()
        for i, (inp, target) in enumerate(val_loader):
            # move to gpu
            inp = inp.cuda(non_blocking=True)
            target = target.cuda(non_blocking=True)
            # compute output
            output = model(inp)
            loss = criterion(output, target)
            acc1, acc5 = accuracy(output, target, topk=(1, 5))
            losses.update(loss.item(), inp.size(0))
            top1.update(acc1[0], inp.size(0))
            top5.update(acc5[0], inp.size(0))
            # measure elapsed time
            batch_time.update(time.perf_counter() - end)
            end = time.perf_counter()
    # keep track of the best (top-1, top-5) pair seen so far
    if top1.avg.item() > best_acc[0]:
        best_acc = (top1.avg.item(), top5.avg.item())
    if args.rank == 0:
        logger.info(
            "Test:\t"
            "Time {batch_time.avg:.3f}\t"
            "Loss {loss.avg:.4f}\t"
            "Acc@1 {top1.avg:.3f}\t"
            "Best Acc@1 so far {acc:.1f}".format(
                batch_time=batch_time, loss=losses, top1=top1, acc=best_acc[0]))
    return losses.avg, top1.avg.item(), top5.avg.item()
# Run fine-tuning only when executed as a script, not on import.
if __name__ == "__main__":
    main()
| 12,149 | 33.615385 | 165 | py |
swav | swav-main/main_swav.py | # Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import argparse
import math
import os
import shutil
import time
from logging import getLogger
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.optim
import apex
from apex.parallel.LARC import LARC
from src.utils import (
bool_flag,
initialize_exp,
restart_from_checkpoint,
fix_random_seeds,
AverageMeter,
init_distributed_mode,
)
from src.multicropdataset import MultiCropDataset
import src.resnet50 as resnet_models
logger = getLogger()
parser = argparse.ArgumentParser(description="Implementation of SwAV")
#########################
#### data parameters ####
#########################
parser.add_argument("--data_path", type=str, default="/path/to/imagenet",
help="path to dataset repository")
parser.add_argument("--nmb_crops", type=int, default=[2], nargs="+",
help="list of number of crops (example: [2, 6])")
parser.add_argument("--size_crops", type=int, default=[224], nargs="+",
help="crops resolutions (example: [224, 96])")
parser.add_argument("--min_scale_crops", type=float, default=[0.14], nargs="+",
help="argument in RandomResizedCrop (example: [0.14, 0.05])")
parser.add_argument("--max_scale_crops", type=float, default=[1], nargs="+",
help="argument in RandomResizedCrop (example: [1., 0.14])")
#########################
## swav specific params #
#########################
parser.add_argument("--crops_for_assign", type=int, nargs="+", default=[0, 1],
help="list of crops id used for computing assignments")
parser.add_argument("--temperature", default=0.1, type=float,
help="temperature parameter in training loss")
parser.add_argument("--epsilon", default=0.05, type=float,
help="regularization parameter for Sinkhorn-Knopp algorithm")
parser.add_argument("--sinkhorn_iterations", default=3, type=int,
help="number of iterations in Sinkhorn-Knopp algorithm")
parser.add_argument("--feat_dim", default=128, type=int,
help="feature dimension")
parser.add_argument("--nmb_prototypes", default=3000, type=int,
help="number of prototypes")
parser.add_argument("--queue_length", type=int, default=0,
help="length of the queue (0 for no queue)")
parser.add_argument("--epoch_queue_starts", type=int, default=15,
help="from this epoch, we start using a queue")
#########################
#### optim parameters ###
#########################
parser.add_argument("--epochs", default=100, type=int,
help="number of total epochs to run")
parser.add_argument("--batch_size", default=64, type=int,
help="batch size per gpu, i.e. how many unique instances per gpu")
parser.add_argument("--base_lr", default=4.8, type=float, help="base learning rate")
parser.add_argument("--final_lr", type=float, default=0, help="final learning rate")
parser.add_argument("--freeze_prototypes_niters", default=313, type=int,
help="freeze the prototypes during this many iterations from the start")
parser.add_argument("--wd", default=1e-6, type=float, help="weight decay")
parser.add_argument("--warmup_epochs", default=10, type=int, help="number of warmup epochs")
parser.add_argument("--start_warmup", default=0, type=float,
help="initial warmup learning rate")
#########################
#### dist parameters ###
#########################
parser.add_argument("--dist_url", default="env://", type=str, help="""url used to set up distributed
training; see https://pytorch.org/docs/stable/distributed.html""")
parser.add_argument("--world_size", default=-1, type=int, help="""
number of processes: it is set automatically and
should not be passed as argument""")
parser.add_argument("--rank", default=0, type=int, help="""rank of this process:
it is set automatically and should not be passed as argument""")
parser.add_argument("--local_rank", default=0, type=int,
help="this argument is not used and should be ignored")
#########################
#### other parameters ###
#########################
parser.add_argument("--arch", default="resnet50", type=str, help="convnet architecture")
parser.add_argument("--hidden_mlp", default=2048, type=int,
help="hidden layer dimension in projection head")
parser.add_argument("--workers", default=10, type=int,
help="number of data loading workers")
parser.add_argument("--checkpoint_freq", type=int, default=25,
help="Save the model periodically")
parser.add_argument("--use_fp16", type=bool_flag, default=True,
help="whether to train with mixed precision or not")
parser.add_argument("--sync_bn", type=str, default="pytorch", help="synchronize bn")
parser.add_argument("--syncbn_process_group_size", type=int, default=8, help=""" see
https://github.com/NVIDIA/apex/blob/master/apex/parallel/__init__.py#L58-L67""")
parser.add_argument("--dump_path", type=str, default=".",
help="experiment dump path for checkpoints and log")
parser.add_argument("--seed", type=int, default=31, help="seed")
def main():
    """Entry point: self-supervised SwAV pre-training.

    Builds the multi-crop dataset, a ResNet + prototypes model, a LARC-SGD
    optimizer with linear warmup and cosine decay, optional apex mixed
    precision and an optional feature queue, then trains for args.epochs
    with checkpointing on rank 0.
    """
    global args
    args = parser.parse_args()
    init_distributed_mode(args)
    fix_random_seeds(args.seed)
    logger, training_stats = initialize_exp(args, "epoch", "loss")
    # build data
    train_dataset = MultiCropDataset(
        args.data_path,
        args.size_crops,
        args.nmb_crops,
        args.min_scale_crops,
        args.max_scale_crops,
    )
    sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)
    train_loader = torch.utils.data.DataLoader(
        train_dataset,
        sampler=sampler,
        batch_size=args.batch_size,
        num_workers=args.workers,
        pin_memory=True,
        drop_last=True
    )
    logger.info("Building data done with {} images loaded.".format(len(train_dataset)))
    # build model
    model = resnet_models.__dict__[args.arch](
        normalize=True,
        hidden_mlp=args.hidden_mlp,
        output_dim=args.feat_dim,
        nmb_prototypes=args.nmb_prototypes,
    )
    # synchronize batch norm layers
    if args.sync_bn == "pytorch":
        model = nn.SyncBatchNorm.convert_sync_batchnorm(model)
    elif args.sync_bn == "apex":
        # with apex syncbn we sync bn per group because it speeds up computation
        # compared to global syncbn
        process_group = apex.parallel.create_syncbn_process_group(args.syncbn_process_group_size)
        model = apex.parallel.convert_syncbn_model(model, process_group=process_group)
    # copy model to GPU
    model = model.cuda()
    if args.rank == 0:
        logger.info(model)
    logger.info("Building model done.")
    # build optimizer
    optimizer = torch.optim.SGD(
        model.parameters(),
        lr=args.base_lr,
        momentum=0.9,
        weight_decay=args.wd,
    )
    # LARC adapts the lr per layer (trust ratio), needed for large-batch SGD
    optimizer = LARC(optimizer=optimizer, trust_coefficient=0.001, clip=False)
    # precompute one lr value per iteration: linear warmup then cosine decay
    warmup_lr_schedule = np.linspace(args.start_warmup, args.base_lr, len(train_loader) * args.warmup_epochs)
    iters = np.arange(len(train_loader) * (args.epochs - args.warmup_epochs))
    cosine_lr_schedule = np.array([args.final_lr + 0.5 * (args.base_lr - args.final_lr) * (1 + \
                         math.cos(math.pi * t / (len(train_loader) * (args.epochs - args.warmup_epochs)))) for t in iters])
    lr_schedule = np.concatenate((warmup_lr_schedule, cosine_lr_schedule))
    logger.info("Building optimizer done.")
    # init mixed precision
    if args.use_fp16:
        model, optimizer = apex.amp.initialize(model, optimizer, opt_level="O1")
        logger.info("Initializing mixed precision done.")
    # wrap model
    model = nn.parallel.DistributedDataParallel(
        model,
        device_ids=[args.gpu_to_work_on]
    )
    # optionally resume from a checkpoint
    to_restore = {"epoch": 0}
    restart_from_checkpoint(
        os.path.join(args.dump_path, "checkpoint.pth.tar"),
        run_variables=to_restore,
        state_dict=model,
        optimizer=optimizer,
        amp=apex.amp,
    )
    start_epoch = to_restore["epoch"]
    # build the queue (one per rank, persisted next to the checkpoint)
    queue = None
    queue_path = os.path.join(args.dump_path, "queue" + str(args.rank) + ".pth")
    if os.path.isfile(queue_path):
        queue = torch.load(queue_path)["queue"]
    # the queue needs to be divisible by the batch size
    args.queue_length -= args.queue_length % (args.batch_size * args.world_size)
    cudnn.benchmark = True
    for epoch in range(start_epoch, args.epochs):
        # train the network for one epoch
        logger.info("============ Starting epoch %i ... ============" % epoch)
        # set sampler
        train_loader.sampler.set_epoch(epoch)
        # optionally starts a queue
        if args.queue_length > 0 and epoch >= args.epoch_queue_starts and queue is None:
            queue = torch.zeros(
                len(args.crops_for_assign),
                args.queue_length // args.world_size,
                args.feat_dim,
            ).cuda()
        # train the network
        scores, queue = train(train_loader, model, optimizer, epoch, lr_schedule, queue)
        training_stats.update(scores)
        # save checkpoints (rank 0 only; periodic snapshots under checkpoints/)
        if args.rank == 0:
            save_dict = {
                "epoch": epoch + 1,
                "state_dict": model.state_dict(),
                "optimizer": optimizer.state_dict(),
            }
            if args.use_fp16:
                save_dict["amp"] = apex.amp.state_dict()
            torch.save(
                save_dict,
                os.path.join(args.dump_path, "checkpoint.pth.tar"),
            )
            if epoch % args.checkpoint_freq == 0 or epoch == args.epochs - 1:
                shutil.copyfile(
                    os.path.join(args.dump_path, "checkpoint.pth.tar"),
                    os.path.join(args.dump_checkpoints, "ckp-" + str(epoch) + ".pth"),
                )
        if queue is not None:
            torch.save({"queue": queue}, queue_path)
def train(train_loader, model, optimizer, epoch, lr_schedule, queue):
    """Train SwAV for one epoch.

    Returns ((epoch, avg loss), queue) so a queue created or filled during
    the epoch survives to the next one.
    """
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    model.train()
    use_the_queue = False
    end = time.time()
    for it, inputs in enumerate(train_loader):
        # measure data loading time
        data_time.update(time.time() - end)
        # update learning rate (precomputed per-iteration schedule)
        iteration = epoch * len(train_loader) + it
        for param_group in optimizer.param_groups:
            param_group["lr"] = lr_schedule[iteration]
        # normalize the prototypes so scores behave as cosine similarities
        with torch.no_grad():
            w = model.module.prototypes.weight.data.clone()
            w = nn.functional.normalize(w, dim=1, p=2)
            model.module.prototypes.weight.copy_(w)
        # ============ multi-res forward passes ... ============
        embedding, output = model(inputs)
        embedding = embedding.detach()
        bs = inputs[0].size(0)
        # ============ swav loss ... ============
        loss = 0
        for i, crop_id in enumerate(args.crops_for_assign):
            with torch.no_grad():
                out = output[bs * crop_id: bs * (crop_id + 1)].detach()
                # time to use the queue: start consuming it once it has been
                # filled (its last row is no longer all-zero)
                if queue is not None:
                    if use_the_queue or not torch.all(queue[i, -1, :] == 0):
                        use_the_queue = True
                        out = torch.cat((torch.mm(
                            queue[i],
                            model.module.prototypes.weight.t()
                        ), out))
                    # fill the queue (FIFO: newest embeddings at the front)
                    queue[i, bs:] = queue[i, :-bs].clone()
                    queue[i, :bs] = embedding[crop_id * bs: (crop_id + 1) * bs]
                # get assignments; keep only the rows of the current batch
                q = distributed_sinkhorn(out)[-bs:]
            # cluster assignment prediction: every other crop must predict
            # the assignment q computed from this crop
            subloss = 0
            for v in np.delete(np.arange(np.sum(args.nmb_crops)), crop_id):
                x = output[bs * v: bs * (v + 1)] / args.temperature
                subloss -= torch.mean(torch.sum(q * F.log_softmax(x, dim=1), dim=1))
            loss += subloss / (np.sum(args.nmb_crops) - 1)
        loss /= len(args.crops_for_assign)
        # ============ backward and optim step ... ============
        optimizer.zero_grad()
        if args.use_fp16:
            with apex.amp.scale_loss(loss, optimizer) as scaled_loss:
                scaled_loss.backward()
        else:
            loss.backward()
        # cancel gradients for the prototypes during the first iterations
        if iteration < args.freeze_prototypes_niters:
            for name, p in model.named_parameters():
                if "prototypes" in name:
                    p.grad = None
        optimizer.step()
        # ============ misc ... ============
        losses.update(loss.item(), inputs[0].size(0))
        batch_time.update(time.time() - end)
        end = time.time()
        if args.rank ==0 and it % 50 == 0:
            logger.info(
                "Epoch: [{0}][{1}]\t"
                "Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t"
                "Data {data_time.val:.3f} ({data_time.avg:.3f})\t"
                "Loss {loss.val:.4f} ({loss.avg:.4f})\t"
                "Lr: {lr:.4f}".format(
                    epoch,
                    it,
                    batch_time=batch_time,
                    data_time=data_time,
                    loss=losses,
                    lr=optimizer.optim.param_groups[0]["lr"],
                )
            )
    return (epoch, losses.avg), queue
@torch.no_grad()
def distributed_sinkhorn(out):
    """Sinkhorn-Knopp: turn prototype scores into a balanced soft assignment.

    ``out`` holds this rank's (batch, K) prototype scores.  Returns a
    (batch, K) matrix whose rows sum to 1 and whose prototype totals,
    summed over all ranks, are uniform — computed with
    ``args.sinkhorn_iterations`` rounds of alternating row/column
    normalization under entropic regularization ``args.epsilon``.
    """
    Q = torch.exp(out / args.epsilon).t() # Q is K-by-B for consistency with notations from our paper
    B = Q.shape[1] * args.world_size # number of samples to assign
    K = Q.shape[0] # how many prototypes
    # make the matrix sums to 1 (global sum across all ranks)
    sum_Q = torch.sum(Q)
    dist.all_reduce(sum_Q)
    Q /= sum_Q
    for it in range(args.sinkhorn_iterations):
        # normalize each row: total weight per prototype must be 1/K
        sum_of_rows = torch.sum(Q, dim=1, keepdim=True)
        dist.all_reduce(sum_of_rows)
        Q /= sum_of_rows
        Q /= K
        # normalize each column: total weight per sample must be 1/B
        Q /= torch.sum(Q, dim=0, keepdim=True)
        Q /= B
    Q *= B # the columns must sum to 1 so that Q is an assignment
    return Q.t()
# Run SwAV pre-training only when executed as a script, not on import.
if __name__ == "__main__":
    main()
| 14,998 | 38.367454 | 123 | py |
swav | swav-main/src/multicropdataset.py | # Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import random
from logging import getLogger
from PIL import ImageFilter
import numpy as np
import torchvision.datasets as datasets
import torchvision.transforms as transforms
logger = getLogger()
class MultiCropDataset(datasets.ImageFolder):
    """ImageFolder variant that returns several augmented crops per image.

    For each entry i, ``nmb_crops[i]`` crops of resolution ``size_crops[i]``
    are taken with RandomResizedCrop scale (min_scale_crops[i],
    max_scale_crops[i]), then randomly flipped, color-distorted, blurred
    and normalized.  ``__getitem__`` returns the list of crop tensors,
    optionally preceded by the sample index.
    """

    def __init__(
        self,
        data_path,
        size_crops,
        nmb_crops,
        min_scale_crops,
        max_scale_crops,
        size_dataset=-1,
        return_index=False,
    ):
        super(MultiCropDataset, self).__init__(data_path)
        # the three per-resolution lists must be parallel to size_crops
        assert len(size_crops) == len(nmb_crops)
        assert len(min_scale_crops) == len(nmb_crops)
        assert len(max_scale_crops) == len(nmb_crops)
        # optionally truncate the dataset (debugging / ablations)
        if size_dataset >= 0:
            self.samples = self.samples[:size_dataset]
        self.return_index = return_index
        color_transform = [get_color_distortion(), PILRandomGaussianBlur()]
        # NOTE(review): std[0] is 0.228 (usual ImageNet value is 0.229);
        # kept consistent with the rest of the repo and released checkpoints.
        mean = [0.485, 0.456, 0.406]
        std = [0.228, 0.224, 0.225]
        trans = []
        # one transform pipeline per resolution, repeated nmb_crops[i] times
        for i in range(len(size_crops)):
            randomresizedcrop = transforms.RandomResizedCrop(
                size_crops[i],
                scale=(min_scale_crops[i], max_scale_crops[i]),
            )
            trans.extend([transforms.Compose([
                randomresizedcrop,
                transforms.RandomHorizontalFlip(p=0.5),
                transforms.Compose(color_transform),
                transforms.ToTensor(),
                transforms.Normalize(mean=mean, std=std)])
            ] * nmb_crops[i])
        self.trans = trans

    def __getitem__(self, index):
        """Return the list of augmented crops (and the index if requested)."""
        path, _ = self.samples[index]
        image = self.loader(path)
        # apply every crop pipeline to the same loaded PIL image
        multi_crops = list(map(lambda trans: trans(image), self.trans))
        if self.return_index:
            return index, multi_crops
        return multi_crops
class PILRandomGaussianBlur(object):
    """Randomly apply Gaussian blur to a PIL image.

    With probability ``p`` the image is blurred with a radius drawn
    uniformly from ``[radius_min, radius_max]``; otherwise it is returned
    unchanged.  This transform was used in SimCLR -
    https://arxiv.org/abs/2002.05709
    """

    def __init__(self, p=0.5, radius_min=0.1, radius_max=2.):
        self.prob = p
        self.radius_min = radius_min
        self.radius_max = radius_max

    def __call__(self, img):
        # skip the blur with probability (1 - prob)
        if np.random.rand() > self.prob:
            return img
        blur_radius = random.uniform(self.radius_min, self.radius_max)
        return img.filter(ImageFilter.GaussianBlur(radius=blur_radius))
def get_color_distortion(s=1.0):
    """Build the SimCLR-style color distortion pipeline.

    s is the strength of the distortion: jitter (applied with p=0.8)
    followed by random grayscale (p=0.2).
    """
    jitter = transforms.ColorJitter(0.8 * s, 0.8 * s, 0.8 * s, 0.2 * s)
    return transforms.Compose([
        transforms.RandomApply([jitter], p=0.8),
        transforms.RandomGrayscale(p=0.2),
    ])
| 3,029 | 30.894737 | 76 | py |
swav | swav-main/src/utils.py | # Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import argparse
from logging import getLogger
import pickle
import os
import numpy as np
import torch
from .logger import create_logger, PD_Stats
import torch.distributed as dist
FALSY_STRINGS = {"off", "false", "0"}
TRUTHY_STRINGS = {"on", "true", "1"}


logger = getLogger()


def bool_flag(s):
    """Parse a boolean command-line argument.

    Accepts "on"/"true"/"1" and "off"/"false"/"0" (case-insensitive);
    any other value raises ``argparse.ArgumentTypeError``.
    """
    lowered = s.lower()
    if lowered in TRUTHY_STRINGS:
        return True
    if lowered in FALSY_STRINGS:
        return False
    raise argparse.ArgumentTypeError("invalid value for a boolean flag")
def init_distributed_mode(args):
    """
    Initialize the following variables:
        - world_size
        - rank

    Handles both SLURM jobs (SLURM_* env variables) and jobs launched with
    torch.distributed.launch (RANK / WORLD_SIZE env variables), then starts
    the NCCL process group and pins this process to a single GPU.
    """
    args.is_slurm_job = "SLURM_JOB_ID" in os.environ

    if args.is_slurm_job:
        args.rank = int(os.environ["SLURM_PROCID"])
        # SLURM_TASKS_PER_NODE looks like "8", "8(x2)" or "8,4": parse the
        # leading count instead of only the first character, which silently
        # broke for task counts >= 10 (e.g. "16" was read as 1).
        tasks_per_node = int(
            os.environ["SLURM_TASKS_PER_NODE"].split(",")[0].split("(")[0]
        )
        args.world_size = int(os.environ["SLURM_NNODES"]) * tasks_per_node
    else:
        # multi-GPU job (local or multi-node) - jobs started with torch.distributed.launch
        # read environment variables
        args.rank = int(os.environ["RANK"])
        args.world_size = int(os.environ["WORLD_SIZE"])

    # prepare distributed
    dist.init_process_group(
        backend="nccl",
        init_method=args.dist_url,
        world_size=args.world_size,
        rank=args.rank,
    )

    # set cuda device
    args.gpu_to_work_on = args.rank % torch.cuda.device_count()
    torch.cuda.set_device(args.gpu_to_work_on)
    return
def initialize_exp(params, *args, dump_params=True):
    """
    Initialize the experience:
    - dump parameters
    - create checkpoint repo
    - create a logger
    - create a panda object to keep track of the training statistics
    """
    # dump parameters; use a context manager so the file handle is closed
    # deterministically (the previous open() call was never closed)
    if dump_params:
        with open(os.path.join(params.dump_path, "params.pkl"), "wb") as f:
            pickle.dump(params, f)

    # create repo to store checkpoints (only on rank 0)
    params.dump_checkpoints = os.path.join(params.dump_path, "checkpoints")
    if not params.rank and not os.path.isdir(params.dump_checkpoints):
        os.mkdir(params.dump_checkpoints)

    # create a panda object to log loss and acc (one stats file per rank)
    training_stats = PD_Stats(
        os.path.join(params.dump_path, "stats" + str(params.rank) + ".pkl"), args
    )

    # create a logger
    logger = create_logger(
        os.path.join(params.dump_path, "train.log"), rank=params.rank
    )
    logger.info("============ Initialized logger ============")
    logger.info(
        "\n".join("%s: %s" % (k, str(v)) for k, v in sorted(dict(vars(params)).items()))
    )
    logger.info("The experiment will be stored in %s\n" % params.dump_path)
    logger.info("")
    return logger, training_stats
def restart_from_checkpoint(ckp_paths, run_variables=None, **kwargs):
    """
    Re-start from checkpoint.

    Args:
        ckp_paths: a single path or a list of candidate paths (the first
            existing one is used).
        run_variables: dict whose keys are looked up in the checkpoint and
            overwritten in place with the stored values.
        **kwargs: mapping of checkpoint key -> object with `load_state_dict`
            (e.g. {'state_dict': model}).
    """
    # look for a checkpoint in exp repository
    if isinstance(ckp_paths, list):
        # pick the first existing path; previously an empty list (or a list
        # with no existing file) could leave `ckp_path` unbound / wrong
        ckp_path = next((p for p in ckp_paths if os.path.isfile(p)), None)
        if ckp_path is None:
            return
    else:
        ckp_path = ckp_paths

    if not os.path.isfile(ckp_path):
        return

    logger.info("Found checkpoint at {}".format(ckp_path))

    # open checkpoint file; map onto this process' GPU
    checkpoint = torch.load(
        ckp_path, map_location="cuda:" + str(torch.distributed.get_rank() % torch.cuda.device_count())
    )

    # key is what to look for in the checkpoint file
    # value is the object to load
    # example: {'state_dict': model}
    for key, value in kwargs.items():
        if key in checkpoint and value is not None:
            try:
                msg = value.load_state_dict(checkpoint[key], strict=False)
                print(msg)
            except TypeError:
                # some objects (e.g. optimizers) do not accept `strict`
                msg = value.load_state_dict(checkpoint[key])
            logger.info("=> loaded {} from checkpoint '{}'".format(key, ckp_path))
        else:
            logger.warning(
                "=> failed to load {} from checkpoint '{}'".format(key, ckp_path)
            )

    # re load variable important for the run
    if run_variables is not None:
        for var_name in run_variables:
            if var_name in checkpoint:
                run_variables[var_name] = checkpoint[var_name]
def fix_random_seeds(seed=31):
    """Seed torch (CPU and all CUDA devices) and numpy for reproducibility."""
    for seeder in (torch.manual_seed, torch.cuda.manual_seed_all, np.random.seed):
        seeder(seed)
class AverageMeter(object):
    """Tracks the most recent value and a running (count-weighted) average."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Clear all statistics."""
        self.val = 0
        self.sum = 0
        self.count = 0
        self.avg = 0

    def update(self, val, n=1):
        """Record `val` observed `n` times and refresh the running average."""
        self.val = val
        self.count += n
        self.sum += val * n
        self.avg = self.sum / self.count
def accuracy(output, target, topk=(1,)):
    """Computes the accuracy over the k top predictions for the specified values of k.

    Args:
        output: (batch, num_classes) tensor of class scores.
        target: (batch,) tensor of ground-truth class indices.
        topk: iterable of k values to report.

    Returns:
        List of one-element tensors, one per k, with the top-k accuracy in percent.
    """
    with torch.no_grad():
        maxk = max(topk)
        batch_size = target.size(0)

        _, pred = output.topk(maxk, 1, True, True)
        pred = pred.t()
        correct = pred.eq(target.view(1, -1).expand_as(pred))

        res = []
        for k in topk:
            # `reshape` instead of `view`: the slice of the transposed tensor
            # is not guaranteed to be contiguous, and `view` raises a runtime
            # error on non-contiguous tensors in recent PyTorch versions.
            correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
            res.append(correct_k.mul_(100.0 / batch_size))
        return res
# --- file: swav-main/src/resnet50.py ---
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import torch
import torch.nn as nn
def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
    """3x3 convolution with padding (padding equals the dilation, no bias)."""
    return nn.Conv2d(
        in_planes, out_planes,
        kernel_size=3, stride=stride,
        padding=dilation, dilation=dilation,
        groups=groups, bias=False,
    )
def conv1x1(in_planes, out_planes, stride=1):
    """1x1 (pointwise) convolution without bias."""
    kwargs = dict(kernel_size=1, stride=stride, bias=False)
    return nn.Conv2d(in_planes, out_planes, **kwargs)
class BasicBlock(nn.Module):
    """Standard two-layer residual block: 3x3 conv -> 3x3 conv plus skip connection."""

    expansion = 1
    __constants__ = ["downsample"]

    def __init__(
        self,
        inplanes,
        planes,
        stride=1,
        downsample=None,
        groups=1,
        base_width=64,
        dilation=1,
        norm_layer=None,
    ):
        super(BasicBlock, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        if groups != 1 or base_width != 64:
            raise ValueError("BasicBlock only supports groups=1 and base_width=64")
        if dilation > 1:
            raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
        # Both self.conv1 and self.downsample layers downsample the input when stride != 1
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = norm_layer(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = norm_layer(planes)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        # Shortcut branch: identity, or a projection when shapes change.
        shortcut = x if self.downsample is None else self.downsample(x)

        y = self.relu(self.bn1(self.conv1(x)))
        y = self.bn2(self.conv2(y))

        y += shortcut
        return self.relu(y)
class Bottleneck(nn.Module):
    """Three-layer bottleneck residual block: 1x1 reduce -> 3x3 -> 1x1 expand (x4)."""

    expansion = 4
    __constants__ = ["downsample"]

    def __init__(
        self,
        inplanes,
        planes,
        stride=1,
        downsample=None,
        groups=1,
        base_width=64,
        dilation=1,
        norm_layer=None,
    ):
        super(Bottleneck, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        # Inner width scales with base_width and groups (ResNeXt-style).
        width = int(planes * (base_width / 64.0)) * groups
        # Both self.conv2 and self.downsample layers downsample the input when stride != 1
        self.conv1 = conv1x1(inplanes, width)
        self.bn1 = norm_layer(width)
        self.conv2 = conv3x3(width, width, stride, groups, dilation)
        self.bn2 = norm_layer(width)
        self.conv3 = conv1x1(width, planes * self.expansion)
        self.bn3 = norm_layer(planes * self.expansion)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        # Shortcut branch: identity, or a projection when shapes change.
        shortcut = x if self.downsample is None else self.downsample(x)

        y = self.relu(self.bn1(self.conv1(x)))
        y = self.relu(self.bn2(self.conv2(y)))
        y = self.bn3(self.conv3(y))

        y += shortcut
        return self.relu(y)
class ResNet(nn.Module):
    """SwAV-style ResNet backbone.

    Extends the torchvision ResNet with:
      - a ``widen`` factor multiplying all channel counts,
      - optional L2-normalization of the head output (``normalize``),
      - an optional linear or 2-layer MLP projection head
        (``output_dim`` / ``hidden_mlp``),
      - an optional prototypes layer (``nmb_prototypes``: an int gives one
        linear head, a list gives a ``MultiPrototypes`` module),
      - ``eval_mode`` makes ``forward_backbone`` return the raw conv feature
        map instead of pooled features,
      - multi-crop support in ``forward`` (accepts a list of batches of
        different resolutions and concatenates same-resolution crops).
    """

    def __init__(
        self,
        block,
        layers,
        zero_init_residual=False,
        groups=1,
        widen=1,
        width_per_group=64,
        replace_stride_with_dilation=None,
        norm_layer=None,
        normalize=False,
        output_dim=0,
        hidden_mlp=0,
        nmb_prototypes=0,
        eval_mode=False,
    ):
        super(ResNet, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        self._norm_layer = norm_layer
        self.eval_mode = eval_mode
        # Explicit zero-padding applied before conv1 (see note on padding below).
        self.padding = nn.ConstantPad2d(1, 0.0)
        self.inplanes = width_per_group * widen
        self.dilation = 1
        if replace_stride_with_dilation is None:
            # each element in the tuple indicates if we should replace
            # the 2x2 stride with a dilated convolution instead
            replace_stride_with_dilation = [False, False, False]
        if len(replace_stride_with_dilation) != 3:
            raise ValueError(
                "replace_stride_with_dilation should be None "
                "or a 3-element tuple, got {}".format(replace_stride_with_dilation)
            )
        self.groups = groups
        self.base_width = width_per_group
        # change padding 3 -> 2 compared to original torchvision code because added a padding layer
        num_out_filters = width_per_group * widen
        self.conv1 = nn.Conv2d(
            3, num_out_filters, kernel_size=7, stride=2, padding=2, bias=False
        )
        self.bn1 = norm_layer(num_out_filters)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        # Four stages; channel count doubles at each stage.
        self.layer1 = self._make_layer(block, num_out_filters, layers[0])
        num_out_filters *= 2
        self.layer2 = self._make_layer(
            block, num_out_filters, layers[1], stride=2, dilate=replace_stride_with_dilation[0]
        )
        num_out_filters *= 2
        self.layer3 = self._make_layer(
            block, num_out_filters, layers[2], stride=2, dilate=replace_stride_with_dilation[1]
        )
        num_out_filters *= 2
        self.layer4 = self._make_layer(
            block, num_out_filters, layers[3], stride=2, dilate=replace_stride_with_dilation[2]
        )
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        # normalize output features
        self.l2norm = normalize
        # projection head: None, a single linear layer, or a 2-layer MLP
        if output_dim == 0:
            self.projection_head = None
        elif hidden_mlp == 0:
            self.projection_head = nn.Linear(num_out_filters * block.expansion, output_dim)
        else:
            self.projection_head = nn.Sequential(
                nn.Linear(num_out_filters * block.expansion, hidden_mlp),
                nn.BatchNorm1d(hidden_mlp),
                nn.ReLU(inplace=True),
                nn.Linear(hidden_mlp, output_dim),
            )
        # prototype layer
        self.prototypes = None
        if isinstance(nmb_prototypes, list):
            self.prototypes = MultiPrototypes(output_dim, nmb_prototypes)
        elif nmb_prototypes > 0:
            self.prototypes = nn.Linear(output_dim, nmb_prototypes, bias=False)
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode="fan_out", nonlinearity="relu")
            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
        # Zero-initialize the last BN in each residual branch,
        # so that the residual branch starts with zeros, and each residual block behaves like an identity.
        # This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
        if zero_init_residual:
            for m in self.modules():
                if isinstance(m, Bottleneck):
                    nn.init.constant_(m.bn3.weight, 0)
                elif isinstance(m, BasicBlock):
                    nn.init.constant_(m.bn2.weight, 0)

    def _make_layer(self, block, planes, blocks, stride=1, dilate=False):
        """Build one residual stage of `blocks` blocks; the first block may
        downsample (stride) or dilate, the rest keep the resolution."""
        norm_layer = self._norm_layer
        downsample = None
        previous_dilation = self.dilation
        if dilate:
            # trade stride for dilation (keeps spatial resolution)
            self.dilation *= stride
            stride = 1
        if stride != 1 or self.inplanes != planes * block.expansion:
            # projection shortcut to match shape of the residual branch
            downsample = nn.Sequential(
                conv1x1(self.inplanes, planes * block.expansion, stride),
                norm_layer(planes * block.expansion),
            )
        layers = []
        layers.append(
            block(
                self.inplanes,
                planes,
                stride,
                downsample,
                self.groups,
                self.base_width,
                previous_dilation,
                norm_layer,
            )
        )
        self.inplanes = planes * block.expansion
        for _ in range(1, blocks):
            layers.append(
                block(
                    self.inplanes,
                    planes,
                    groups=self.groups,
                    base_width=self.base_width,
                    dilation=self.dilation,
                    norm_layer=norm_layer,
                )
            )
        return nn.Sequential(*layers)

    def forward_backbone(self, x):
        """Run the convolutional trunk; returns pooled & flattened features,
        or the raw conv feature map when ``eval_mode`` is set."""
        x = self.padding(x)
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        if self.eval_mode:
            return x
        x = self.avgpool(x)
        x = torch.flatten(x, 1)
        return x

    def forward_head(self, x):
        """Apply the (optional) projection head, L2 normalization and
        prototypes; returns features, or (features, prototype scores)."""
        if self.projection_head is not None:
            x = self.projection_head(x)
        if self.l2norm:
            x = nn.functional.normalize(x, dim=1, p=2)
        if self.prototypes is not None:
            return x, self.prototypes(x)
        return x

    def forward(self, inputs):
        """Multi-crop forward: `inputs` may be a single batch or a list of
        batches of different resolutions; consecutive same-resolution crops
        are concatenated and passed through the backbone together."""
        if not isinstance(inputs, list):
            inputs = [inputs]
        # End indices of the groups of consecutive inputs that share the same
        # spatial size (so each group can be batched into one backbone call).
        idx_crops = torch.cumsum(torch.unique_consecutive(
            torch.tensor([inp.shape[-1] for inp in inputs]),
            return_counts=True,
        )[1], 0)
        start_idx = 0
        for end_idx in idx_crops:
            _out = self.forward_backbone(torch.cat(inputs[start_idx: end_idx]).cuda(non_blocking=True))
            if start_idx == 0:
                output = _out
            else:
                output = torch.cat((output, _out))
            start_idx = end_idx
        return self.forward_head(output)
class MultiPrototypes(nn.Module):
    """Several independent linear prototype heads applied to the same features."""

    def __init__(self, output_dim, nmb_prototypes):
        super(MultiPrototypes, self).__init__()
        self.nmb_heads = len(nmb_prototypes)
        # Register one bias-free linear head per requested prototype count.
        for head_idx, n_proto in enumerate(nmb_prototypes):
            self.add_module(
                "prototypes" + str(head_idx),
                nn.Linear(output_dim, n_proto, bias=False),
            )

    def forward(self, x):
        """Return a list with the output of every prototype head on `x`."""
        return [
            getattr(self, "prototypes" + str(i))(x) for i in range(self.nmb_heads)
        ]
def resnet50(**kwargs):
    """ResNet-50 (Bottleneck blocks, layer counts [3, 4, 6, 3]); kwargs forwarded to ResNet."""
    return ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)
def resnet50w2(**kwargs):
    """ResNet-50 with all channel counts widened 2x."""
    return ResNet(Bottleneck, [3, 4, 6, 3], widen=2, **kwargs)
def resnet50w4(**kwargs):
    """ResNet-50 with all channel counts widened 4x."""
    return ResNet(Bottleneck, [3, 4, 6, 3], widen=4, **kwargs)
def resnet50w5(**kwargs):
    """ResNet-50 with all channel counts widened 5x."""
    return ResNet(Bottleneck, [3, 4, 6, 3], widen=5, **kwargs)
# --- file: GPSKet-master/GPSKet/__init__.py ---
# Enable x64 (double precision) on jax.
# Must be done at startup, before any jax arrays are created.
# NOTE: `from jax.config import config` is deprecated and removed in recent
# jax releases; `jax.config.update` is the supported spelling.
import jax

jax.config.update("jax_enable_x64", True)
__all__ = [
"models",
"nn",
"operator",
"optimizer",
"sampler",
"hilbert",
"driver",
"datasets",
"vqs"
]
from . import models
from . import nn
from . import operator
from . import optimizer
from . import sampler
from . import hilbert
from . import driver
from . import datasets
from . import vqs
# --- file: GPSKet-master/GPSKet/nn/initializers.py ---
import jax
import jax.numpy as jnp
from jax import dtypes
def normal(sigma=0.1, dtype=jnp.float64):
    """
    Constructs an initializer for a qGPS model.

    Real parameters are normally distributed around 1.0, while complex
    parameters have unit length and normally distributed phases around 0.

    Args:
        sigma : width of the normal distribution
        dtype : default dtype of the weights; `jnp.float64` replaces the
            former `jnp.float_` alias, which was removed together with
            `np.float_` in NumPy 2.0 / recent jax releases

    Returns:
        init function with signature `(key, shape, dtype) -> Array`
    """
    # The real/complex branch is fixed by the dtype given at construction
    # time; the call-time dtype only controls the dtype of the result.
    complex_weights = jnp.iscomplexobj(dtype)

    def init_fun(key, shape, dtype=dtype):
        if complex_weights:
            # unit-modulus weights with normally distributed phases
            phase = jax.random.normal(key, shape, jnp.float32) * sigma
            return jnp.exp(1j * phase).astype(dtype)
        # real weights: 1.0 plus Gaussian noise of width sigma
        return jnp.ones(shape, dtype) + jax.random.normal(key, shape, dtype) * sigma

    return init_fun
def orthogonal(scale=1.0, column_axis=-1, dtype=jnp.float64):
    """
    Constructs an initializer for a linear combination of matrices with orthogonal columns.

    Args:
        scale : scale factor passed to the underlying orthogonal initializer
        column_axis : the axis that contains the columns that should be orthogonal
        dtype : default dtype of the weights; `jnp.float64` replaces the
            former `jnp.float_` alias, which was removed together with
            `np.float_` in NumPy 2.0 / recent jax releases

    Returns:
        init function with signature `(key, shape, dtype) -> Array`
        Importantly, the shape must be 3D: each of the `shape[0]` slices is
        an independently drawn orthogonal matrix of shape `shape[-2:]`.
    """
    def init(key, shape, dtype=dtype):
        dtype = dtypes.canonicalize_dtype(dtype)
        if len(shape) != 3:
            raise ValueError("Orthogonal initializer requires a 3D shape.")
        ortho_init = jax.nn.initializers.orthogonal(
            scale=scale, column_axis=column_axis, dtype=dtype
        )
        W = jnp.zeros(shape, dtype=dtype)
        # One independent key (and orthogonal matrix) per leading-axis slice.
        keys = jax.random.split(key, shape[0])
        for i in range(shape[0]):
            W = W.at[i].set(ortho_init(keys[i], shape[-2:]))
        return W
    return init
# --- file: GPSKet-master/GPSKet/nn/causal_conv.py ---
import numpy as np
import jax.numpy as jnp
from flax import linen as nn
from jax.nn.initializers import lecun_normal, zeros
from netket.utils.types import Callable, DType, Array, NNInitFunc
default_kernel_init = lecun_normal()
# Part of the code was inspired by the tutorial on autoregressive image modelling at
# https://uvadlc-notebooks.readthedocs.io/en/latest/tutorial_notebooks/JAX/tutorial12/Autoregressive_Image_Modeling.html
class MaskedConv2D(nn.Module):
    """2D convolution whose kernel is multiplied elementwise by a fixed mask."""

    features: int
    mask: np.ndarray
    dilation: int = 1
    param_dtype: DType = jnp.float32
    kernel_init: NNInitFunc = default_kernel_init
    bias_init: NNInitFunc = zeros

    @nn.compact
    def __call__(self, x):
        # Flax's convolution module already supports masking; the mask must
        # match the full kernel shape, so a 2D spatial mask is broadcast over
        # the input and output feature channels.
        mask = self.mask
        if len(mask.shape) == 2:
            mask = jnp.tile(
                mask[..., None, None], (1, 1, x.shape[-1], self.features)
            )
        conv = nn.Conv(
            features=self.features,
            kernel_size=self.mask.shape[:2],
            kernel_dilation=self.dilation,
            mask=mask,
            param_dtype=self.param_dtype,
            kernel_init=self.kernel_init,
            bias_init=self.bias_init,
        )
        return conv(x)
class VerticalStackConv(nn.Module):
    """Masked convolution attending only to rows above (and optionally at) the centre."""

    features: int
    kernel_size: int
    mask_center: bool = False
    dilation: int = 1
    param_dtype: DType = jnp.float32
    kernel_init: NNInitFunc = default_kernel_init
    bias_init: NNInitFunc = zeros

    def setup(self):
        # Open only the rows above the centre; the centre row itself is
        # included unless `mask_center` is set.
        n_open_rows = self.kernel_size // 2 + (0 if self.mask_center else 1)
        mask = np.zeros((self.kernel_size, self.kernel_size), dtype=np.float32)
        mask[:n_open_rows, :] = 1.0
        self.conv = MaskedConv2D(
            features=self.features,
            mask=mask,
            dilation=self.dilation,
            param_dtype=self.param_dtype,
            kernel_init=self.kernel_init,
            bias_init=self.bias_init,
        )

    def __call__(self, x):
        return self.conv(x)
class HorizontalStackConv(nn.Module):
    """Masked 1-row convolution attending only to pixels left of (and optionally at) the centre."""

    features: int
    kernel_size: int
    mask_center: bool = False
    dilation: int = 1
    param_dtype: DType = jnp.float32
    kernel_init: NNInitFunc = default_kernel_init
    bias_init: NNInitFunc = zeros

    def setup(self):
        # Open only the columns left of the centre; the centre column itself
        # is included unless `mask_center` is set.
        n_open_cols = self.kernel_size // 2 + (0 if self.mask_center else 1)
        mask = np.zeros((1, self.kernel_size), dtype=np.float32)
        mask[0, :n_open_cols] = 1.0
        self.conv = MaskedConv2D(
            features=self.features,
            mask=mask,
            dilation=self.dilation,
            param_dtype=self.param_dtype,
            kernel_init=self.kernel_init,
            bias_init=self.bias_init,
        )

    def __call__(self, x: Array) -> Array:
        return self.conv(x)
class CausalConv2d(nn.Module):
    """Causal 2D convolution layer combining a vertical and a horizontal
    masked stack, in the style of a gated PixelCNN layer (see the module
    header link): the vertical stack feeds into the horizontal stack through
    a 1x1 convolution, and the horizontal stack has a residual connection."""

    n_channels: int = 32
    kernel_size: int = 3
    activation: Callable = nn.relu
    param_dtype: DType = jnp.float32
    kernel_init: NNInitFunc = default_kernel_init
    bias_init: NNInitFunc = zeros

    def setup(self):
        # Convolutions
        self.conv_v = VerticalStackConv(
            features=self.n_channels,
            kernel_size=self.kernel_size,
            mask_center=False,
            param_dtype=self.param_dtype,
            kernel_init=self.kernel_init,
            bias_init=self.bias_init
        )
        self.conv_h = HorizontalStackConv(
            features=self.n_channels,
            kernel_size=self.kernel_size,
            mask_center=False,
            param_dtype=self.param_dtype,
            kernel_init=self.kernel_init,
            bias_init=self.bias_init
        )
        # 1x1 convolution mixing vertical-stack features into the horizontal stack
        self.conv_v_to_h = nn.Conv(
            features=self.n_channels,
            kernel_size=(1, 1),
            kernel_dilation=(1, 1),
            param_dtype=self.param_dtype,
            kernel_init=self.kernel_init,
            bias_init=self.bias_init
        )
        # 1x1 convolution on the horizontal branch before the residual addition
        self.conv_h_to_1x1 = nn.Conv(
            features=self.n_channels,
            kernel_size=(1, 1),
            kernel_dilation=(1, 1),
            param_dtype=self.param_dtype,
            kernel_init=self.kernel_init,
            bias_init=self.bias_init
        )

    def __call__(self, v_stack: Array, h_stack: Array) -> Array:
        """Returns (v_out, h_out): the updated vertical and horizontal stacks."""
        # Vertical stack
        v_features = self.conv_v(v_stack)
        v_out = self.activation(v_features)
        # Horizontal stack (receives information from the vertical stack,
        # keeps a residual connection to its input)
        h_features = self.conv_h(h_stack)
        h_features = h_features + self.conv_v_to_h(v_features)
        h_features = self.activation(h_features)
        h_out = h_stack + self.conv_h_to_1x1(h_features)
        return v_out, h_out
# --- file: GPSKet-master/GPSKet/sampler/autoreg.py ---
import jax
import numpy as np
from jax import numpy as jnp
from functools import partial
from netket.sampler import Sampler, SamplerState
from netket.utils import struct, HashableArray
from netket.utils.types import PRNGKeyT
def batch_choice(key, a, p):
    """
    Batched version of `jax.random.choice`.

    Attributes:
        key: a PRNGKey used as the random key.
        a: 1D array. Random samples are generated from its elements.
        p: 2D array of shape `(batch_size, a.size)`. Each slice `p[i, :]` is
          the probabilities associated with entries in `a` to generate a sample
          at the index `i` of the output. Can be unnormalized.

    Returns:
        The generated samples as an 1D array of shape `(batch_size,)`.
    """
    batch_size = p.shape[0]
    cdf = p.cumsum(axis=1)
    # One uniform draw per row, scaled by the row's (unnormalized) total mass.
    draws = jax.random.uniform(key, shape=(batch_size, 1)) * cdf[:, -1:]
    # Index of the first bin whose cumulative mass exceeds the draw.
    chosen = (draws > cdf).sum(axis=1)
    return a[chosen]
@struct.dataclass
class ARDirectSamplerState(SamplerState):
    """State of the direct autoregressive sampler: only the PRNG key."""

    key: PRNGKeyT
    """state of the random number generator."""

    def __repr__(self):
        return "{}(rng state={})".format(type(self).__name__, self.key)
@struct.dataclass
class ARDirectSampler(Sampler):
    """Direct sampler for autoregressive QGPS.

    Draws exact, uncorrelated samples site by site from the model's
    conditional distributions (`model._conditional`), then applies one
    randomly chosen symmetry transformation to each sample.
    """

    @property
    def is_exact(sampler):
        # Direct sampling: every sample is an exact draw from |psi|^2.
        return True

    def _init_cache(sampler, model, σ, key):
        """Initialize the model's autoregressive `cache` collection (or None)."""
        # FIXME: hacky solution to make sure cache of FastARQGPS._conditional
        # is not updated during init
        if hasattr(model, 'plaquettes'):
            L = sampler.hilbert.size
            scan_init = (-1, np.zeros(L), np.arange(L))
        else:
            scan_init = -1
        variables = model.init(key, σ, scan_init, method=model._conditional)
        if "cache" in variables:
            cache = variables["cache"]
        else:
            cache = None
        return cache

    def _init_state(sampler, model, variables, key):
        return ARDirectSamplerState(key=key)

    def _reset(sampler, model, variables, state):
        # Direct sampling keeps no chain state to reset.
        return state

    # NOTE(review): this first `_sample_chain` is shadowed by the jitted
    # definition of the same name below, so it is dead code — confirm whether
    # it can be removed.
    def _sample_chain(sampler, model, variables, state, chain_length):
        σ, new_state = _sample_chain(sampler, model, variables, state, chain_length)
        return σ, new_state

    def _sample_next(sampler, model, variables, state):
        # A single step is just a chain of length 1.
        σ, new_state = sampler._sample_chain(model, variables, state, 1)
        σ = σ.squeeze(axis=0)
        return new_state, σ

    @partial(jax.jit, static_argnums=(1, 4))
    def _sample_chain(sampler, model, variables, state, chain_length):
        """Draw `chain_length * n_chains_per_rank` direct samples."""
        # Drop any stale cache; it is re-initialized below for each batch.
        if "cache" in variables:
            variables, _ = variables.pop("cache")

        def scan_fun(carry, args):
            # One autoregressive step: sample the occupation of site `args`
            # (or of the plaquette triple `args`) for every chain at once.
            σ, cache, key = carry
            if cache:
                _variables = {**variables, "cache": cache}
            else:
                _variables = variables
            new_key, key = jax.random.split(key)
            p, mutables = model.apply(
                _variables,
                σ,
                args,
                method=model._conditional,
                mutable=["cache"]
            )
            if "cache" in mutables:
                cache = mutables["cache"]
            else:
                cache = None
            local_states = jnp.asarray(sampler.hilbert.local_states, dtype=sampler.dtype)
            new_σ = batch_choice(key, local_states, p)
            if hasattr(model, 'plaquettes'):
                index = args[0]
            else:
                index = args
            σ = σ.at[:, index].set(new_σ)
            return (σ, cache, new_key), None

        new_key, key_init, key_scan, key_symm = jax.random.split(state.key, 4)
        # We just need a buffer for `σ` before generating each sample
        # The result does not depend on the initial contents in it
        batch_size = chain_length * sampler.n_chains_per_rank
        σ = jnp.zeros(
            (batch_size, sampler.hilbert.size),
            dtype=sampler.dtype,
        )
        # Init `cache` before generating each sample,
        # even if `variables` is not changed and `reset` is not called
        cache = sampler._init_cache(model, σ, key_init)
        indices = jnp.arange(sampler.hilbert.size)
        if hasattr(model, 'plaquettes'):
            masks = np.asarray(model.masks, np.int32)
            plaquettes = np.asarray(model.plaquettes, np.int32)
            scan_init = (indices, masks, plaquettes)
        else:
            scan_init = indices
        # Models with a HashableArray `M` cannot be scanned over; fall back
        # to an unrolled Python loop in that case.
        use_scan = True
        if hasattr(model, 'M'):
            if isinstance(model.M, HashableArray):
                use_scan = False
        if use_scan:
            (σ, _, _), _ = jax.lax.scan(
                scan_fun,
                (σ, cache, key_scan),
                scan_init,
            )
        else:
            for i in range(sampler.hilbert.size):
                if hasattr(model, 'plaquettes'):
                    masks = np.asarray(model.masks, np.int32)
                    plaquettes = np.asarray(model.plaquettes, np.int32)
                    scan_init = (indices, masks, plaquettes)
                    (σ, cache, key_scan), _ = scan_fun((σ, cache, key_scan), (i, np.asarray(model.masks, np.int32)[i], np.asarray(model.plaquettes, np.int32)[i]))
                else:
                    (σ, cache, key_scan), _ = scan_fun((σ, cache, key_scan), i)
        # Apply symmetries
        if type(model.apply_symmetries) == tuple:
            syms = model.apply_symmetries[0]
        else:
            syms = model.apply_symmetries
        σ = syms(σ) # (B, L, T)
        # Sample transformations uniformly
        r = jax.random.randint(key_symm, shape=(batch_size,), minval=0, maxval=σ.shape[-1])
        σ = jnp.take_along_axis(σ, jnp.expand_dims(r, axis=(-2,-1)), axis=-1).reshape(σ.shape[:-1]) # (B, L)
        σ = σ.reshape((chain_length, sampler.n_chains_per_rank, sampler.hilbert.size))
        new_state = state.replace(key=new_key)
        return σ, new_state
# --- file: GPSKet-master/GPSKet/sampler/metropolis_fast.py ---
import jax
import jax.numpy as jnp
from netket.utils import struct
from netket.sampler.metropolis import MetropolisSampler, MetropolisRule
from netket.sampler.rules.exchange import compute_clusters
class MetropolisRuleWithUpdate(MetropolisRule):
    # NOTE(review): appears intended as a marker base class for transition
    # rules whose `transition` also returns the updated sites (used by
    # MetropolisFastSampler below); it is not referenced in the visible
    # code — confirm intent.
    pass
@struct.dataclass
class MetropolisFastSampler(MetropolisSampler):
    """
    TODO: here we require some checking if the transition rule also returns the updates.
    """
    def _sample_next(sampler, machine, parameters, state):
        # The fast path is only valid for machines that support fast
        # (incremental) amplitude updates.
        try:
            fast_update = machine.apply_fast_update
        except:
            fast_update = False
        assert(fast_update)
        """
        Fast implementation of the _sample_next function for qGPS models (allowing for fast updates),
        implementation is based on the original netket implementation for the metropolis sampler.
        Note that the updating is still not strictly constant in the system size as full configurations
        (together with intermediate values) are copied at each sampling step. However there is less
        overhead as the amplitude computation is performed by fast updating.
        """
        def loop_body(i, s):
            # One Metropolis sweep: propose, evaluate via fast update, accept/reject.
            # 1 to propagate for next iteration, 1 for uniform rng and n_chains for transition kernel
            s["key"], key1, key2 = jax.random.split(s["key"], 3)
            # The rule must return the proposed configs AND the sites it changed.
            σp, log_prob_correction, update_sites = sampler.rule.transition(
                sampler, machine, parameters, state, key1, s["σ"]
            )
            # Inject the cached intermediates so the machine can do a fast update.
            params = {**parameters, **s["intermediates_cache"]}
            updated_occupancy = jax.vmap(jnp.take, in_axes=(0, 0), out_axes=0)(σp, update_sites)
            value, new_intermediates_cache = machine.apply(params, updated_occupancy, mutable="intermediates_cache", cache_intermediates=True, update_sites=update_sites)
            proposal_log_prob = (
                sampler.machine_pow * value.real
            )
            uniform = jax.random.uniform(key2, shape=(sampler.n_chains_per_rank,))
            if log_prob_correction is not None:
                do_accept = uniform < jnp.exp(
                    proposal_log_prob - s["log_prob"] + log_prob_correction
                )
            else:
                do_accept = uniform < jnp.exp(proposal_log_prob - s["log_prob"])
            # do_accept must match ndim of proposal and state (which is 2)
            s["σ"] = jnp.where(do_accept.reshape(-1, 1), σp, s["σ"])
            # Keep the new intermediates only for accepted chains.
            def update(old_state, new_state):
                return jax.vmap(jnp.where)(do_accept, old_state, new_state)
            s["intermediates_cache"] = jax.tree_map(update, new_intermediates_cache, s["intermediates_cache"])
            s["accepted"] += do_accept.sum()
            s["log_prob"] = jax.numpy.where(
                do_accept.reshape(-1), proposal_log_prob, s["log_prob"]
            )
            return s
        new_rng, rng = jax.random.split(state.rng)
        # Full evaluation once per call to seed the intermediates cache.
        value, intermediates_cache = machine.apply(parameters, state.σ, mutable="intermediates_cache", cache_intermediates=True)
        init_s = {
            "key": rng,
            "σ": state.σ,
            "intermediates_cache": intermediates_cache,
            "log_prob": sampler.machine_pow*value.real,
            "accepted": state.n_accepted_proc
        }
        s = jax.lax.fori_loop(0, sampler.n_sweeps, loop_body, init_s)
        new_state = state.replace(
            rng=new_rng,
            σ=s["σ"],
            n_accepted_proc=s["accepted"],
            n_steps_proc=state.n_steps_proc
            + sampler.n_sweeps * sampler.n_chains_per_rank,
        )
        return new_state, new_state.σ

    # NOTE(review): `dummy` is not referenced in the visible code — confirm
    # whether it is needed.
    def dummy(self):
        return self
def MetropolisFastExchange(hilbert, *args, clusters=None, graph=None, d_max=1, **kwargs) -> MetropolisFastSampler:
    """Construct a MetropolisFastSampler with an exchange rule that also reports
    the updated sites.

    Either `clusters` (explicit array of site pairs) or `graph` (from which
    clusters up to distance `d_max` are computed) must be provided.
    """
    from .rules.exchange_with_update import ExchangeRuleWithUpdate

    # TODO: clean this up and follow the standard netket design
    if clusters is None:
        assert graph is not None
        clusters = compute_clusters(graph, d_max)
    rule = ExchangeRuleWithUpdate(jnp.array(clusters))
    return MetropolisFastSampler(hilbert, rule, *args, **kwargs)
# --- file: GPSKet-master/GPSKet/sampler/rules/exchange_with_update.py ---
import jax
import jax.numpy as jnp
from flax import struct
from netket.sampler.rules.exchange import ExchangeRule_
@struct.dataclass
class ExchangeRuleWithUpdate(ExchangeRule_):
    """
    Exchange update rule which also returns the list of affected sites,
    as required by the fast metropolis sampler.
    """
    returns_updates: bool = True

    def transition(rule, sampler, machine, parameters, state, key, σ):
        n_chains = σ.shape[0]
        # Draw one random cluster (pair of sites to swap) per chain.
        cluster_id = jax.random.randint(
            key, shape=(n_chains,), minval=0, maxval=rule.clusters.shape[0]
        )

        def swap_pair(config, cluster):
            # Exchange the occupations of the cluster's two sites.
            site_a = rule.clusters[cluster, 0]
            site_b = rule.clusters[cluster, 1]
            swapped = config.at[site_a].set(config[site_b]).at[site_b].set(config[site_a])
            return swapped, site_a, site_b

        new_σ, sites_a, sites_b = jax.vmap(swap_pair, in_axes=(0, 0), out_axes=0)(σ, cluster_id)
        update_sites = jnp.stack((sites_a, sites_b), axis=-1)
        # No log-probability correction: the proposal is symmetric.
        return (new_σ, None, update_sites)
# --- file: GPSKet-master/GPSKet/sampler/rules/fermionic_hopping.py ---
import jax
import jax.numpy as jnp
from flax import struct
from netket.sampler.metropolis import MetropolisRule
from typing import Optional
from netket.utils.types import Array
def transition_function(key, sample, hop_probability, transition_probs=None, return_updates=False):
    """Propose fermionic hopping (or spin-exchange) moves for a batch of configurations.

    Encoding (per site, read from the bit pattern below): bit 0 set = spin-up
    occupied, bit 1 set = spin-down occupied.

    Args:
        key: PRNG key.
        sample: (n_chains, n_sites) batch of occupation configurations.
        hop_probability: probability of proposing a hop; with probability
            1 - hop_probability a spin exchange between two singly occupied
            sites is proposed instead.
        transition_probs: optional (n_sites, n_sites) matrix of target-site
            proposal probabilities conditioned on the start site; if None,
            targets are drawn uniformly from the allowed sites.
        return_updates: if True, also return the (start, target) site pairs.

    Returns:
        (updated_sample, None) or (updated_sample, None, update_sites);
        the second entry is a placeholder for a log-probability correction.
    """
    def apply_electron_hop(samp, key):
        keyA, keyB, keyC, keyD = jax.random.split(key, num=4)
        # Decode per-site occupations: bit 0 = up electron, bit 1 = down electron.
        is_occ_up = (samp & 1).astype(bool)
        is_occ_down = (samp & 2).astype(bool)
        occ = is_occ_up.astype(jnp.uint8) + is_occ_down.astype(jnp.uint8)
        # 0 -> hopping move, 1 -> exchange move.
        hopping_or_exchange = jax.random.choice(keyC, jnp.array([0,1]), p=jnp.array([hop_probability, 1-hop_probability]))
        # Start-site weights: any occupied site for a hop; only singly occupied
        # sites for an exchange.
        occ_prob = jnp.where(hopping_or_exchange==0, occ, jnp.logical_and(occ!=2, occ!=0))
        start_site = jax.random.choice(keyA, samp.shape[-1], p=occ_prob)
        # Pick which spin species (1 = up, 2 = down) moves from the start site.
        spin_probs = jnp.array([is_occ_up[start_site], is_occ_down[start_site]])
        spin = jax.random.choice(keyB, 2, p=spin_probs)+1
        # Allowed targets: sites not holding that spin (hop), additionally
        # restricted to singly occupied sites for an exchange.
        target_site_probs = jnp.where(hopping_or_exchange==0, ~((samp & spin).astype(bool)), jnp.logical_and(jnp.logical_and(occ!=2, occ!=0), ~((samp & spin).astype(bool))))
        if transition_probs is not None:
            target_site = jax.random.choice(keyD, samp.shape[-1], p=transition_probs[start_site, :])
        else:
            target_site = jax.random.choice(keyD, samp.shape[-1], p = target_site_probs)
        # Make sure no unallowed move is applied
        target_site = jnp.where(target_site_probs[target_site]==False, start_site, target_site)
        # Move the chosen spin from start to target.
        updated_sample = samp.at[start_site].add(-spin)
        updated_sample = updated_sample.at[target_site].add(spin)
        def get_exchange(_):
            # For an exchange, also move the opposite spin (3 - spin) back.
            updated_sample_exchanged = updated_sample.at[start_site].add(3-spin)
            return updated_sample_exchanged.at[target_site].add(-(3-spin))
        return (jax.lax.cond(hopping_or_exchange==0, lambda _: updated_sample, get_exchange, None), start_site, target_site)
    keys = jax.random.split(key, num=sample.shape[0])
    dtype = sample.dtype
    # Bit operations require an unsigned integer view; restore dtype afterwards.
    sample = jnp.asarray(sample, jnp.uint8)
    updated_sample, start_sites, target_sites = jax.vmap(apply_electron_hop, in_axes=(0, 0), out_axes=(0, 0, 0))(sample, keys)
    updated_sample = jnp.array(updated_sample, dtype)
    if return_updates:
        update_sites = jnp.stack((start_sites, target_sites), axis=-1)
        return (updated_sample, None, update_sites)
    else:
        return (updated_sample, None)
def transition_fun_with_update(key, sample, hop_probability, transition_probs):
    """Hopping transition that also returns the (start, target) site pairs."""
    return transition_function(
        key, sample, hop_probability, transition_probs, return_updates=True
    )


def transition_fun_without_update(key, sample, hop_probability, transition_probs):
    """Hopping transition returning only (updated_sample, log-prob correction)."""
    return transition_function(
        key, sample, hop_probability, transition_probs, return_updates=False
    )
@struct.dataclass
class FermionicHoppingRule(MetropolisRule):
    """
    Fermionic hopping update rule
    """
    # Probability of proposing a hop; 1 - hop_probability gives a spin exchange.
    hop_probability: float = 1.
    # Optional (n_sites, n_sites) matrix of target-site proposal probabilities.
    transition_probs: Optional[Array] = None

    def transition(rule, sampler, machine, parameters, state, key, sample):
        return transition_fun_without_update(key, sample, rule.hop_probability, transition_probs=rule.transition_probs)
@struct.dataclass
class FermionicHoppingRuleWithUpdates(MetropolisRule):
    """
    Fermionic hopping update rule which also returns the list of affected sites
    which is required for the fast metropolis sampler
    """
    # Probability of proposing a hop; 1 - hop_probability gives a spin exchange.
    hop_probability: float = 1.
    # Optional (n_sites, n_sites) matrix of target-site proposal probabilities.
    transition_probs: Optional[Array] = None

    def transition(rule, sampler, machine, parameters, state, key, sample):
        return transition_fun_with_update(key, sample, rule.hop_probability, transition_probs=rule.transition_probs)
# --- file: GPSKet-master/GPSKet/operator/hamiltonian/ab_initio_sparse.py ---
import numpy as np
import netket as nk
import jax.numpy as jnp
import jax
from numba import jit
import netket.jax as nkjax
from typing import Optional
from functools import partial
from GPSKet.operator.hamiltonian.ab_initio import AbInitioHamiltonianOnTheFly, get_parity_multiplicator_hop
from netket.utils.types import DType
from GPSKet.operator.fermion import FermionicDiscreteOperator, apply_hopping
from GPSKet.models import qGPS
class AbInitioHamiltonianSparse(AbInitioHamiltonianOnTheFly):
    """Implementation of an ab initio Hamiltonian utilizing sparse structure in the
    one- and two-electron integrals. If a localized basis is used, this gives a reduction to O(N^2)
    terms which need to be evaluated for each local energy. Currently, the sparse structure is set
    up in the constructor resulting in a bit of memory overhead.
    TODO: Improve memory footprint.
    """

    def __init__(self, hilbert, h_mat, eri_mat):
        """Set up the Hamiltonian and precompute the flattened sparse structure.

        Args:
            hilbert: Hilbert space the operator acts on (forwarded to base class).
            h_mat: One-electron integral matrix.
            eri_mat: Two-electron integrals, indexed as eri[i, a, j, b].
        """
        super().__init__(hilbert, h_mat, eri_mat)

        # One-electron sparse structure. Indexing convention:
        #   start_id[i] = self.h1_nonzero_range[i]
        #   end_id[i]   = self.h1_nonzero_range[i+1]
        #   non_zero_ids(i)  = h1_nonzero_ids_flat[start_id[i]:end_id[i]]
        #   non_zero_vals(i) = h1_nonzero_vals_flat[start_id[i]:end_id[i]]
        n_orb = self.h_mat.shape[0]
        self.h1_nonzero_range = np.zeros(n_orb + 1, dtype=int)
        # Collect per-row chunks and concatenate once at the end: repeatedly
        # calling np.append inside the loop copies the full array every
        # iteration and scales quadratically with the number of nonzeros.
        h1_ids_chunks = [np.zeros(0, dtype=int)]
        h1_vals_chunks = [np.zeros(0, dtype=self.h_mat.dtype)]
        for i in range(n_orb):
            nonzeros = np.nonzero(self.h_mat[i, :])[0]
            self.h1_nonzero_range[i + 1] = self.h1_nonzero_range[i] + len(nonzeros)
            h1_ids_chunks.append(nonzeros)
            h1_vals_chunks.append(self.h_mat[i, nonzeros])
        self.h1_nonzero_ids_flat = np.concatenate(h1_ids_chunks)
        self.h1_nonzero_vals_flat = np.concatenate(h1_vals_chunks)

        # Two-electron sparse structure (eri indexed as eri[i, a, j, b]):
        #   start_id[i,j] = self.h2_nonzero_range[i, j]
        #   end_id[i,j]   = self.h2_nonzero_range[i, j+1]
        #   non_zero_ids(i,j)  = h2_nonzero_ids_flat[start:end]  -> (a, b) pairs
        #   non_zero_vals(i,j) = h2_nonzero_vals_flat[start:end]
        self.h2_nonzero_range = np.zeros(
            (self.eri_mat.shape[0], self.eri_mat.shape[2] + 1), dtype=int
        )
        h2_ids_chunks = [np.zeros((0, 2), dtype=int)]
        h2_vals_chunks = [np.zeros(0, dtype=self.eri_mat.dtype)]
        for i in range(self.eri_mat.shape[0]):
            for j in range(self.eri_mat.shape[2]):
                nonzeros = np.array(np.nonzero(self.eri_mat[i, :, j, :]))
                self.h2_nonzero_range[i, j + 1] = (
                    self.h2_nonzero_range[i, j] + nonzeros.shape[1]
                )
                # The flat offset runs on continuously across rows: the start
                # of row i+1 equals the end of the last (i, j) slot.
                if j == self.eri_mat.shape[2] - 1 and i != self.eri_mat.shape[0] - 1:
                    self.h2_nonzero_range[i + 1, 0] = self.h2_nonzero_range[i, j + 1]
                h2_ids_chunks.append(nonzeros.T)
                h2_vals_chunks.append(
                    self.eri_mat[i, nonzeros[0, :], j, nonzeros[1, :]]
                )
        self.h2_nonzero_ids_flat = np.concatenate(h2_ids_chunks, axis=0)
        self.h2_nonzero_vals_flat = np.concatenate(h2_vals_chunks)
def local_en_on_the_fly(n_elecs, logpsi, pars, samples, args, use_fast_update=False, chunk_size=None):
    """Compute local energies <x|H|psi>/<x|psi> for a batch of samples.

    Connected configurations are generated on the fly from the flattened
    sparse integral structure (see ``AbInitioHamiltonianSparse``) rather
    than being materialized up front.

    Args:
        n_elecs: Pair (n_up, n_down) of electron numbers per spin channel.
        logpsi: Model apply function returning log-amplitudes.
        pars: Model parameters/variables passed to ``logpsi``.
        samples: Batch of configurations; per site, bit 0 encodes spin-up
            occupation and bit 1 spin-down occupation.
        args: The six flattened sparse-integral arrays supplied by
            ``get_local_kernel_arguments``.
        use_fast_update: If True, evaluate connected amplitudes via the
            model's cached-intermediates fast-update path.
        chunk_size: Optional chunking over samples to bound memory use.

    Returns:
        Array of local energies, one entry per sample.
    """
    # Unpack the flattened sparse structure; indexing conventions are
    # documented in the AbInitioHamiltonianSparse constructor.
    h1_nonzero_range = args[0]
    h1_nonzero_ids_flat = args[1]
    h1_nonzero_vals_flat = args[2]
    h2_nonzero_range = args[3]
    h2_nonzero_ids_flat = args[4]
    h2_nonzero_vals_flat = args[5]
    n_sites = samples.shape[-1]
    def vmap_fun(sample):
        # Per-site occupations: bit 0 = spin-up, bit 1 = spin-down.
        sample = jnp.asarray(sample, jnp.uint8)
        is_occ_up = (sample & 1)
        is_occ_down = (sample & 2) >> 1
        # Cumulative electron counts used for the fermionic parity signs.
        up_count = jnp.cumsum(is_occ_up, dtype=int)
        down_count = jnp.cumsum(is_occ_down, dtype=int)
        # `1 >> x` is a branch-free logical NOT for x in {0, 1}.
        is_empty_up = 1 >> is_occ_up
        is_empty_down = 1 >> is_occ_down
        # Static `size=` is required for nonzero under jit; electron numbers
        # per spin channel are fixed.
        up_occ_inds, = jnp.nonzero(is_occ_up, size=n_elecs[0])
        down_occ_inds, = jnp.nonzero(is_occ_down, size=n_elecs[1])
        up_unocc_inds, = jnp.nonzero(is_empty_up, size=n_sites-n_elecs[0])
        down_unocc_inds, = jnp.nonzero(is_empty_down, size=n_sites-n_elecs[1])
        # Compute log_amp of sample
        if use_fast_update:
            # Cache intermediates so that connected amplitudes can later be
            # obtained from cheap per-site updates.
            log_amp, intermediates_cache = logpsi(pars, jnp.expand_dims(sample, 0), mutable="intermediates_cache", cache_intermediates=True)
            parameters = {**pars, **intermediates_cache}
        else:
            log_amp = logpsi(pars, jnp.expand_dims(sample, 0))
        """ This function returns the log_amp of the connected configuration which is only specified
        by the occupancy on the updated sites as well as the indices of the sites updated."""
        def get_connected_log_amp(updated_occ_partial, update_sites):
            if use_fast_update:
                log_amp_connected = logpsi(parameters, jnp.expand_dims(updated_occ_partial, 0), update_sites=jnp.expand_dims(update_sites, 0))
            else:
                """
                Careful: Go through update_sites in reverse order to ensure the actual updates (which come first in the array)
                are applied and not the dummy updates.
                Due to the non-determinism of updates with .at, we cannot use this and need to scan explicitly.
                """
                def scan_fun(carry, count):
                    return (carry.at[update_sites[count]].set(updated_occ_partial[count]), None)
                updated_config = jax.lax.scan(scan_fun, sample, jnp.arange(len(update_sites)), reverse=True)[0]
                log_amp_connected = logpsi(pars, jnp.expand_dims(updated_config, 0))
            return log_amp_connected
        # Computes term from single electron hop
        # up spin
        def compute_1B_up(i):
            # Loop over the nonzero one-electron integrals h[i, a].
            def inner_loop(a_index, val):
                a = h1_nonzero_ids_flat[a_index]
                def valid_hop():
                    # Updated config at update sites
                    new_occ = jnp.array([sample[i]-1, sample[a]+1], dtype=jnp.uint8)
                    update_sites = jnp.array([i, a])
                    # Get parity
                    parity_multiplicator = get_parity_multiplicator_hop(update_sites, up_count)
                    # Evaluate amplitude ratio
                    log_amp_connected = get_connected_log_amp(new_occ, update_sites)
                    amp_ratio = jnp.squeeze(jnp.exp(log_amp_connected - log_amp))
                    return (amp_ratio * parity_multiplicator)
                def invalid_hop():
                    # Diagonal term (i == a, site occupied) contributes a
                    # ratio of 1; hops onto an occupied site vanish.
                    return jax.lax.select(i==a, jnp.array(1, dtype=log_amp.dtype), jnp.array(0, dtype=log_amp.dtype))
                return val + h1_nonzero_vals_flat[a_index] * jax.lax.cond(is_empty_up[a], valid_hop, invalid_hop)
            return jax.lax.fori_loop(h1_nonzero_range[i], h1_nonzero_range[i+1], inner_loop, jnp.array(0, dtype=log_amp.dtype))
        local_en = jnp.sum(jax.vmap(compute_1B_up)(up_occ_inds))
        # Spin-down analogue of compute_1B_up (occupation encoded in bit 1,
        # hence the +/-2 occupancy updates).
        def compute_1B_down(i):
            def inner_loop(a_index, val):
                a = h1_nonzero_ids_flat[a_index]
                def valid_hop():
                    # Updated config at update sites
                    new_occ = jnp.array([sample[i]-2, sample[a]+2], dtype=jnp.uint8)
                    update_sites = jnp.array([i, a])
                    # Get parity
                    parity_multiplicator = get_parity_multiplicator_hop(update_sites, down_count)
                    # Evaluate amplitude ratio
                    log_amp_connected = get_connected_log_amp(new_occ, update_sites)
                    amp_ratio = jnp.squeeze(jnp.exp(log_amp_connected - log_amp))
                    return (amp_ratio * parity_multiplicator)
                def invalid_hop():
                    return jax.lax.select(i==a, jnp.array(1, dtype=log_amp.dtype), jnp.array(0, dtype=log_amp.dtype))
                return val + h1_nonzero_vals_flat[a_index] * jax.lax.cond(is_empty_down[a], valid_hop, invalid_hop)
            return jax.lax.fori_loop(h1_nonzero_range[i], h1_nonzero_range[i+1], inner_loop, jnp.array(0, dtype=log_amp.dtype))
        local_en += jnp.sum(jax.vmap(compute_1B_down)(down_occ_inds))
        # Helper function which updates a config, also taking into account a previous update.
        # Appends (site, occupancy) to the running update lists, but applies the
        # change to the FIRST entry matching `site` so repeated updates on the
        # same site compose; `valid` flags whether the creation/annihilation is
        # allowed given the (possibly already updated) occupancy.
        def update_config(site, update_sites, updated_conf, spin_int, create):
            update_sites = jnp.append(update_sites, site)
            updated_conf = jnp.append(updated_conf, sample[site])
            first_matching_index = jnp.nonzero(update_sites == site, size=1)[0][0]
            valid = jax.lax.select(create, ~(updated_conf[first_matching_index]&spin_int).astype(bool), (updated_conf[first_matching_index]&spin_int).astype(bool))
            updated_conf = updated_conf.at[first_matching_index].add(jax.lax.select(create, spin_int, -spin_int))
            return updated_conf, valid, update_sites
        # Two-body term: both annihilated electrons spin-up (i, j occupied),
        # both created spin-up (a, b).
        def two_body_up_up_occ(inds):
            i = up_occ_inds[inds[0]]
            j = up_occ_inds[inds[1]]
            update_sites_ij = jnp.array([i, j])
            new_occ_ij = jnp.array([sample[i]-1, sample[j]-1], dtype=jnp.uint8)
            parity_count_ij = up_count[i] + up_count[j] - 2
            def inner_loop(ab_index, val):
                a = h2_nonzero_ids_flat[ab_index, 0]
                b = h2_nonzero_ids_flat[ab_index, 1]
                new_occ_ijb, valid_b, update_sites_ijb = update_config(b, update_sites_ij, new_occ_ij, 1, True)
                new_occ, valid_a, update_sites = update_config(a, update_sites_ijb, new_occ_ijb, 1, True)
                valid = valid_a & valid_b
                def get_val():
                    # Fermionic sign bookkeeping: electron counts up to each
                    # affected site plus ordering corrections for i, j, a, b.
                    parity_count = parity_count_ij + up_count[a] + up_count[b]
                    parity_count -= (a >= j).astype(int) + (a >= i).astype(int) + (b >= j).astype(int) + (b >= i).astype(int) - (a >= b).astype(int) + (j > i).astype(int)
                    # Maps even/odd parity_count to +1/-1.
                    parity_multiplicator = -2*(parity_count & 1) + 1
                    log_amp_connected = get_connected_log_amp(new_occ, update_sites)
                    amp_ratio = jnp.squeeze(jnp.exp(log_amp_connected - log_amp))
                    return (h2_nonzero_vals_flat[ab_index] * amp_ratio * parity_multiplicator)
                value = jax.lax.cond(valid, get_val, lambda : jnp.array(0., dtype=log_amp.dtype))
                return val + value
            return jax.lax.fori_loop(h2_nonzero_range[i,j], h2_nonzero_range[i,j+1], inner_loop, jnp.array(0, dtype=log_amp.dtype))
        # Sum over unordered occupied pairs (i < j).
        local_en += jnp.sum(jax.vmap(two_body_up_up_occ)(jnp.triu_indices(up_occ_inds.shape[0], k=1)))
        # Spin-down/spin-down analogue (bit 1 -> spin_int = 2).
        def two_body_down_down_occ(inds):
            i = down_occ_inds[inds[0]]
            j = down_occ_inds[inds[1]]
            update_sites_ij = jnp.array([i, j])
            new_occ_ij = jnp.array([sample[i]-2, sample[j]-2], dtype=jnp.uint8)
            parity_count_ij = down_count[i] + down_count[j] - 2
            def inner_loop(ab_index, val):
                a = h2_nonzero_ids_flat[ab_index, 0]
                b = h2_nonzero_ids_flat[ab_index, 1]
                new_occ_ijb, valid_b, update_sites_ijb = update_config(b, update_sites_ij, new_occ_ij, 2, True)
                new_occ, valid_a, update_sites = update_config(a, update_sites_ijb, new_occ_ijb, 2, True)
                valid = valid_a & valid_b
                def get_val():
                    parity_count = parity_count_ij + down_count[a] + down_count[b]
                    parity_count -= (a >= j).astype(int) + (a >= i).astype(int) + (b >= j).astype(int) + (b >= i).astype(int) - (a >= b).astype(int) + (j > i).astype(int)
                    parity_multiplicator = -2*(parity_count & 1) + 1
                    log_amp_connected = get_connected_log_amp(new_occ, update_sites)
                    amp_ratio = jnp.squeeze(jnp.exp(log_amp_connected - log_amp))
                    return (h2_nonzero_vals_flat[ab_index] * amp_ratio * parity_multiplicator)
                value = jax.lax.cond(valid, get_val, lambda : jnp.array(0., dtype=log_amp.dtype))
                return val + value
            return jax.lax.fori_loop(h2_nonzero_range[i,j], h2_nonzero_range[i,j+1], inner_loop, jnp.array(0, dtype=log_amp.dtype))
        local_en += jnp.sum(jax.vmap(two_body_down_down_occ)(jnp.triu_indices(down_occ_inds.shape[0], k=1)))
        # Mixed-spin term: i spin-up occupied, j spin-down occupied; a created
        # spin-up, b created spin-down.
        def two_body_up_down_occ(inds):
            i = up_occ_inds[inds[0]]
            j = down_occ_inds[inds[1]]
            update_sites_i = jnp.array([i])
            new_occ_i = jnp.array([sample[i]-1], dtype=jnp.uint8)
            new_occ_ij, _, update_sites_ij = update_config(j, update_sites_i, new_occ_i, 2, False)
            parity_count_ij = up_count[i] + down_count[j] - 2
            def inner_loop(ab_index, val):
                a = h2_nonzero_ids_flat[ab_index, 0]
                b = h2_nonzero_ids_flat[ab_index, 1]
                new_occ_ijb, valid_b, update_sites_ijb = update_config(b, update_sites_ij, new_occ_ij, 2, True)
                new_occ, valid_a, update_sites = update_config(a, update_sites_ijb, new_occ_ijb, 1, True)
                valid = valid_a & valid_b
                def get_val():
                    # Cross-spin signs only need the ordering corrections
                    # within each spin channel.
                    parity_count = parity_count_ij + up_count[a] + down_count[b]
                    parity_count -= (a >= i).astype(int) + (b >= j).astype(int)
                    parity_multiplicator = -2*(parity_count & 1) + 1
                    log_amp_connected = get_connected_log_amp(new_occ, update_sites)
                    amp_ratio = jnp.squeeze(jnp.exp(log_amp_connected - log_amp))
                    return (h2_nonzero_vals_flat[ab_index] * amp_ratio * parity_multiplicator)
                value = jax.lax.cond(valid, get_val, lambda : jnp.array(0., dtype=log_amp.dtype))
                return val + value
            return jax.lax.fori_loop(h2_nonzero_range[i,j], h2_nonzero_range[i,j+1], inner_loop, jnp.array(0, dtype=log_amp.dtype))
        # Full Cartesian product over (up-occupied, down-occupied) pairs.
        row_inds, col_inds = jnp.indices((up_occ_inds.shape[0], down_occ_inds.shape[0]))
        local_en += jnp.sum(jax.vmap(two_body_up_down_occ)((row_inds.flatten(), col_inds.flatten())))
        return local_en
    # Chunked vmap over the sample batch keeps peak memory bounded.
    return nkjax.vmap_chunked(vmap_fun, chunk_size=chunk_size)(samples)
@nk.vqs.get_local_kernel_arguments.dispatch
def get_local_kernel_arguments(vstate: nk.vqs.MCState, op: AbInitioHamiltonianSparse):
    """Return the samples and the sparse integral arrays (as jax arrays)
    required by the on-the-fly local energy kernel."""
    sparse_structure = tuple(
        jnp.array(host_array)
        for host_array in (
            op.h1_nonzero_range,
            op.h1_nonzero_ids_flat,
            op.h1_nonzero_vals_flat,
            op.h2_nonzero_range,
            op.h2_nonzero_ids_flat,
            op.h2_nonzero_vals_flat,
        )
    )
    return (vstate.samples, sparse_structure)
@nk.vqs.get_local_kernel.dispatch(precedence=1)
def get_local_kernel(vstate: nk.vqs.MCState, op: AbInitioHamiltonianSparse, chunk_size: Optional[int] = None):
    """Return the (hashable) local-energy kernel for the sparse ab initio
    Hamiltonian, enabling the model's fast-update path when available.

    Args:
        vstate: Variational state whose model may expose ``apply_fast_update``.
        op: The sparse ab initio Hamiltonian operator.
        chunk_size: Optional chunking over samples, forwarded to the kernel.
    """
    # Probe the model for fast-update support; getattr with a default replaces
    # the previous bare ``except:`` which silently swallowed all exceptions.
    use_fast_update = getattr(vstate.model, "apply_fast_update", False)
    return nkjax.HashablePartial(local_en_on_the_fly, vstate.hilbert._n_elec, use_fast_update=use_fast_update, chunk_size=chunk_size)
| 15,532 | 54.27758 | 170 | py |
GPSKet | GPSKet-master/GPSKet/operator/hamiltonian/J1J2.py | import jax
import jax.numpy as jnp
import netket as nk
import netket.jax as nkjax
from netket.vqs.mc.mc_state.state import MCState
from GPSKet.models import qGPS
import GPSKet.vqs.mc.mc_state.expect
from typing import Optional
class HeisenbergOnTheFly(nk.operator.Heisenberg):
    """Marker subclass of :class:`netket.operator.Heisenberg`.

    Adds no behaviour of its own; it exists purely so multiple dispatch can
    select the on-the-fly local energy evaluation (which allows for fast
    wavefunction updates).
    """
def get_J1_J2_Hamiltonian(Lx, Ly=None, J1=1., J2=0., sign_rule=True, total_sz=0.0, on_the_fly_en=False, pbc=True):
    """Build a spin-1/2 J1-J2 Heisenberg Hamiltonian on a chain or grid.

    Args:
        Lx: Linear extent; if ``Ly`` is None a 1D chain is built, otherwise an
            Lx x Ly grid.
        Ly: Optional second grid dimension.
        J1: Nearest-neighbour coupling.
        J2: Next-nearest-neighbour coupling (0 disables second-order edges).
        sign_rule: Whether to apply the Marshall sign rule.
        total_sz: Total magnetization sector of the Hilbert space.
        on_the_fly_en: If True, return a ``HeisenbergOnTheFly`` so dispatch
            selects on-the-fly local energy evaluation (and, for qGPS, fast
            updates).
        pbc: Periodic boundary conditions.

    Returns:
        The Heisenberg operator acting on the corresponding Hilbert space.
    """
    neighbor_order = 1 if J2 == 0. else 2
    if Ly is None:
        graph = nk.graph.Chain(Lx, max_neighbor_order=neighbor_order, pbc=pbc)
    else:
        graph = nk.graph.Grid([Lx, Ly], max_neighbor_order=neighbor_order, pbc=pbc)
    hilbert = nk.hilbert.Spin(0.5, total_sz=total_sz, N=graph.n_nodes)
    # Slightly hacky: the subclass is identical except that the dispatch rules
    # defined below apply to it.
    operator_cls = HeisenbergOnTheFly if on_the_fly_en else nk.operator.Heisenberg
    # Factor 1/4 converts spin operators to the Pauli convention used here.
    couplings = J1 / 4 if J2 == 0 else [J1 / 4, J2 / 4]
    return operator_cls(hilbert, graph, J=couplings, sign_rule=sign_rule)
"""
This is a custom way of evaluating the expectation values for Heisenberg models.
It can make use of the fast update functionality of the qGPS ansatz.
Furthermore it can reduce the memory requirements compared to the default netket implementation as connected
configurations are not all created at once but created on the fly.
It can probably at one point also be extended beyond Heisenberg models but at the moment
it explicitly requires that each operator in the Hamiltonian acts on a pair of spins
and connects the test configuration to at most one other different configuration.
"""
def local_en_on_the_fly(states_to_local_indices, logpsi, pars, samples, args, use_fast_update=False, chunk_size=None):
    """Compute local energies for a Heisenberg-type Hamiltonian on the fly.

    Connected configurations are generated one operator term at a time
    instead of being materialized all at once, reducing memory use and
    enabling the model's fast-update path. Assumes every operator acts on a
    pair of spins and connects each configuration to at most one other
    configuration (valid for Heisenberg terms).

    Args:
        states_to_local_indices: Maps local spin values to basis indices.
        logpsi: Model apply function returning log-amplitudes.
        pars: Model parameters/variables passed to ``logpsi``.
        samples: Batch of spin configurations (+/-1 representation).
        args: Tuple of (operators, acting_on) arrays from
            ``get_local_kernel_arguments``.
        use_fast_update: If True, use cached intermediates and per-site
            updates for connected amplitudes.
        chunk_size: Optional chunking over samples to bound memory.

    Returns:
        Array of local energies, one entry per sample.
    """
    # Local 4x4 operator matrices and the site pairs they act on.
    operators = args[0]
    acting_on = args[1]
    def vmap_fun(sample):
        if use_fast_update:
            # Cache intermediates so connected amplitudes can be evaluated
            # with cheap two-site updates.
            log_amp, intermediates_cache = logpsi(pars, jnp.expand_dims(sample, 0), mutable="intermediates_cache", cache_intermediates=True)
            parameters = {**pars, **intermediates_cache}
        else:
            log_amp = logpsi(pars, jnp.expand_dims(sample, 0))
        def inner_vmap(operator_element, acting_on_element):
            # Occupancy of the two sites this operator acts on, converted to a
            # single index (0..3) into the local two-spin basis.
            rel_occ = sample[acting_on_element]
            basis_index = jnp.sum(states_to_local_indices(rel_occ) * jnp.array([1,2]))
            # the way this is set up at the moment is only valid for Heisenberg models where at most one non-zero off-diagonal exists
            off_diag_connected = jnp.array([0,2,1,3]) # indices of the non-zero off-diagonal element (or the diagonal index if no non-zero off-diagonal exists)
            def compute_element(connected_index):
                mel = operator_element[basis_index, connected_index]
                new_occ = 2 * jnp.array([connected_index % 2, connected_index // 2]) - 1. # map back to standard netket representation of spin configurations
                if use_fast_update:
                    log_amp_connected = logpsi(parameters, jnp.expand_dims(new_occ, 0), update_sites=jnp.expand_dims(acting_on_element, 0))
                else:
                    updated_config = sample.at[acting_on_element].set(new_occ)
                    log_amp_connected = logpsi(pars, jnp.expand_dims(updated_config, 0))
                return jnp.squeeze(mel * jnp.exp(log_amp_connected - log_amp))
            # This has a bit of overhead as there is no good way of shortcutting if the non-zero element is the diagonal
            off_diag = jnp.where(off_diag_connected[basis_index] != basis_index, compute_element(off_diag_connected[basis_index]), 0.)
            # Total contribution: off-diagonal (spin-flip) term + diagonal term.
            return off_diag + operator_element[basis_index, basis_index]
        return jnp.sum(jax.vmap(inner_vmap)(operators, acting_on))
    # Chunked vmap over the sample batch keeps peak memory bounded.
    return nkjax.vmap_chunked(vmap_fun, chunk_size=chunk_size)(samples)
@nk.vqs.get_local_kernel_arguments.dispatch
def get_local_kernel_arguments(vstate: nk.vqs.MCState, op: HeisenbergOnTheFly):
    """Return the samples plus the operator matrices and the site pairs they
    act on (as jax arrays), for the on-the-fly Heisenberg kernel."""
    operator_args = (jnp.array(op.operators), jnp.array(op.acting_on))
    return (vstate.samples, operator_args)
@nk.vqs.get_local_kernel.dispatch(precedence=1)
def get_local_kernel(vstate: nk.vqs.MCState, op: HeisenbergOnTheFly, chunk_size: Optional[int] = None):
    """Return the (hashable) on-the-fly local-energy kernel for Heisenberg
    operators, enabling the model's fast-update path when available.

    Args:
        vstate: Variational state whose model may expose ``apply_fast_update``.
        op: The Heisenberg operator marked for on-the-fly evaluation.
        chunk_size: Optional chunking over samples, forwarded to the kernel.
    """
    # Probe the model for fast-update support; getattr with a default replaces
    # the previous bare ``except:`` which silently swallowed all exceptions.
    use_fast_update = getattr(vstate.model, "apply_fast_update", False)
    return nkjax.HashablePartial(local_en_on_the_fly, op.hilbert.states_to_local_indices, use_fast_update=use_fast_update, chunk_size=chunk_size)
| 4,803 | 47.525253 | 159 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.