repo stringlengths 1 99 | file stringlengths 13 215 | code stringlengths 12 59.2M | file_length int64 12 59.2M | avg_line_length float64 3.82 1.48M | max_line_length int64 12 2.51M | extension_type stringclasses 1
value |
|---|---|---|---|---|---|---|
interactive-image2video-synthesis | interactive-image2video-synthesis-main/models/discriminator.py | import torch
from torch import nn
from torch.optim import Adam
import functools
from torch.nn.utils import spectral_norm
import math
import numpy as np
from utils.general import get_member
from models.blocks import SPADE
class GANTrainer(object):
    """Owns a discriminator (2D PatchGAN or temporal 3D-ResNet), its Adam
    optimizer and LR scheduler, and runs single adversarial update steps.

    `train_step` optimizes only the discriminator; the generator-side losses
    (adversarial + feature matching) are returned still attached to the graph
    so the caller can backpropagate them through the generator.
    """
    def __init__(self, config, load_fn,logger,spatial_size=128, parallel=False, devices=None, debug=False, temporal=False, sequence_length = None):
        """Build the discriminator, optionally restore a checkpoint, move it
        to GPU, and set up the optimizer and multi-step LR scheduler.

        :param config: nested config dict; reads the "gan"/"gan_temp",
            "general" and "training" sections.
        :param load_fn: callable returning (model_state, optimizer_state)
            for a given checkpoint key.
        :param temporal: if True use the 3D-ResNet video discriminator,
            otherwise the 2D PatchGAN discriminator.
        :param sequence_length: required when `temporal` is True.
        """
        self.config = config
        self.logger = logger
        # disc
        self.logger.info("Load discriminator model")
        self.temporal = temporal
        if self.temporal:
            assert sequence_length is not None
            self.key = "gan_temp"
            self.disc = resnet(config=config[self.key],spatial_size=spatial_size,sequence_length=sequence_length)
            self.load_key = "disc_temp"
            self.postfix = "temp"
            if self.disc.cond:
                self.logger.info(f"Using Conditional temporal discriminator.")
        else:
            self.key = "gan"
            self.disc = PatchDiscriminator(self.config[self.key])
            self.load_key = "disc_patch"
            self.postfix = "patch"
        # conditioning is only supported by the temporal discriminator
        self.cond = self.config[self.key]["conditional"] if self.temporal and "conditional" in self.config[self.key] else False
        self.logger.info(f"Number of parameters in discriminator_{self.postfix} is {sum(p.numel() for p in self.disc.parameters())}.")
        self.parallel = parallel
        self.devices = devices
        if self.parallel:
            assert self.devices is not None
        # load checkpoint if there's any and it is required
        disc_ckpt = disc_op_ckpt = None
        if self.config["general"]["restart"] and not debug:
            disc_ckpt, disc_op_ckpt = load_fn(key=self.load_key)
            if disc_ckpt is not None:
                self.logger.info(f"Resuming training of discriminator...loading weights.")
                self.disc.load_state_dict(disc_ckpt)
        if self.parallel:
            self.disc = nn.DataParallel(self.disc,device_ids=self.devices)
            self.disc.cuda(self.devices[0])
        else:
            self.disc.cuda()
        self.logger.info("Discriminator on gpu!")
        # disc optimizer (created after the potential DataParallel wrap so it
        # sees the final parameter set)
        self.disc_opt = Adam(self.disc.parameters(), lr=self.config["training"]["lr"])
        if self.config["general"]["restart"] and disc_op_ckpt is not None:
            self.disc_opt.load_state_dict(disc_op_ckpt)
        # scheduler for disc optimizer: milestones are fractions of n_epochs
        milestones = [int(self.config["training"]["n_epochs"] * t) for t in self.config["training"]["tau"]]
        self.disc_scheduler = torch.optim.lr_scheduler.MultiStepLR(self.disc_opt, milestones=milestones, gamma=self.config["training"]["lr_reduce"])
    def train_step(self, x_in_true, x_in_fake, cond=None):
        """One discriminator update plus generator-loss computation.

        :param x_in_true: batch of real examples; requires_grad is enabled
            here because the gradient penalty differentiates w.r.t. them.
        :param x_in_fake: generator output; detached for the disc update.
        :param cond: optional conditioning, forwarded only if self.cond.
        :return: (scalar log dict, generator adversarial loss,
            feature-matching loss); the last two keep their graph so the
            caller can backprop through the generator.
        """
        # predict
        cond = cond if self.cond else None
        self.disc.train()
        # if self.parallel:
        #     x_in_true = x_in_true.cuda(self.devices[0])
        #     x_in_fake = x_in_fake.cuda(self.devices[0])
        # set gradient to zero
        self.disc_opt.zero_grad()
        # real examples
        x_in_true.requires_grad_()
        pred_true, _ = self.disc(x_in_true, cond)
        # get_member resolves attributes through a possible DataParallel wrapper
        loss_real = get_member(self.disc,"loss")(pred_true, real=True)
        if self.config[self.key]["gp_weight"] > 0:
            # graph must be retained: gp() differentiates pred_true w.r.t.
            # x_in_true a second time (R1-style penalty on real samples)
            loss_real.backward(retain_graph=True)
            # gradient penalty
            loss_gp = get_member(self.disc,"gp")(pred_true, x_in_true).mean()
            gp_weighted = self.config[self.key]["gp_weight"] * loss_gp
            gp_weighted.backward()
        else:
            loss_real.backward()
        # fake examples
        pred_fake, _ = self.disc(x_in_fake.detach(),cond)
        loss_fake = get_member(self.disc,"loss")(pred_fake, real=False)
        loss_fake.backward()
        # optmize parameters
        self.disc_opt.step()
        loss_disc = ((loss_real + loss_fake) / 2.).item()
        out_dict = {f"loss_disc_{self.postfix}": loss_disc, f"p_true_{self.postfix}": torch.sigmoid(pred_true).mean().item(), f"p_fake_{self.postfix}": torch.sigmoid(pred_fake).mean().item(),
                    f"loss_gp_{self.postfix}": loss_gp.item() if self.config[self.key]["gp_weight"] > 0 else 0 }
        # train generator: re-run the (now updated) discriminator on attached fakes
        pred_fake, fmap_fake = self.disc(x_in_fake,cond)
        _, fmap_true = self.disc(x_in_true,cond)
        if get_member(self.disc,"bce_loss"):
            loss_gen = get_member(self.disc,"bce")(pred_fake, torch.ones_like(pred_fake))
        else:
            # hinge/WGAN-style generator objective
            loss_gen = -torch.mean(pred_fake)
        loss_fmap = get_member(self.disc,"fmap_loss")(fmap_fake, fmap_true)
        # if self.parallel:
        #     loss_fmap = loss_fmap.cuda(self.devices[0])
        #     loss_gen = loss_gen.cuda(self.devices[0])
        return out_dict, loss_gen, loss_fmap
# code taken from https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/models/networks.py
class PatchDiscriminator(nn.Module):
    """pix2pix-style PatchGAN discriminator.

    Emits a grid of per-patch logits plus the list of intermediate feature
    maps (used for a feature-matching loss on the generator side).
    """

    def __init__(self, config, norm_layer=nn.InstanceNorm2d):
        """Build the conv stack.

        :param config: dict with keys "pixel_dynamics" (6 vs 3 input
            channels), "n_layers", "deep_layers", "bce_loss" and optionally
            "deep_disc".
        :param norm_layer: normalization module class (or functools.partial).
        """
        super().__init__()
        # InstanceNorm has no affine parameters, so convs keep their own bias
        if isinstance(norm_layer, functools.partial):
            use_bias = norm_layer.func == nn.InstanceNorm2d
        else:
            use_bias = norm_layer == nn.InstanceNorm2d
        deep_disc = config.get("deep_disc", False)
        in_channels = 6 if config["pixel_dynamics"] else 3
        extra_layers = config["deep_layers"]
        base_width = 64
        num_down = config["n_layers"]
        self.bce_loss = config["bce_loss"]
        if self.bce_loss:
            self.bce = nn.BCEWithLogitsLoss()
        ksize, pad = 4, 1
        self.layers = nn.ModuleList()
        self.norms = nn.ModuleList()
        self.in_conv = nn.Conv2d(in_channels, base_width, kernel_size=ksize, stride=2, padding=pad)
        self.act_fn = nn.LeakyReLU(0.2, True)
        # gradually widen while downsampling (width capped at 8 * base_width)
        width = base_width
        for step in range(1, num_down):
            prev, width = width, base_width * min(2 ** step, 8)
            self.layers.append(nn.Conv2d(prev, width, kernel_size=ksize, stride=2, padding=pad, bias=use_bias))
            self.norms.append(norm_layer(width))
        # one final stride-1 stage
        prev, width = width, base_width * min(2 ** num_down, 8)
        self.layers.append(nn.Conv2d(prev, width, kernel_size=ksize, stride=1, padding=pad, bias=use_bias))
        self.norms.append(norm_layer(width))
        if deep_disc:
            # optional extra stride-1 layers to increase capacity
            cap = 1024
            for _ in range(extra_layers):
                grown = min(cap, width * 2)
                self.layers.append(nn.Conv2d(width, grown, kernel_size=ksize, stride=1, padding=pad, bias=use_bias))
                self.norms.append(norm_layer(grown))
                width = grown
        # 1-channel prediction map
        self.out_conv = nn.Conv2d(width, 1, kernel_size=ksize, stride=1, padding=pad)

    def forward(self, input, cond=None):
        """Return (patch logits, list of intermediate feature maps).

        `cond` is accepted for interface parity with the temporal
        discriminator and ignored here.
        """
        feats = []
        h = self.act_fn(self.in_conv(input))
        for conv, norm in zip(self.layers, self.norms):
            h = self.act_fn(norm(conv(h)))
            feats.append(h)
        return self.out_conv(h), feats

    def loss(self, pred, real):
        """Discriminator loss: BCE-with-logits or hinge, per config."""
        if self.bce_loss:
            target = torch.ones_like(pred) if real else torch.zeros_like(pred)
            return self.bce(pred, target)
        # hinge loss: relu(1 - pred) for reals, relu(1 + pred) for fakes
        signed = -pred if real else pred
        return torch.mean(torch.nn.ReLU()(1.0 + signed))

    def gp(self, pred_fake, x_fake):
        """Per-sample squared gradient norm of the prediction w.r.t. its input."""
        bs = x_fake.size(0)
        grads = torch.autograd.grad(
            outputs=pred_fake.sum(), inputs=x_fake,
            create_graph=True, retain_graph=True, only_inputs=True
        )[0]
        sq = grads.pow(2)
        assert sq.size() == x_fake.size()
        return sq.view(bs, -1).sum(1)

    def fmap_loss(self, fmap1, fmap2, loss="l1"):
        """Mean L1 (or L2) distance between two lists of feature maps."""
        total = 0
        for a, b in zip(fmap1, fmap2):
            if loss == "l1":
                total += torch.mean(torch.abs(a - b))
            if loss == "l2":
                total += torch.mean((a - b) ** 2)
        return total / len(fmap1)
######################################################################################################
###3D-ConvNet Implementation from https://github.com/tomrunia/PyTorchConv3D ##########################
def resnet10(**kwargs):
    """Build a 3D ResNet-10 discriminator (one BasicBlock per stage)."""
    return ResNet(BasicBlock, [1, 1, 1, 1], **kwargs)
def resnet(**kwargs):
    """Build a 3D ResNet-18 discriminator (two BasicBlocks per stage)."""
    return ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)
def resnet34(**kwargs):
    """Build a 3D ResNet-34 discriminator (3/4/6/3 BasicBlocks per stage)."""
    return ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)
def conv3x3x3(in_planes, out_planes, stride=1, stride_t=1):
    """Spectrally-normalized 3x3x3 Conv3d, padding 1, no bias.

    `stride` applies to both spatial dims, `stride_t` to the temporal dim.
    """
    conv = nn.Conv3d(
        in_planes,
        out_planes,
        kernel_size=(3, 3, 3),
        stride=[stride_t, stride, stride],
        padding=[1, 1, 1],
        bias=False,
    )
    return spectral_norm(conv)
class BasicBlock(nn.Module):
    """Residual 3D block: two spectral-norm 3x3x3 convs with GroupNorm/ReLU.

    An optional `downsample` module projects the shortcut when the shape of
    the main path changes (channel count or stride).
    """
    expansion = 1

    def __init__(self, inplanes, planes, stride=1, stride_t=1, downsample=None):
        super(BasicBlock, self).__init__()
        self.conv1 = conv3x3x3(inplanes, planes, stride, stride_t)
        self.bn1 = nn.GroupNorm(num_groups=16, num_channels=planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3x3(planes, planes)
        self.bn2 = nn.GroupNorm(num_groups=16, num_channels=planes)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        # shortcut branch (projected if the main path changes shape)
        shortcut = x if self.downsample is None else self.downsample(x)
        h = self.relu(self.bn1(self.conv1(x)))
        h = self.bn2(self.conv2(h))
        return self.relu(h + shortcut)
class ResNet(nn.Module):
    """3D-ResNet video discriminator with spectral normalization.

    Two layouts, selected by config["patch_temp_disc"]:
      * patch mode: four fixed stages, spatial average pooling and a linear
        head emitting one logit per remaining time step; optional SPADE
        conditioning applied per time step.
      * non-patch mode: stages are appended until the temporal extent shrinks
        to one, then a 1-channel conv head scores the squeezed feature map.
    """
    def __init__(self,
                 block,
                 layers,
                 spatial_size,
                 sequence_length,
                 config):
        """
        :param block: residual block class (e.g. BasicBlock).
        :param layers: number of blocks per stage.
        :param spatial_size: input frame height/width.
        :param sequence_length: number of frames in the input clip.
        :param config: dict with "bce_loss", "patch_temp_disc" and optional
            "max_channels", "spatio_temporal", "conditional", "num_classes".
        """
        super(ResNet, self).__init__()
        # spatial_size = config["spatial_size"]
        self.inplanes = 64
        self.bce_loss = config["bce_loss"]
        if self.bce_loss:
            # bugfix: loss() references self.bce, which was never created;
            # enabling bce_loss previously raised AttributeError
            self.bce = nn.BCEWithLogitsLoss()
        min_spatial_size = int(spatial_size / 8)
        # sample_duration = dic.Network['sequence_length']-1
        self.max_channels = config["max_channels"] if "max_channels" in config else 256
        # stem: spatial downsampling only, temporal extent preserved
        self.conv1 = spectral_norm(nn.Conv3d(
            3,
            64,
            kernel_size=(3, 7, 7),
            stride=(1, 2, 2),
            padding=(1, 3, 3),
            bias=False))
        self.gn1 = nn.GroupNorm(num_groups=16, num_channels=64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool3d(kernel_size=(3, 3, 3), stride=(1, 2, 2), padding=1)
        self.layers = nn.ModuleList()
        self.patch_temp = config["patch_temp_disc"]
        self.spatio_temporal = config["spatio_temporal"] if "spatio_temporal" in config else False
        if self.patch_temp:
            # fixed four-stage layout, no temporal striding
            self.layer1 = self._make_layer(block, 64, layers[0], stride=1)
            self.layers.append(self._make_layer(block, 128, layers[1], stride=1, stride_t=1))
            self.layers.append(self._make_layer(block, 128, layers[2], stride=2, stride_t=1))
            self.layers.append(self._make_layer(block, 256, layers[3], stride=2, stride_t=1))
            last_size = int(math.ceil(spatial_size / 16))
            last_duration = 1
            self.avgpool = nn.AvgPool3d((last_duration, last_size, last_size), stride=1)
            self.cond = config["conditional"] if "conditional" in config else False
            if self.cond:
                self.spade_emb = SPADE(norm_nc=block.expansion * 256, label_nc=2, config=config)
            self.fc = nn.Linear(256 * block.expansion, config["num_classes"], bias=False)
        else:
            spatial_size /= 2
            self.layer1 = self._make_layer(block, 32, layers[0], stride=1)
            n_channels = 64
            if "conditional" in config and config["conditional"]:
                raise ValueError("If non-patch-gan temporal discriminator is used, conditional must not be True!")
            self.cond = False
            n = 0
            # keep adding stages (halving time each step unless spatio_temporal)
            # until the temporal dimension collapses to 1
            while sequence_length > 1:
                blocks = layers[n] if n < sequence_length - 1 else layers[-1]
                n_channels = min(2 * n_channels, self.max_channels)
                # stop spatial downsampling once the minimum size is reached
                stride = 1 if spatial_size <= min_spatial_size else 2
                spatial_size = int(spatial_size / stride)
                stride_t = 1 if self.spatio_temporal else (2 if sequence_length > 1 else 1)
                self.layers.append(self._make_layer(block, n_channels, blocks, stride=stride, stride_t=stride_t))
                sequence_length = int(math.ceil(sequence_length / 2))
                n += 1
            self.final = nn.Conv2d(n_channels, 1, 3, padding=1)
        print(f"Temporal discriminator has {len(self.layers)} layers")
        # NOTE(review): with spectral_norm applied, `m.weight` is the
        # parametrized view; confirm the orthogonal re-init interacts with
        # `weight_orig` as intended.
        for m in self.modules():
            if isinstance(m, nn.Conv3d):
                m.weight = nn.init.orthogonal_(m.weight)

    def _make_layer(self, block, planes, blocks, stride=1, stride_t=1):
        """Stack `blocks` residual blocks; a strided spectral-norm conv +
        GroupNorm projection aligns the shortcut when the shape changes."""
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion or stride_t != 1:
            downsample = nn.Sequential(
                spectral_norm(nn.Conv3d(
                    self.inplanes,
                    planes * block.expansion,
                    kernel_size=[3, 3, 3],
                    stride=[stride_t, stride, stride],
                    padding=[1, 1, 1],
                    bias=False)),
                nn.GroupNorm(num_channels=planes * block.expansion, num_groups=16))
        layers = []
        layers.append(block(self.inplanes, planes, stride, stride_t, downsample))
        self.inplanes = planes * block.expansion
        for _ in range(1, blocks):
            layers.append(block(self.inplanes, planes))
        return nn.Sequential(*layers)

    def forward(self, x, cond=None):
        """Return (logits, list of intermediate feature maps).

        :param x: video clip of shape (batch, 3, time, H, W).
        :param cond: conditioning map, used only when self.cond is True.
        """
        out = []
        x = self.conv1(x)
        x = self.gn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        out.append(x)
        for n in range(len(self.layers)):
            x = self.layers[n](x)
            out.append(x)
        if self.patch_temp:
            if self.cond:
                # apply SPADE per time step, then restack along time
                x_norm = []
                for i in range(x.size(2)):
                    x_norm.append(self.spade_emb(x[:, :, i], cond))
                x_norm = torch.stack(x_norm, 2)
            else:
                x_norm = x
            x1 = self.avgpool(x_norm)
            # one linear prediction per remaining time step
            output = []
            for i in range(x1.size(2)):
                output.append(self.fc(x1[:, :, i].reshape(x1.size(0), -1)))
            return torch.cat(output, dim=1), out
        else:
            # temporal dim is 1 here; squeeze and score spatially
            output = self.final(x.squeeze(2))
            return output, out

    def loss(self, pred, real):
        """Discriminator loss: BCE-with-logits or hinge, per config."""
        if self.bce_loss:
            # vanilla gan loss
            return self.bce(pred, torch.ones_like(pred) if real else torch.zeros_like(pred))
        else:
            # hinge loss
            if real:
                l = torch.mean(torch.nn.ReLU()(1.0 - pred))
            else:
                l = torch.mean(torch.nn.ReLU()(1.0 + pred))
            return l

    def gp(self, pred_fake, x_fake):
        """Per-sample squared gradient norm of the prediction w.r.t. its input."""
        batch_size = x_fake.size(0)
        grad_dout = torch.autograd.grad(
            outputs=pred_fake.sum(), inputs=x_fake,
            create_graph=True, retain_graph=True, only_inputs=True
        )[0]
        grad_dout2 = grad_dout.pow(2)
        assert (grad_dout2.size() == x_fake.size())
        reg = grad_dout2.view(batch_size, -1).sum(1)
        return reg

    def fmap_loss(self, fmap1, fmap2, loss="l1"):
        """Mean L1 (or L2) distance between two lists of feature maps."""
        recp_loss = 0
        for idx in range(len(fmap1)):
            if loss == "l1":
                recp_loss += torch.mean(torch.abs((fmap1[idx] - fmap2[idx])))
            if loss == "l2":
                recp_loss += torch.mean((fmap1[idx] - fmap2[idx]) ** 2)
        return recp_loss / len(fmap1)
# return output, out, mu
if __name__ == '__main__':
    ## Test 3dconvnet with dummy input
    import os
    os.environ["CUDA_VISIBLE_DEVICES"] = '7'
    config = {"num_classes": 1, "patch_temp_disc": True,"spatial_size": 128, "bce_loss": False, "conditional": True}
    # dummy video batch: (batch, channels, time, height, width)
    dummy = torch.rand((2, 3, 6, 128, 128)).cuda()
    # 2-channel conditioning map, matching SPADE's label_nc
    dummy_cond = torch.rand((2, 2, 128, 128)).cuda()
    model = resnet(config=config,spatial_size=128, sequence_length=dummy.shape[2]).cuda()
    print("Number of parameters in generator", sum(p.numel() for p in model.parameters()))
    if config["conditional"]:
        out, out2 = model(dummy,dummy_cond)
    else:
        out, out2,= model(dummy)
    test = 1 | 17,349 | 37.988764 | 191 | py |
interactive-image2video-synthesis | interactive-image2video-synthesis-main/models/latent_flow_net.py | import torch
from torch import nn
from torch.nn import functional as F
import numpy as np
import math
from models.blocks import Conv2dBlock, ResBlock, AdaINLinear, NormConv2d,ConvGRU
class OscillatorModel(nn.Module):
    """Recurrent video synthesis model for oscillating motions.

    A skip-connection encoder provides per-scale object codes, a poke
    encoder yields a motion code `delta`, and a ConvGRU rolls the deepest
    object code forward in time. The decoder reconstructs one frame per
    rollout step.
    """
    def __init__(self, spatial_size, config, n_no_motion=2, logger=None):
        """
        :param spatial_size: accepted for interface parity with the sibling
            models (the stage count here comes from config["layers"]).
        :param config: dict with "layers", "nf_deep" and optional
            "reparameterize_poke", "norm_layer", "n_gru_layers".
        :param n_no_motion: number of trailing training frames during which
            the target difference is forced to zero.
        """
        super().__init__()
        # number of downsampling layers; always such that spatial bottleneck size is 16x16
        self.reparamterize = config["reparameterize_poke"] if "reparameterize_poke" in config else False
        self.norm_layer = config["norm_layer"] if "norm_layer" in config else "in"
        self.layers = config["layers"]
        self.n_gru_layers = config["n_gru_layers"] if "n_gru_layers" in config else 3
        self.n_no_motion = n_no_motion
        self.n_stages = len(self.layers)
        assert self.n_no_motion is not None
        nf_first_shape_enc = int(max(32, config["nf_deep"] / (2 ** self.n_stages)))
        self.shape_enc = SkipConnectionEncoder(nf_in=3, nf_max=self.layers[-1], n_stages=self.n_stages, n_skip_stages=self.n_stages,
                                               nf_first=nf_first_shape_enc, norm_layer=self.norm_layer, layers=self.layers)
        self.dynamics_enc = Encoder(2, nf_max=self.layers[-1], n_stages=self.n_stages,
                                    variational=self.reparamterize, norm_layer=self.norm_layer, layers=self.layers)
        ups = [False] * self.n_gru_layers
        # input_sizes = [self.layers[-1]*2] + (len(self.layers)-1) * [self.layers[-1]]
        # GRU input is [scaled poke code, current-vs-target difference], hence * 2
        self.fusion_block = ConvGRU(input_size=self.layers[-1] * 2, hidden_sizes=self.layers[-1], kernel_sizes=3, n_layers=self.n_gru_layers,
                                    upsampling=ups,)
        self.dec = SkipConnectionDecoder(nf_in=self.layers[-1], in_channels=self.shape_enc.depths, n_skip_stages=len(self.layers),
                                         disentanglement=False, norm_layer=self.norm_layer, layers=self.layers)
        if logger is not None:
            logger.info("Constructed OscillatorModel")
            logger.info(f"Layers of OscillatorModel is {self.layers}")
            logger.info(f"Encoder channels of oscillator model is {self.layers}")

    def forward(self, input_img, poke, len, n_ref, target_img=None):
        """Roll out `len` frames from a single image and a poke.

        :param len: number of frames to generate (name shadows the builtin;
            kept for caller compatibility).
        :param n_ref: poke influence is ramped down linearly until step n_ref.
        :param target_img: reference frame for the difference signal;
            defaults to the input image.
        :return: (stacked frames of shape (B, len, ...), list of per-step
            deepest object codes).
        """
        imgs = []
        sigmas_hat_out = []
        # bugfix: `target_img == None` is unreliable for tensors (it is an
        # overloaded comparison, not an identity test); use `is None`
        if target_img is None:
            target_img = input_img
        if self.reparamterize:
            delta, mu, _ = self.dynamics_enc(poke)
        else:
            delta = self.dynamics_enc(poke)[0]
        # only first time shape encoding
        # if self.poke_scale_mode and not poke_linear:
        sigmas = self.shape_enc(input_img)
        sigma_tgt = self.shape_enc(target_img)[-1]
        # deepest code is evolved by the GRU; the rest serve as skips
        sigma_dyn = sigmas.pop()
        pred = [sigma_dyn] * self.n_gru_layers
        pred_out = pred[-1]
        for n in range(len):
            # apply fusion block: input is delta, hidden states are the sigma_n
            # linearly decaying poke influence until n_ref, then zero
            delta_in = delta * (1. - float(n) / (n_ref - 1)) if n <= n_ref else torch.zeros_like(delta)
            if self.training:
                # force the difference signal to zero for the last
                # n_no_motion training steps
                sigma_diff = pred_out - sigma_tgt if n < len - self.n_no_motion else torch.zeros_like(pred_out)
            else:
                sigma_diff = pred_out - sigma_tgt
            delta_in = torch.cat([delta_in, sigma_diff], 1)
            # predict object encoding at next time step
            pred = self.fusion_block(delta_in, pred)
            pred_out = pred[-1]
            sigmas.append(pred_out)
            # decode
            x = self.dec(sigmas, [], del_shape=False)
            imgs.append(x)
            sigmas_hat_out.append(pred_out)
            # remove pred so the skip list is reusable next step
            sigmas.pop()
        imgs = torch.stack(imgs, dim=1)
        # sigmas_hat_out[-1].reverse()
        return imgs, sigmas_hat_out
class SAVPArchModel(nn.Module):
    """SAVP-style baseline: a poke encoder feeding a recurrent generator."""

    def __init__(self, spatial_size, config):
        super().__init__()
        # downsample until the spatial bottleneck is 16x16
        self.n_stages = int(np.log2(spatial_size[0] // 16))
        self.poke_every_t = config.get("poke_every_t", True)
        self.dynamics_enc = Encoder(nf_in=2, nf_max=64, n_stages=self.n_stages)
        self.gen = SAVPGenerator(self.poke_every_t)

    def forward(self, img, poke, len):
        """Encode the poke once, then roll out `len` frames from `img`."""
        motion_code = self.dynamics_enc(poke)[0]
        return self.gen(img, motion_code, len)
class SAVPGenerator(nn.Module):
    """Recurrent encoder-decoder frame generator (SAVP-style).

    Each step re-encodes the previous output frame, fuses it with the poke
    code at the 16x16 bottleneck, and decodes the next frame. ConvGRUs at
    every scale carry state across time steps. Output resolution is
    hard-coded to 64x64 by the final upsampling stages.
    """
    def __init__(self, poke_every):
        """:param poke_every: if False, the poke code is injected only at t=0."""
        super().__init__()
        self.poke_every_t = poke_every
        # encoder stuff
        self.conv_e1 = Conv2dBlock(3, 32, 3, 2, norm="in", padding=1, activation="relu")
        # ssize 32
        self.rnn_e1 = ConvGRU(32, 32, 3, 1)
        self.conv_e2 = Conv2dBlock(32, 64, 3, 2, norm="in", padding=1, activation="relu")
        # ssize 16
        self.rnn_e2 = ConvGRU(64, 64, 3, 1)
        # bottleneck (128 in-channels: 64 image features + 64 poke features)
        self.conv_bn = Conv2dBlock(128, 128, 3, 2, norm="in", padding=1, activation="relu")
        # ssize 8
        self.rnn_bn = ConvGRU(128, 64, 3, 1)
        # decoder stuff
        self.up1 = nn.Upsample((16, 16), mode="bilinear")
        # ssize 16
        self.conv_d1 = Conv2dBlock(128, 64, 3, 1, norm="in", padding=1, activation="relu")
        self.rnn_d1 = ConvGRU(64, 32, 3, 1)
        self.up2 = nn.Upsample((32, 32), mode="bilinear")
        # ssize 32
        self.conv_d2 = Conv2dBlock(64, 32, 3, 1, norm="in", padding=1, activation="relu")
        self.rnn_d2 = ConvGRU(32, 32, 3, 1)
        self.up3 = nn.Upsample((64, 64), mode="bilinear")
        self.conv_out = Conv2dBlock(32, 3, 3, 1, 1, norm="none", activation="tanh")

    def forward(self, img, delta, len):
        """Generate `len` frames; returns a (B, len, 3, 64, 64) stack."""
        x = img
        out_imgs = []
        for t in range(len):
            x1e = self.conv_e1(x)
            x1er = self.rnn_e1(x1e, [x1er] if t > 0 else None)[0]
            x2e = self.conv_e2(x1er)
            x2er = self.rnn_e2(x2e, [x2er] if t > 0 else None)[0]
            if t > 0 and not self.poke_every_t:
                # bugfix: was np.zeros_like(delta); delta is a torch tensor
                # and the torch.cat below requires a tensor, not an ndarray
                delta = torch.zeros_like(delta)
            xbn = torch.cat([x2er, delta], dim=1)
            xbn = self.conv_bn(xbn)
            xbnr = self.rnn_bn(xbn, [xbnr] if t > 0 else None)[0]
            x1d = self.up1(xbnr)
            # skip connection from the 16x16 encoder state
            x1d = self.conv_d1(torch.cat([x1d, x2er], 1))
            x1dr = self.rnn_d1(x1d, [x1dr] if t > 0 else None)[0]
            x2d = self.up2(x1dr)
            # skip connection from the 32x32 encoder state
            x2d = self.conv_d2(torch.cat([x2d, x1er], 1))
            x2dr = self.rnn_d2(x2d, [x2dr] if t > 0 else None)[0]
            x = self.conv_out(self.up3(x2dr))
            out_imgs.append(x)
        out_imgs = torch.stack(out_imgs, 1)
        return out_imgs
class ForegroundBackgroundModel(nn.Module):
    """Video model with separate foreground/background object codes.

    The encoder produces a foreground and a background code list; only the
    foreground codes are evolved in time by the ConvGRU, while the static
    background codes are concatenated back in before decoding.
    """
    def __init__(self, spatial_size, config):
        """
        :param config: dict with "poke_and_img", "zeroflow_baseline",
            "foreground_background_div" (>= 1), "nf_deep" and optionally
            "variational".
        """
        super().__init__()
        # number of downsampling layers; always such that spatial bottleneck size is 16x16
        self.n_stages = int(np.log2(spatial_size[0] // 16))
        self.cat_poke_img = config["poke_and_img"]
        self.zeroflow_baseline = config["zeroflow_baseline"]
        self.variational = config["variational"] if "variational" in config else False
        # channel split factor between foreground and background streams
        foreground_background_div = config["foreground_background_div"]
        assert foreground_background_div >= 1.
        nf_first_shape_enc = int(max(32, config["nf_deep"] / (2 ** self.n_stages)))
        if self.variational:
            self.shape_enc = VariationalSkipConnectionEncoderFGBG(nf_in=3, nf_max=config["nf_deep"], n_stages=self.n_stages, n_skip_stages=self.n_stages,
                                                                  nf_first=nf_first_shape_enc)
        else:
            self.shape_enc = SkipConnectionEncoder(nf_in=3, nf_max=config["nf_deep"], n_stages=self.n_stages,
                                                   n_skip_stages=self.n_stages, nf_first=nf_first_shape_enc,
                                                   fg_bg=True, div=foreground_background_div)
        self.dynamics_enc = Encoder(nf_in=5 if self.cat_poke_img else 2, nf_max=config["nf_deep"], n_stages=self.n_stages)
        # GRU hidden sizes mirror the encoder depths, scaled by the fg/bg split
        hidden_sizes = [int(config["nf_deep"] / foreground_background_div)] + [int(d / foreground_background_div) for d in self.shape_enc.depths]
        ups = [False] + self.shape_enc.downs
        self.fusion_block = ConvGRU(input_size=config["nf_deep"], hidden_sizes=hidden_sizes, kernel_sizes=3,
                                    n_layers=self.n_stages + 1, upsampling=ups)
        self.dec = SkipConnectionDecoder(nf_in=config["nf_deep"], in_channels=self.shape_enc.depths,
                                         n_skip_stages=self.n_stages, disentanglement=False)

    def forward(self, fg_img, bg_img, poke, len):
        """Roll out `len` frames (or reconstruct when len == 0).

        :return: (frames, foreground codes, background codes, mus, logstds);
            mus/logstds are only populated in the variational len == 0 path.
        """
        x = fg_img
        mus = logstds = None
        if len > 0:
            if self.zeroflow_baseline:
                # ablation: remove all motion information from the poke
                poke = torch.zeros_like(poke)
            if self.cat_poke_img:
                poke = torch.cat([poke, x], dim=1)
            imgs = []
            sigmas_fg = []
            sigmas_bg = []
            # infer dynamics input
            delta = self.dynamics_enc(poke)[0]
            # only first time shape encoding; [0] = foreground, [1] = background
            sigma_n = self.shape_enc(x)[0]
            sigma_bg = self.shape_enc(bg_img)[1]
            for n in range(len):
                # apply fusion block: input is delta, hidden states are the sigma_n
                # (GRU expects deepest-first ordering, hence the reverse dance)
                sigma_n.reverse()
                sigma_n = self.fusion_block(delta, sigma_n)
                sigma_n.reverse()
                # static background codes are concatenated back per scale
                sigma_cat = [torch.cat([sfg, sbg], dim=1) for sfg, sbg in zip(sigma_n, sigma_bg)]
                x = self.dec(sigma_cat, None, del_shape=True)
                imgs.append(x)
                # output foreground representation
                sigmas_fg.append(sigma_n)
                # out
                sigmas_bg.append(sigma_bg)
            imgs = torch.stack(imgs, dim=1)
            sigmas_fg[-1].reverse()
        else:
            # reconstruction path: encode and decode without dynamics
            if self.variational:
                sigmas_fg, sigmas_bg1, mus, logstds = self.shape_enc(x)
                _, sigmas_bg2, *_ = self.shape_enc(bg_img)
            else:
                sigmas_fg, sigmas_bg1 = self.shape_enc(x)
                _, sigmas_bg2 = self.shape_enc(bg_img)
            sigmas_bg = (sigmas_bg1, sigmas_bg2)
            sigmas = [torch.cat([sfg, sbg], dim=1) for sfg, sbg in zip(sigmas_fg, sigmas_bg2)]
            imgs = self.dec(sigmas, None, del_shape=True)
            sigmas_fg.reverse()
        return imgs, sigmas_fg, sigmas_bg, mus, logstds
class SkipSequenceModel(nn.Module):
    """Poke-conditioned video prediction with multi-scale skip connections.

    All encoder scales serve as hidden states of a multi-layer ConvGRU which
    is driven by the encoded poke; the decoder reconstructs one frame per
    rollout step.
    """
    def __init__(self, spatial_size, config, n_no_motion=None):
        """
        :param spatial_size: (H, W) of the input frames.
        :param config: dict; see attribute assignments below for the keys
            read (most are optional with defaults).
        :param n_no_motion: unused here, kept for interface parity with
            the sibling models.
        """
        super().__init__()
        # number of downsampling layers; always such that spatial bottleneck
        # size is min_spatial_size x min_spatial_size
        self.min_spatial_size = config["min_spatial_size"] if "min_spatial_size" in config else 16
        self.n_stages = int(np.log2(spatial_size[0] // self.min_spatial_size))
        print(f"number of stages in model is {self.n_stages}")
        self.disentanglement = config["disentanglement"] if "disentanglement" in config else False
        self.cat_poke_img = config["poke_and_img"]
        self.zeroflow_baseline = config["zeroflow_baseline"]
        self.poke_every_t = config["poke_every_t"] if "poke_every_t" in config else True
        use_spectral_norm = config["spectnorm_decoder"] if "spectnorm_decoder" in config else False
        self.reparamterize = config["reparameterize_poke"] if "reparameterize_poke" in config else False
        self.norm_layer = config["norm_layer"] if "norm_layer" in config else "in"
        self.layers = config["layers"] if "layers" in config and len(config["layers"]) > 0 else None
        self.poke_scale_mode = config["poke_scale"] if "poke_scale" in config else False
        nf_first_shape_enc = int(max(32, config["nf_deep"] / (2 ** self.n_stages)))
        self.shape_enc = SkipConnectionEncoder(nf_in=3, nf_max=config["nf_deep"], n_stages=self.n_stages, n_skip_stages=self.n_stages,
                                               nf_first=nf_first_shape_enc, norm_layer=self.norm_layer, layers=self.layers)
        self.dynamics_enc = Encoder(nf_in=5 if self.cat_poke_img else 2, nf_max=config["nf_deep"], n_stages=self.n_stages,
                                    variational=self.reparamterize, norm_layer=self.norm_layer, layers=self.layers)
        # one GRU layer per encoder scale plus the bottleneck
        hidden_sizes = [config["nf_deep"]] + self.shape_enc.depths
        ups = [False] + self.shape_enc.downs
        self.fusion_block = ConvGRU(input_size=config["nf_deep"], hidden_sizes=hidden_sizes, kernel_sizes=3,
                                    n_layers=self.n_stages + 1 if self.layers is None else len(self.layers) + 1,
                                    upsampling=ups,)
        self.dec = SkipConnectionDecoder(nf_in=config["nf_deep"], in_channels=self.shape_enc.depths,
                                         n_skip_stages=self.n_stages if self.layers is None else len(self.layers),
                                         disentanglement=False, spectral_norm=use_spectral_norm,
                                         norm_layer=self.norm_layer, layers=self.layers)

    def forward(self, app_img, shape_img, poke, len, poke_linear=False, delta_scaling=None, n_zero_frames=0, invert_poke=False, poke_jump=False):
        """Roll out `len` frames (or reconstruct when len == 0).

        :param len: number of frames (name shadows the builtin; kept for
            caller compatibility).
        :param poke_linear: linearly decay (or, with invert_poke, ramp
            down then invert) the poke influence over time.
        :param delta_scaling: explicit per-step poke scale factors, used
            when poke_scale_mode is on and poke_linear is off.
        :return: (frames, bottleneck codes, per-step code lists, []).
        """
        x = shape_img
        if len > 0:
            if self.zeroflow_baseline:
                # ablation: remove all motion information from the poke
                poke = torch.zeros_like(poke)
            if self.cat_poke_img:
                poke = torch.cat([poke, app_img], dim=1)
            imgs = []
            sigmas_hat_out = []
            sigmas_out = []
            # infer dynamics input
            if self.reparamterize:
                delta, mu, _ = self.dynamics_enc(poke)
            else:
                delta = self.dynamics_enc(poke)[0]
            sigmas_out = delta
            # only first time shape encoding
            sigma_n = self.shape_enc(x)
            for n in range(len):
                # apply fusion block: input is delta, hidden states are the
                # sigma_n (GRU expects deepest-first ordering)
                sigma_n.reverse()
                if self.poke_scale_mode:
                    if poke_linear:
                        if invert_poke:
                            delta_in = delta * (1 - float(n) / int(len / 2)) if n < int(len / 2) else delta * (float(n - int(len / 2)) / int(math.ceil(float(len) / 2)) - 1)
                        else:
                            delta_in = (1 - float(n) / (len - n_zero_frames)) * delta if n <= len - n_zero_frames else torch.zeros_like(delta)
                    else:
                        # bugfix: original multiplied `delta_in` before it was
                        # ever assigned (UnboundLocalError on the first step);
                        # scale the encoded poke instead, matching
                        # SingleScaleBaseline's behavior
                        delta_in = delta_scaling[n] * delta
                else:
                    if poke_jump:
                        delta_in = delta if n < len - n_zero_frames else torch.zeros_like(delta)
                    else:
                        delta_in = delta if self.poke_every_t else (delta if n == 0 else torch.zeros_like(delta))
                sigma_n = self.fusion_block(delta_in, sigma_n)
                sigma_n.reverse()
                x = self.dec(sigma_n, [], del_shape=False)
                imgs.append(x)
                sigmas_hat_out.append(sigma_n)
            imgs = torch.stack(imgs, dim=1)
            sigmas_hat_out[-1].reverse()
        else:
            # reconstruction path: encode and decode without dynamics
            sigmas = self.shape_enc(x)
            sigmas_out = sigmas
            sigmas_hat_out = None
            imgs = self.dec(sigmas, [], del_shape=False)
            sigmas_out.reverse()
        return imgs, sigmas_out, sigmas_hat_out, []
class SingleScaleBaseline(nn.Module):
    """Ablation baseline: only the bottleneck code is evolved by the ConvGRU
    (no multi-scale skip connections)."""
    def __init__(self, spatial_size, config, n_no_motion=None):
        """
        :param config: dict; see attribute assignments below for the keys
            read (most are optional with defaults).
        :param n_no_motion: required when config["poke_scale"] is set.
        """
        super().__init__()
        # number of downsampling layers; always such that spatial bottleneck size is 16x16
        self.n_stages = int(np.log2(spatial_size[0] // 16))
        self.disentanglement = config["disentanglement"] if "disentanglement" in config else False
        self.cat_poke_img = config["poke_and_img"]
        self.zeroflow_baseline = config["zeroflow_baseline"]
        self.poke_scale_mode = config["poke_scale"] if "poke_scale" in config else False
        self.poke_every_t = config["poke_every_t"] if "poke_every_t" in config else True
        self.n_no_motion = n_no_motion
        if self.poke_scale_mode:
            assert self.n_no_motion is not None
        print("Initialize SingleScaleBaseline")
        # self.multiscale_fusion_block = config["multiscale_dynamics"]
        # default is dynamics model
        if self.disentanglement:
            self.appearance_enc = Encoder(nf_in=3, nf_max=config["nf_deep"], n_stages=self.n_stages, prepare_adain=True,
                                          resnet_down=config["resnet_down"] if "resnet_down" in config else False)
        # n_skip_stages=0: the encoder yields only the bottleneck code
        self.shape_enc = SkipConnectionEncoder(nf_in=3, nf_max=config["nf_deep"], n_stages=self.n_stages, n_skip_stages=0)
        self.dynamics_enc = Encoder(nf_in=5 if self.cat_poke_img else 2, nf_max=config["nf_deep"], n_stages=self.n_stages)
        self.n_gru_layers = 3
        # NOTE(review): the ConvGRU is built with config["n_gru_layers"] while
        # the hidden-state stacking in forward() uses self.n_gru_layers (=3);
        # these must agree — confirm the config always sets 3.
        self.fusion_block = ConvGRU(input_size=config["nf_deep"], hidden_sizes=config["nf_deep"], kernel_sizes=3, n_layers=config["n_gru_layers"])
        self.dec = SkipConnectionDecoder(nf_in=config["nf_deep"], in_channels=self.shape_enc.depths, n_skip_stages=0, disentanglement=self.disentanglement)

    def forward(self, app_img, shape_img, poke, len, poke_linear=False, delta_scaling=None, n_zero_frames=0, invert_poke=False, poke_jump=False):
        """Roll out `len` frames (or reconstruct when len == 0).

        :param len: number of frames (name shadows the builtin; kept for
            caller compatibility).
        :return: (frames, bottleneck codes, per-step bottleneck codes, alpha
            appearance code or None).
        """
        if self.disentanglement:
            alpha, *_ = self.appearance_enc(app_img)
        else:
            alpha = None
        # sigma = self.shape_enc(shape_img)
        if self.zeroflow_baseline:
            # ablation: remove all motion information from the poke
            poke = torch.zeros_like(poke)
        if self.cat_poke_img:
            poke = torch.cat([poke, app_img], dim=1)
        x = shape_img
        if len > 0:
            imgs = []
            sigmas_hat_out = []
            sigmas_out = []
            # infer dynamics input
            delta = self.dynamics_enc(poke)[0]
            sigma_n = self.shape_enc(x)[0]
            # replicate the bottleneck code as hidden state for every GRU layer
            sigma_n = torch.stack([sigma_n] * self.n_gru_layers)
            for n in range(len):
                # delta scaling
                delta_in = delta if self.poke_every_t else (delta if n == 0 else torch.zeros_like(delta))
                if self.poke_scale_mode:
                    if poke_linear:
                        if invert_poke:
                            # ramp poke down over the first half, then invert it
                            delta_in = delta * (1 - float(n) / int(len / 2)) if n < int(len / 2) else delta * (float(n - int(len / 2)) / int(math.ceil(float(len) / 2)) - 1)
                        else:
                            delta_in = (1 - float(n) / (len - n_zero_frames)) * delta if n <= len - n_zero_frames else torch.zeros_like(delta)
                    else:
                        delta_in = delta_scaling[n] * delta_in
                # apply fusion block
                sigma_n = self.fusion_block(delta_in, sigma_n)
                # residual connection
                sigma_n1 = sigma_n[-1]
                x = self.dec([sigma_n1], alpha, del_shape=False)
                imgs.append(x)
                sigmas_hat_out.append(sigma_n1)
            # sigmas_hat_out = torch.stack(sigmas_hat_out)
            imgs = torch.stack(imgs, dim=1)
        else:
            # reconstruction path: encode and decode without dynamics
            sigmas = self.shape_enc(x)
            sigmas_out = sigmas[-1]
            sigmas_hat_out = None
            imgs = self.dec(sigmas, alpha, del_shape=False)
        return imgs, sigmas_out, sigmas_hat_out, alpha
class ResidualSequenceBaseline(nn.Module):
    """Baseline that re-encodes each predicted frame and adds a GRU-updated
    motion residual to the bottleneck code before decoding."""
    def __init__(self, spatial_size, config):
        """
        :param config: dict with "poke_and_img", "zeroflow_baseline",
            "nf_deep" and optionally "disentanglement", "resnet_down".
        """
        super().__init__()
        # number of downsampling layers; always such that spatial bottleneck size is 16x16
        self.n_stages = int(np.log2(spatial_size[0] // 16))
        self.disentanglement = config["disentanglement"] if "disentanglement" in config else False
        self.cat_poke_img = config["poke_and_img"]
        self.zeroflow_baseline = config["zeroflow_baseline"]
        # self.multiscale_fusion_block = config["multiscale_dynamics"]
        # default is dynamics model
        if self.disentanglement:
            self.appearance_enc = Encoder(nf_in=3, nf_max=config["nf_deep"], n_stages=self.n_stages, prepare_adain=True,
                                          resnet_down=config["resnet_down"] if "resnet_down" in config else False)
        # n_skip_stages=0: only the bottleneck code is used
        self.shape_enc = SkipConnectionEncoder(nf_in=3, nf_max=config["nf_deep"], n_stages=self.n_stages, n_skip_stages=0)
        self.dynamics_enc = Encoder(nf_in=5 if self.cat_poke_img else 2, nf_max=config["nf_deep"], n_stages=self.n_stages)
        self.n_gru_layers = 3
        self.fusion_block = ConvGRU(input_size=config["nf_deep"], hidden_sizes=config["nf_deep"], kernel_sizes=3, n_layers=3)
        self.dec = SkipConnectionDecoder(nf_in=config["nf_deep"], in_channels=self.shape_enc.depths, n_skip_stages=0, disentanglement=self.disentanglement)

    def forward(self, app_img, shape_img, poke, len):
        """Roll out `len` frames (or reconstruct when len == 0).

        :param len: number of frames (name shadows the builtin; kept for
            caller compatibility).
        :return: (frames, bottleneck codes, per-step codes, alpha or None).
        """
        if self.disentanglement:
            alpha, *_ = self.appearance_enc(app_img)
        else:
            alpha = None
        # sigma = self.shape_enc(shape_img)
        if self.zeroflow_baseline:
            # ablation: remove all motion information from the poke
            poke = torch.zeros_like(poke)
        if self.cat_poke_img:
            poke = torch.cat([poke, app_img], dim=1)
        x = shape_img
        if len > 0:
            imgs = []
            sigmas_hat_out = []
            sigmas_out = []
            # infer dynamics input; replicated as hidden state per GRU layer
            delta = self.dynamics_enc(poke)[0]
            delta = torch.stack([delta] * self.n_gru_layers)
            for n in range(len):
                # shape encoding (re-encodes the last predicted frame)
                sigma_n = self.shape_enc(x)[0]
                # apply fusion block
                delta = self.fusion_block(sigma_n, delta)
                # residual connection
                sigma_n1 = sigma_n + delta[-1]
                x = self.dec([sigma_n1], alpha)
                imgs.append(x)
                sigmas_hat_out.append(sigma_n1)
            sigmas_hat_out = torch.stack(sigmas_hat_out,)
            imgs = torch.stack(imgs, dim=1)
        else:
            # reconstruction path: encode and decode without dynamics
            sigmas = self.shape_enc(x)
            sigmas_out = sigmas[-1]
            sigmas_hat_out = None
            imgs = self.dec(sigmas, alpha, del_shape=False)
        return imgs, sigmas_out, sigmas_hat_out, alpha
class DynamicSkipModel(nn.Module):
    """Skip-connection model: shape features (with encoder skips) are fused with a
    poke-derived dynamics code either at the bottleneck only or at every scale
    (``multiscale_dynamics``), then decoded; optional AdaIN appearance conditioning.
    """
    def __init__(self, spatial_size,config):
        super().__init__()
        # number of downsampling layers; always such that spatial bottleneck size is 16x16
        self.n_stages = int(np.log2(spatial_size[0] // 16))
        self.disentanglement = config["disentanglement"] if "disentanglement" in config else False
        self.cat_poke_img = config["poke_and_img"]
        self.zeroflow_baseline = config["zeroflow_baseline"]
        self.multiscale_fusion_block = config["multiscale_dynamics"]
        # default is dynamics model
        if self.disentanglement:
            self.appearance_enc = Encoder(nf_in=3, nf_max=config["nf_deep"], n_stages=self.n_stages, prepare_adain=True,
                                          resnet_down=config["resnet_down"] if "resnet_down" in config else False)
        n_skip_stages = min(config["n_skip_stages"],self.n_stages) if "n_skip_stages" in config else self.n_stages
        self.shape_enc = SkipConnectionEncoder(nf_in=3, nf_max=config["nf_deep"], n_stages=self.n_stages,n_skip_stages=n_skip_stages)
        if config["multiscale_dynamics"]:
            # fuse shape and dynamics codes at every scale of the pyramids
            self.dynamics_enc = SkipConnectionEncoder(nf_in=5 if self.cat_poke_img else 2, nf_max=config["nf_deep"], n_stages=self.n_stages, n_skip_stages=n_skip_stages)
            self.fusion_block = FusionBlockMultiscale(nf_in=config["nf_deep"],nfs=self.shape_enc.depths,n_blocks=config["n_blocks"])
        else:
            # fuse only the bottleneck codes
            self.dynamics_enc = Encoder(nf_in=5 if self.cat_poke_img else 2, nf_max=config["nf_deep"], n_stages=self.n_stages)
            self.fusion_block = LearnedFusionBlock(nf=config["nf_deep"], n_blocks=config["n_blocks"])
        self.dec = SkipConnectionDecoder(nf_in=config["nf_deep"],in_channels=self.shape_enc.depths, n_skip_stages=n_skip_stages,disentanglement=self.disentanglement)
    def forward(self,app_img,shape_img,poke, apply_dynamics = False):
        """Returns (img, sigma_out, alpha).

        NOTE(review): in the single-scale branch ``sigma.pop()`` mutates the encoder
        output list before ``sigma + [sigma_hat]`` rebuilds the decoder input — the
        statement order here is load-bearing.
        """
        if self.disentanglement:
            alpha, *_ = self.appearance_enc(app_img)
        else:
            alpha = None
        sigma = self.shape_enc(shape_img)
        if self.zeroflow_baseline:
            # ablation: zero the poke so the dynamics branch carries no information
            poke = torch.zeros_like(poke)
        if self.cat_poke_img:
            poke = torch.cat([poke,app_img],dim=1)
        # multiscale: list of per-scale codes; otherwise a single bottleneck code
        delta = self.dynamics_enc(poke) if self.multiscale_fusion_block else self.dynamics_enc(poke)[0]
        if apply_dynamics:
            # pop() removes the bottleneck code from sigma so it is replaced by sigma_hat below
            sigma_hat = self.fusion_block(sigma if self.multiscale_fusion_block else sigma.pop(), delta)
            if self.multiscale_fusion_block:
                sigma_in_dec = sigma_hat
            else:
                sigma_in_dec = sigma + [sigma_hat]
            sigma_out = sigma_hat
        else:
            sigma_out = sigma if self.multiscale_fusion_block else sigma[-1]
        img = self.dec(sigma_in_dec if apply_dynamics else sigma,alpha,del_shape=False)
        return img, sigma_out, alpha
class DisentangledModelWithoutDynamics(nn.Module):
    """Appearance/shape disentanglement baseline: appearance and shape are encoded
    separately and decoded together, with no dynamics branch at all."""

    def __init__(self, spatial_size, config):
        super().__init__()
        # number of downsampling layers; always such that the spatial bottleneck is 16x16
        self.n_stages = int(np.log2(spatial_size[0] // 16))
        self.adain = config["adain"]
        # default is dynamics model
        self.latent_fusion = config.get("latent_fusion", None)
        self.appearance_enc = Encoder(
            nf_in=3,
            nf_max=config["nf_deep"],
            n_stages=self.n_stages,
            prepare_adain=self.adain,
            resnet_down=config.get("resnet_down", False),
        )
        self.shape_enc = Encoder(
            nf_in=3,
            nf_max=config["nf_deep"],
            n_stages=self.n_stages,
            variational=config["ib_shape"],
        )
        if self.adain:
            self.dec = AdaINDecoderDisentangled(
                nf_in=config["nf_deep"],
                n_stages=self.n_stages,
                latent_fusion=self.latent_fusion,
                nf_in_bn=self.appearance_enc.nf_in_bn,
            )
        else:
            self.dec = DecoderEntangled(nf_in=2 * config["nf_deep"], n_stages=self.n_stages)

    def forward(self, app_img, shape_img):
        """Encode appearance and shape, decode, and return
        (img, alpha, sigma, shape_mean, shape_logstd)."""
        # appearance code (vector when adain) plus its raw spatial features
        alpha, alpha_spatial, *_ = self.appearance_enc(app_img)
        # variational shape code and its posterior parameters
        sigma, shape_mean, shape_logstd = self.shape_enc(shape_img)
        img = self.dec(alpha, sigma, alpha_spatial)
        return img, alpha, sigma, shape_mean, shape_logstd
class BasicDisentangledModel(nn.Module):
    """Disentangled model with a single-step dynamics branch: appearance, shape and
    poke are encoded separately; a learned fusion block applies the dynamics to the
    shape code, which is decoded with (optionally AdaIN-based) appearance conditioning.
    """
    def __init__(self, spatial_size,config):
        super().__init__()
        # number of downsampling layers; always such that spatial bottleneck size is 16x16
        self.n_stages = int(np.log2(spatial_size[0] // 16))
        self.zero_flow_baseline = config["zero_flow_baseline"]
        self.adain = config["adain"]
        self.cat_poke_img = config["poke_and_img"]
        # default is dynamics model
        self.latent_fusion = config["latent_fusion"] if "latent_fusion" in config else None
        self.appearance_enc = Encoder(nf_in=3,nf_max = config["nf_deep"], n_stages=self.n_stages, prepare_adain=self.adain,
                                      resnet_down=config["resnet_down"] if "resnet_down" in config else False)
        self.shape_enc = Encoder(nf_in=3, nf_max = config["nf_deep"],n_stages=self.n_stages, variational=config["ib_shape"])
        # poke is 2-channel flow, optionally concatenated with the 3-channel image
        self.dynamics_enc = Encoder(nf_in=5 if self.cat_poke_img else 2, nf_max=config["nf_deep"], n_stages=self.n_stages)
        self.fusion_block = LearnedFusionBlock(nf=config["nf_deep"],n_blocks=config["n_blocks"])
        if self.adain:
            self.dec = AdaINDecoderDisentangled(nf_in=config["nf_deep"],n_stages=self.n_stages,latent_fusion=self.latent_fusion,nf_in_bn=self.appearance_enc.nf_in_bn)
        else:
            self.dec = DecoderEntangled(nf_in=2 * config["nf_deep"],n_stages=self.n_stages)
    def forward(self, app_img, shape_img, poke, apply_dynamics = False):
        """Returns (img, sigma, sigma_hat, alpha, delta, shape_mean, shape_logstd)."""
        # appearance representation
        alpha, alpha_spatial, *_ = self.appearance_enc(app_img)
        # shape representation
        sigma, shape_mean, shape_logstd = self.shape_enc(shape_img)
        # dynamics representation
        if self.zero_flow_baseline:
            poke = torch.zeros_like(poke)
        if self.cat_poke_img:
            poke = torch.cat([poke,app_img],dim=1)
        delta, *_ = self.dynamics_enc(poke)
        if self.zero_flow_baseline:
            # zero the dynamics code itself as well, not only the poke input
            delta = torch.zeros_like(delta)
        # apply dynamics to shape representation (computed even when not applied,
        # so it can still be returned for auxiliary losses)
        sigma_hat = self.fusion_block(sigma,delta)
        # decode
        if apply_dynamics:
            img = self.dec(alpha,sigma_hat,alpha_spatial)
        else:
            img = self.dec(alpha,sigma,alpha_spatial)
        return img, sigma, sigma_hat, alpha, delta, shape_mean, shape_logstd
class LearnedFusionBlock(nn.Module):
    """Fuses a shape code with a dynamics code through a stack of residual blocks."""

    def __init__(self, nf, n_blocks):
        super().__init__()
        assert n_blocks >= 1
        # the first block reduces the concatenated (sigma, delta) channels back to nf
        layers = [ResBlock(2 * nf, nf)] + [ResBlock(nf, nf) for _ in range(1, n_blocks)]
        self.model = nn.Sequential(*layers)

    def forward(self, sigma, delta):
        fused = torch.cat([sigma, delta], dim=1)
        return self.model(fused)
class BasicModel(nn.Module):
    """Entangled baseline: one (optionally variational) object encoder and one flow
    encoder; the decoder mixes both codes, with AdaIN conditioning when configured.
    """
    def __init__(self, spatial_size, config):
        super().__init__()
        # number of downsampling layers; always such that spatial bottleneck size is 16x16
        self.n_stages = int(np.log2(spatial_size[0] // 16))
        self.zero_flow_baseline = config["zero_flow_baseline"]
        self.adain = config["adain"]
        self.obj_enc = Encoder(
            nf_in=3, nf_max=config["nf_deep"], n_stages=self.n_stages,variational=config["variational"])
        # 2-channel optical-flow input
        self.flow_enc = Encoder(
            nf_in=2,
            nf_max=config["nf_deep"],
            n_stages=self.n_stages,
            prepare_adain=self.adain
        )
        if self.adain:
            self.dec = AdaINDecoderEntangled(
                nf_in=config["nf_deep"], n_stages=self.n_stages,latent_fusion=config["latent_fusion"]
            )
        else:
            self.dec = DecoderEntangled(nf_in=2*config["nf_deep"],n_stages=self.n_stages)
    def forward(self, image, flow,sample_prior=False):
        """Returns (img, object_code, dynamics_code1, mean, logstd)."""
        # get object code and variational parameters if model is variational
        object_code, mean, logstd = self.obj_enc(image,sample_prior)
        # get dynamics codes; dynamics_code2 is the encoder's pre-bottleneck
        # spatial feature map (second return slot of Encoder)
        dynamics_code1, dynamics_code2, _ = self.flow_enc(flow)
        if self.zero_flow_baseline:
            # this is without flow usage, to measure the impacts of the flow as adain input
            dynamics_code1 = torch.zeros_like(dynamics_code1)
            dynamics_code2 = torch.zeros_like(dynamics_code2)
        # decode
        if self.adain:
            img = self.dec(object_code, dynamics_code1,dynamics_code2)
        else:
            img = self.dec(object_code,dynamics_code1)
        return img, object_code, dynamics_code1, mean, logstd
class VariationalSkipConnectionEncoderFGBG(nn.Module):
    """Skip-connection encoder splitting every feature map into a variational
    foreground and a deterministic background.

    Each conv widens to 1.5x the nominal width ``nf``: 2/3 of the output channels
    (= nf) form the foreground, split in half into (mu, logstd) for
    reparameterization; the remaining 1/3 (= nf/2) is the background. The
    concatenation [mu, bg] restores the nominal ``nf`` channels the next conv
    expects.

    NOTE(review): the forward pass performs this re-concatenation only for stages
    with ``i >= n_stages - n_skip_stages``; with fewer skip stages the channel
    counts would not line up — presumably this encoder is always used with
    n_skip_stages == n_stages. TODO confirm.
    """
    def __init__(self,nf_in,nf_max, n_stages, n_skip_stages, act = "relu", nf_first=None):
        super().__init__()
        self.blocks = nn.ModuleList()
        self.n_stages = n_stages
        self.depths = []   # nominal per-stage channel counts, deepest first
        self.downs = []    # parallel flags: True for downsampling stages, False for the bottleneck
        if nf_first is None:
            nf = 64
        else:
            nf = nf_first
        # required
        self.blocks.append(
            NormConv2d(
                nf_in, int(1.5 * nf), 3, 2, padding=1
            )
        )
        self.n_skip_stages = n_skip_stages
        self.depths.append(nf)
        for n in range(self.n_stages - 1):
            # output width is 1.5x the next nominal width (see class docstring)
            self.blocks.append(
                NormConv2d(
                    nf,
                    min(nf * 3, int(1.5*nf_max)),
                    3,
                    2,
                    padding=1,
                )
            )
            nf = min(nf * 2, nf_max)
            self.depths.insert(0, nf)
            self.downs.insert(0, True)
        self.bottleneck = ResBlock(nf, int(1.5 * nf_max), activation=act, stride=1)
        self.downs.insert(0, False)
        self.squash = nn.Sigmoid()
    def _reparameterize(self,codes):
        # split the channels into mean / logstd halves; sigmoid squashes logstd into (0, 1)
        mu = codes[:,:int(codes.shape[1]/2)]
        logstd = codes[:,int(codes.shape[1]/2):]
        logstd = self.squash(logstd)
        std = torch.exp(logstd)
        eps = torch.randn_like(std)
        return eps.mul(std) + mu, mu, logstd
    def forward(self,x):
        """Returns (out_fg, out_bg, out_mu, out_logstd), each a list ordered
        shallow-to-deep with the bottleneck entry last."""
        out_fg = []
        out_bg = []
        out_logstd = []
        out_mu = []
        for i in range(self.n_stages):
            x = self.blocks[i](x)
            if i >= self.n_stages - self.n_skip_stages:
                # 2/3 of the channels are the variational foreground (mu|logstd)
                act_div = int(x.shape[1] * 2. / 3.)
                sample, mu, logstd = self._reparameterize(x[:,:act_div])
                out_fg.append(sample)
                bg = x[:,act_div:]
                out_bg.append(bg)
                out_mu.append(mu)
                out_logstd.append(logstd)
                # propagate the deterministic mean (not the sample) to the next stage
                x = torch.cat([mu,bg], dim=1)
        x = self.bottleneck(x)
        act_div = int(x.shape[1] * 2. / 3.)
        sample, mu, logstd = self._reparameterize(x[:, :act_div])
        out_fg.append(sample)
        bg = x[:, act_div:]
        out_bg.append(bg)
        out_mu.append(mu)
        out_logstd.append(logstd)
        return out_fg, out_bg, out_mu, out_logstd
class SkipConnectionEncoder(nn.Module):
    """Strided-conv encoder that returns intermediate feature maps for decoder
    skip connections; with ``fg_bg`` each map is additionally split channel-wise
    into foreground/background parts at fraction 1/div.
    """
    def __init__(self,nf_in,nf_max, n_stages, n_skip_stages, act = "elu", nf_first=None, fg_bg = False, div= None, norm_layer="in", layers=None):
        super().__init__()
        self.blocks = nn.ModuleList()
        # an explicit `layers` list overrides both n_stages and the channel-doubling schedule
        self.n_stages = n_stages if layers is None else len(layers)
        self.depths = []   # channel counts of the returned maps, deepest first
        self.downs = []
        if nf_first is None:
            nf = 32
        else:
            nf = nf_first
        if layers is not None:
            nf = layers[0]
        self.fg_bg = fg_bg
        if self.fg_bg:
            assert div is not None
            self.div = div
        self.blocks.append(
            Conv2dBlock(
                nf_in, nf, 3, 2, norm=norm_layer, activation=act, padding=1
            )
        )
        # with explicit layers, every stage contributes a skip connection
        self.n_skip_stages = n_skip_stages if layers is None else len(layers)
        self.depths.append(nf)
        for n in range(self.n_stages - 1):
            self.blocks.append(
                Conv2dBlock(
                    nf,
                    min(nf * 2, nf_max) if layers is None else layers[n+1],
                    3,
                    2,
                    norm=norm_layer,
                    activation=act,
                    padding=1,
                )
            )
            nf = min(nf * 2, nf_max) if layers is None else layers[n+1]
            self.depths.insert(0,nf)
            self.downs.insert(0,True)
        self.bottleneck = ResBlock(nf, nf_max, activation=act, stride=1,norm=norm_layer)
        self.downs.insert(0,False)
    def forward(self,x):
        """Returns (out_fg, out_bg) when fg_bg is set, otherwise a single list of
        feature maps ordered shallow-to-deep with the bottleneck output last."""
        if self.fg_bg:
            out_fg = []
            out_bg = []
        else:
            out = []
        for i in range(self.n_stages):
            x = self.blocks[i](x)
            # only the deepest n_skip_stages maps are kept for skip connections
            if i >= self.n_stages - self.n_skip_stages:
                if self.fg_bg:
                    act_div = int(x.shape[1] / self.div)
                    out_fg.append(x[:,:act_div])
                    out_bg.append(x[:,act_div:])
                else:
                    out.append(x)
        x = self.bottleneck(x)
        if self.fg_bg:
            act_div = int(x.shape[1] / self.div)
            out_fg.append(x[:,:act_div])
            out_bg.append(x[:,act_div:])
            return out_fg, out_bg
        else:
            out.append(x)
            return out
class Encoder(nn.Module):
    """Strided-conv encoder with three mutually exclusive output heads:

    - plain: returns (bottleneck features, pre-bottleneck features, None)
    - prepare_adain: pools/projects the bottleneck to a flat vector for AdaIN conditioning
    - variational: returns a reparameterized sample plus (mean, logstd)
    """
    def __init__(self, nf_in, nf_max, n_stages, prepare_adain=False, variational=False, resnet_down=False, norm_layer = "in", layers=None):
        super().__init__()
        self.prepare_adain = prepare_adain
        self.variational = variational
        if self.prepare_adain:
            assert not self.variational, "Encoder should not be variational if adain is prepared"
        if self.prepare_adain:
            self.final_linear = nn.Linear(nf_max, nf_max)
        act = "elu" #if self.variational else "relu"
        blocks = []
        bottleneck = []
        nf = 32 if layers is None else layers[0]
        blocks.append(
            Conv2dBlock(
                nf_in, nf, 3, 2, norm=norm_layer, activation=act, padding=1
            )
        )
        # an explicit `layers` list overrides both n_stages and the channel-doubling schedule
        n_stages = n_stages if layers is None else len(layers)
        for n in range(n_stages - 1):
            blocks.append(
                Conv2dBlock(
                    nf,
                    min(nf * 2, nf_max) if layers is None else layers[n+1],
                    3,
                    2,
                    norm=norm_layer,
                    activation=act,
                    padding=1,
                )
            )
            nf = min(nf * 2, nf_max) if layers is None else layers[n+1]
        # extra striding in the bottleneck only applies to the adain head
        self.resnet_down = resnet_down and self.prepare_adain
        self.nf_in_bn = nf
        bottleneck.append(ResBlock(nf, nf_max,activation=act, stride=2 if self.resnet_down else 1, norm=norm_layer))
        if layers is None:
            bottleneck.append(ResBlock(nf_max, nf_max,activation=act, stride=2 if self.resnet_down else 1, norm=norm_layer))
        if self.resnet_down:
            # 4x4 conv collapses the 4x4 bottleneck to a 1x1 vector
            self.make_vector = Conv2dBlock(nf_max,nf_max,4,1,0)
        if self.variational:
            self.make_mu = NormConv2d(nf_max,nf_max,3, padding=1)
            self.make_sigma = NormConv2d(nf_max,nf_max,3, padding=1)
            self.squash = nn.Sigmoid()
        self.model = nn.Sequential(*blocks)
        self.bottleneck = nn.Sequential(*bottleneck)
    def forward(self, input, sample_prior=False):
        """Returns (out, mean, logstd); `mean` is the pre-bottleneck feature map
        unless the variational head overwrites it, `logstd` is None unless variational."""
        out = self.model(input)
        mean = out
        out = self.bottleneck(out)
        logstd = None
        if self.prepare_adain:
            # mean is a false name here, this is the raw channels of the conv model
            # mean = out
            if self.resnet_down:
                # in this case, mean has spatial_size 4x4
                out = self.make_vector(out).squeeze(-1).squeeze(-1)
            else:
                # NOTE(review): relies on `F` (torch.nn.functional) being imported at the
                # top of this file — confirm, otherwise this branch raises NameError
                out = F.avg_pool2d(out, out.size(2), padding=0)
                out = out.squeeze(-1).squeeze(-1)
            # no activation for the first trial, as relu would not allow for values < 0
            out = self.final_linear(out)
        elif self.variational:
            mean = self.make_mu(out)
            # normalize sigma in between
            logstd = self.squash(self.make_sigma(out))
            if sample_prior:
                out = torch.randn_like(mean)
            else:
                out = self.reparametrize(mean,logstd)
        return out, mean, logstd
    def reparametrize(self,mean,logstd):
        # standard reparameterization trick: eps ~ N(0, 1), scaled by std = exp(logstd)
        std = torch.exp(logstd)
        eps = torch.randn_like(std)
        return eps.mul(std) + mean
class AdaINDecoderEntangled(nn.Module):
    """Decoder that upsamples from a 16x16 bottleneck (quadratic images assumed),
    modulating every stage with AdaIN parameters predicted from the linear
    dynamics code; with latent_fusion the spatial dynamics code is concatenated
    to the object code at the input."""

    def __init__(self, nf_in, n_stages, latent_fusion):
        super().__init__()
        self.n_stages = n_stages
        self.latent_fusion = latent_fusion
        self.blocks = nn.ModuleList()
        self.affines = nn.ModuleList()
        # latent fusion concatenates the spatial dynamics code -> doubled input depth
        nf = nf_in * 2 if self.latent_fusion else nf_in
        self.in_block = ResBlock(nf, nf)
        for _ in range(self.n_stages):
            half = int(nf // 2)
            # per-stage affine maps the dynamics vector to AdaIN (weight, bias)
            self.affines.append(AdaINLinear(nf_in, half))
            self.blocks.append(ResBlock(nf, half, norm="adain", upsampling=True))
            nf = half
        self.out_conv = Conv2dBlock(
            nf, 3, 3, 1, padding=1, norm="none", activation="tanh"
        )

    def forward(self, object_code, dynamics_linear, dynamics_spatial):
        if self.latent_fusion:
            x = torch.cat([object_code, dynamics_spatial], dim=1)
        else:
            x = object_code
        x = self.in_block(x)
        for affine, block in zip(self.affines, self.blocks):
            x = block(x, affine(dynamics_linear))
        return self.out_conv(x)
class AdaINDecoderDisentangled(nn.Module):
    """Decoder that upsamples a shape code while modulating each stage with AdaIN
    parameters derived from the appearance vector; with latent_fusion the spatial
    appearance features are concatenated to the shape code at the bottleneck.
    """
    def __init__(self,nf_in, n_stages, latent_fusion = None, nf_in_bn = 0):
        super().__init__()
        self.blocks = nn.ModuleList()
        self.affines = nn.ModuleList()
        self.n_stages = n_stages
        self.latent_fusion = False if latent_fusion is None else latent_fusion
        if self.latent_fusion:
            assert nf_in_bn > 0
        # results latent fusion results in a deeper model
        nf = nf_in + nf_in_bn if self.latent_fusion else nf_in
        # self.bottleneck_adain = bottleneck_adain
        self.in_block = ResBlock(nf, nf,)
        for n in range(self.n_stages):
            # per-stage affine maps the appearance vector to AdaIN (weight, bias)
            self.affines.append(AdaINLinear(nf_in, int(nf // 2)))
            # upsampling adain layers
            self.blocks.append(
                ResBlock(nf, int(nf // 2), norm="adain", upsampling=True)
            )
            nf = int(nf // 2)
        self.out_conv = Conv2dBlock(
            nf, 3, 3, 1, padding=1, norm="none", activation="tanh"
        )
    def forward(self,alpha,sigma,alpha_spatial=None):
        """Decode shape code `sigma` modulated by appearance vector `alpha`."""
        if self.latent_fusion:
            assert alpha_spatial is not None
            in_code = torch.cat([sigma, alpha_spatial],dim=1)
        else:
            in_code = sigma
        x = self.in_block(in_code)
        for n in range(self.n_stages):
            adain_params = self.affines[n](alpha)
            x = self.blocks[n](x, adain_params)
        x = self.out_conv(x)
        return x
class SkipConnectionDecoder(nn.Module):
    """Decoder mirroring SkipConnectionEncoder: upsamples through len(in_channels)
    stages, concatenating encoder features for the first n_skip_stages stages; with
    `disentanglement` each stage is AdaIN-modulated by an appearance vector.

    NOTE(review): the `spectral_norm` parameter shadows any module-level import of
    the same name within this scope; renaming would change the keyword interface,
    so it is kept as is.
    """
    def __init__(self,nf_in, in_channels, n_skip_stages, disentanglement=False, spectral_norm=False, norm_layer="in",layers=None):
        super().__init__()
        self.n_stages = len(in_channels)
        self.disentanglement = disentanglement
        self.n_skip_stages = n_skip_stages
        self.blocks = nn.ModuleList()
        if self.disentanglement:
            self.affines = nn.ModuleList()
        nf = nf_in
        self.in_block = ResBlock(nf,in_channels[0], snorm=spectral_norm, norm=norm_layer)
        for i,nf in enumerate(in_channels):
            if layers is None:
                # halve the depth each stage except the last, which keeps it
                n_out = int(nf // 2) if i < len(in_channels) - 1 else nf
            # NOTE(review): with layers is not None and disentanglement, n_out is only
            # assigned below — on the first iteration this would raise NameError; the
            # combination is presumably never used. TODO confirm.
            if self.disentanglement:
                self.affines.append(AdaINLinear(nf_in,n_out))
            # input depth doubles wherever an encoder skip map is concatenated
            nf_in_dec = 2 * nf if i < self.n_skip_stages else nf
            if layers is not None:
                nf_in_dec = 2 * nf
                n_out = in_channels[i+1] if i < len(in_channels) -1 else nf
            self.blocks.append(ResBlock(nf_in_dec, n_out , norm="adain" if self.disentanglement else norm_layer, upsampling=True,snorm=spectral_norm))
        self.out_conv = Conv2dBlock(nf,3,3,1,1,norm="none",activation="tanh")
    def forward(self,shape, appearance = None, del_shape=True):
        """`shape` is a list of encoder features, deepest last; with del_shape the
        list is consumed (popped) while decoding and must end up empty."""
        x = self.in_block(shape.pop() if del_shape else shape[-1])
        for n in range(self.n_stages):
            if n < self.n_skip_stages:
                # concatenate the matching encoder skip feature
                x = torch.cat([x,shape.pop() if del_shape else shape[self.n_skip_stages-1-n]],1)
            if self.disentanglement:
                adain_params = self.affines[n](appearance)
                x = self.blocks[n](x,adain_params)
            else:
                x = self.blocks[n](x)
        if del_shape:
            assert not shape
        out = self.out_conv(x)
        return out
class DecoderEntangled(nn.Module):
    """Upsamples the concatenation of object and dynamics codes from the 16x16
    bottleneck back to an image (quadratic images assumed)."""

    def __init__(self, nf_in, n_stages):
        super().__init__()
        self.n_stages = n_stages
        depth = nf_in
        self.in_block = ResBlock(depth, depth)
        stages = []
        for _ in range(self.n_stages):
            # each stage halves the channel depth while doubling the resolution
            stages.append(ResBlock(depth, int(depth // 2), norm="in", upsampling=True))
            depth = int(depth // 2)
        self.blocks = nn.ModuleList(stages)
        self.out_conv = Conv2dBlock(
            depth, 3, 3, 1, padding=1, norm="none", activation="tanh"
        )

    def forward(self, object_code, dynamics_code, *args):
        x = self.in_block(torch.cat([object_code, dynamics_code], dim=1))
        for block in self.blocks:
            x = block(x)
        return self.out_conv(x)
class FusionBlockMultiscale(nn.Module):
    """Applies a LearnedFusionBlock at every resolution of the shape/dynamics pyramids."""

    def __init__(self, nf_in, nfs, n_blocks):
        super().__init__()
        self.n_stages = len(nfs) + 1
        self.blocks = nn.ModuleList()
        depth = nf_in
        for stage in range(self.n_stages):
            self.blocks.append(LearnedFusionBlock(depth, n_blocks))
            if stage < len(nfs):
                depth = nfs[stage]

    def forward(self, sigmas, deltas):
        # deepest level first: blocks[0] fuses the last (coarsest) pyramid entry;
        # the output list keeps the input ordering (shallow to deep)
        fused = []
        for i, n in enumerate(reversed(range(len(sigmas)))):
            fused.insert(0, self.blocks[i](sigmas[n], deltas[n]))
        return fused
| 45,992 | 39.274081 | 181 | py |
interactive-image2video-synthesis | interactive-image2video-synthesis-main/models/blocks.py | import torch
from torch import nn
from torch.nn import functional as F
from torch.nn.utils import weight_norm, spectral_norm
from torch.nn import init
class ResBlock(nn.Module):
    """Two-conv residual block.

    The first conv carries the activation; the second has none, so the block output
    is ``conv2(conv1(x)) + shortcut(x)`` without a final nonlinearity. With
    ``upsampling`` the first conv is a stride-2 transposed conv that doubles the
    spatial size. ``adain_params`` is forwarded to both convs and only used when
    norm == "adain".
    """
    def __init__(
        self,
        dim_in,
        dim_out,
        norm="in",
        activation="elu",
        pad_type="zero",
        upsampling=False,
        stride = 1,
        snorm=False
    ):
        super(ResBlock, self).__init__()
        self.norm = norm
        self.model = nn.ModuleList()
        if upsampling:
            self.conv1 = Conv2dTransposeBlock(
                dim_in,
                dim_out,
                3,
                2,
                1,
                norm=self.norm,
                activation=activation,
                snorm= snorm
            )
            self.conv2 = Conv2dBlock(
                dim_out,
                dim_out,
                3,
                1,
                1,
                norm=self.norm,
                activation="none",
                pad_type=pad_type,
                snorm=snorm
            )
        else:
            self.conv1 = Conv2dBlock(
                dim_in,
                dim_out,
                3,
                stride,
                1,
                norm=self.norm,
                activation=activation,
                pad_type=pad_type,
                snorm=snorm
            )
            self.conv2 = Conv2dBlock(
                dim_out,
                dim_out,
                3,
                1,
                1,
                norm=self.norm,
                activation="none",
                pad_type=pad_type,
                snorm=snorm
            )
        # the shortcut needs its own conv whenever shape (channels/resolution) changes
        self.convolve_res = dim_in != dim_out or upsampling or stride != 1
        if self.convolve_res:
            # NOTE(review): the shortcut hardcodes norm="in" instead of self.norm —
            # possibly an oversight; confirm before unifying.
            if not upsampling:
                self.res_conv = Conv2dBlock(dim_in,dim_out,3,stride,1,
                                            norm="in",
                                            activation=activation,
                                            pad_type=pad_type,
                                            snorm=snorm)
            else:
                self.res_conv = Conv2dTransposeBlock(dim_in,dim_out,3,2,1,
                                                     norm="in",
                                                     activation=activation,
                                                     snorm=snorm)
    def forward(self, x,adain_params=None):
        residual = x
        if self.convolve_res:
            residual = self.res_conv(residual)
        out = self.conv1(x,adain_params)
        out = self.conv2(out,adain_params)
        out += residual
        return out
class Conv2dBlock(nn.Module):
    """Conv2d with configurable padding, normalization and activation.

    Default order is pad -> conv -> norm -> activation; with ``activation_first``
    the activation is applied before the conv instead. AdaIN normalization takes
    its affine parameters per-call via ``adain_params``.
    """
    def __init__(
        self,
        in_dim,
        out_dim,
        ks,
        st,
        padding=0,
        norm="none",
        activation="elu",
        pad_type="zero",
        use_bias=True,
        activation_first=False,
        snorm=False
    ):
        super().__init__()
        self.use_bias = use_bias
        self.activation_first = activation_first
        # initialize padding (applied explicitly, so the conv itself uses padding=0)
        if pad_type == "reflect":
            self.pad = nn.ReflectionPad2d(padding)
        elif pad_type == "replicate":
            self.pad = nn.ReplicationPad2d(padding)
        elif pad_type == "zero":
            self.pad = nn.ZeroPad2d(padding)
        else:
            assert 0, "Unsupported padding type: {}".format(pad_type)
        # initialize normalization
        norm_dim = out_dim
        if norm == "bn":
            self.norm = nn.BatchNorm2d(norm_dim)
        elif norm == "in":
            self.norm = nn.InstanceNorm2d(norm_dim)
        elif norm == "group":
            self.norm = nn.GroupNorm(num_channels=norm_dim,num_groups=16)
        elif norm == "adain":
            self.norm = AdaptiveInstanceNorm2d(norm_dim)
        elif norm == "none":
            self.norm = None
        else:
            assert 0, "Unsupported normalization: {}".format(norm)
        # initialize activation
        if activation == "relu":
            self.activation = nn.ReLU(inplace=True)
        elif activation == "lrelu":
            self.activation = nn.LeakyReLU(0.2, inplace=True)
        elif activation == "tanh":
            self.activation = nn.Tanh()
        elif activation == "elu":
            self.activation = nn.ELU()
        elif activation == "none":
            self.activation = None
        else:
            assert 0, "Unsupported activation: {}".format(activation)
        # optional spectral normalization on the conv weights
        if snorm:
            self.conv = spectral_norm(nn.Conv2d(in_dim, out_dim, ks, st, bias=self.use_bias))
        else:
            self.conv = nn.Conv2d(in_dim, out_dim, ks, st, bias=self.use_bias)
    def forward(self, x, adain_params=None):
        if self.activation_first:
            if self.activation:
                x = self.activation(x)
            x = self.conv(self.pad(x))
            # AdaIN needs the per-call parameters; all other norms are parameter-free here
            if self.norm and not isinstance(self.norm,AdaptiveInstanceNorm2d):
                x = self.norm(x)
            elif isinstance(self.norm,AdaptiveInstanceNorm2d):
                x = self.norm(x, adain_params)
        else:
            x = self.conv(self.pad(x))
            if self.norm and not isinstance(self.norm,AdaptiveInstanceNorm2d):
                x = self.norm(x)
            elif isinstance(self.norm,AdaptiveInstanceNorm2d):
                x = self.norm(x, adain_params)
            if self.activation:
                x = self.activation(x)
        return x
class NormConv2d(nn.Module):
    """2d convolution with l2 weight normalization followed by a learned
    per-channel scale (gamma) and shift (beta)."""

    def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0):
        super().__init__()
        # learned affine applied after the weight-normalized convolution
        self.beta = nn.Parameter(torch.zeros([1, out_channels, 1, 1], dtype=torch.float32))
        self.gamma = nn.Parameter(torch.ones([1, out_channels, 1, 1], dtype=torch.float32))
        self.conv = weight_norm(
            nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding),
            name="weight",
        )

    def forward(self, x):
        # conv, then per-channel affine: gamma * conv(x) + beta
        return self.gamma * self.conv(x) + self.beta
class Conv2dTransposeBlock(nn.Module):
    """Transposed-convolution block mirroring Conv2dBlock: conv -> norm -> activation
    (or activation first when ``activation_first`` is set).

    Fix: the activation lookup previously mapped ``"elu"`` to ``nn.ReLU`` and had no
    ``"relu"`` branch at all, inconsistent with Conv2dBlock in this file. ``"elu"``
    now yields nn.ELU and ``"relu"`` yields nn.ReLU. NOTE: checkpoints trained with
    the old (mislabeled) mapping will see slightly different activations.
    """
    def __init__(
        self,
        in_dim,
        out_dim,
        ks,
        st,
        padding=0,
        norm="none",
        activation="elu",
        use_bias=True,
        activation_first=False,
        snorm=False
    ):
        super().__init__()
        self.use_bias = use_bias
        self.activation_first = activation_first
        # initialize normalization
        norm_dim = out_dim
        if norm == "bn":
            self.norm = nn.BatchNorm2d(norm_dim)
        elif norm == "in":
            self.norm = nn.InstanceNorm2d(norm_dim)
        elif norm == "group":
            self.norm = nn.GroupNorm(num_channels=norm_dim,num_groups=16)
        elif norm == "adain":
            self.norm = AdaptiveInstanceNorm2d(norm_dim)
        elif norm == "none":
            self.norm = None
        else:
            assert 0, "Unsupported normalization: {}".format(norm)
        # initialize activation (kept consistent with Conv2dBlock)
        if activation == "relu":
            self.activation = nn.ReLU(inplace=True)
        elif activation == "elu":
            self.activation = nn.ELU()
        elif activation == "lrelu":
            self.activation = nn.LeakyReLU(0.2, inplace=True)
        elif activation == "tanh":
            self.activation = nn.Tanh()
        elif activation == "none":
            self.activation = None
        else:
            assert 0, "Unsupported activation: {}".format(activation)
        # output_padding=padding gives the usual "double the spatial size" behavior
        # for the ks=3, st=2, padding=1 configuration used by ResBlock
        if snorm:
            self.conv = spectral_norm(nn.ConvTranspose2d(in_dim, out_dim, ks, st, bias=self.use_bias, padding=padding, output_padding=padding))
        else:
            self.conv = nn.ConvTranspose2d(in_dim, out_dim, ks, st, bias=self.use_bias, padding=padding,output_padding=padding)
    def forward(self, x, adain_params=None):
        if self.activation_first:
            if self.activation:
                x = self.activation(x)
            x = self.conv(x)
            # AdaIN needs the per-call parameters; other norms are parameter-free here
            if self.norm and not isinstance(self.norm,AdaptiveInstanceNorm2d):
                x = self.norm(x)
            elif isinstance(self.norm,AdaptiveInstanceNorm2d):
                x = self.norm(x, adain_params)
        else:
            x = self.conv(x)
            if self.norm and not isinstance(self.norm,AdaptiveInstanceNorm2d):
                x = self.norm(x)
            elif isinstance(self.norm,AdaptiveInstanceNorm2d):
                x = self.norm(x, adain_params)
            if self.activation:
                x = self.activation(x)
        return x
class AdaptiveInstanceNorm2d(nn.Module):
    """Instance normalization whose affine parameters are supplied per-forward
    ("AdaIN").

    Implementation trick: the (b, c, h, w) input is reshaped to (1, b*c, h, w) and
    run through F.batch_norm with training=True, so statistics are computed per
    sample and channel — i.e. instance normalization.
    """
    def __init__(self, num_features, eps=1e-5, momentum=0.1):
        super().__init__()
        self.num_features = num_features
        self.eps = eps
        self.momentum = momentum
        # running stats are repeated per batch element each call; the momentum
        # updates go into the repeated copies and are therefore effectively discarded
        self.register_buffer("running_mean", torch.zeros(num_features))
        self.register_buffer("running_var", torch.ones(num_features))
    def forward(self, x, adain_params):
        # NOTE(review): F.batch_norm expects 1d weight/bias of length b*c after the
        # reshape, while AdaINLinear produces (b, c)-shaped tensors — confirm the
        # shapes against the actual callers / torch version in use.
        b, c = x.size(0), x.size(1)
        running_mean = self.running_mean.repeat(b)
        running_var = self.running_var.repeat(b)
        x_reshaped = x.contiguous().view(1, b * c, *x.size()[2:])
        out = F.batch_norm(
            x_reshaped,
            running_mean,
            running_var,
            adain_params["weight"],
            adain_params["bias"],
            True,
            self.momentum,
            self.eps,
        )
        return out.view(b, c, *x.size()[2:])
    def __repr__(self):
        return self.__class__.__name__ + "(" + str(self.num_features) + ")"
class AdaINLinear(nn.Module):
    """Maps a latent vector to AdaIN modulation parameters: a dict with "weight"
    and "bias" halves of a single linear projection."""

    def __init__(self, in_units, target_units, use_bias=True, actfn=nn.ReLU):
        super().__init__()
        # one linear layer emits both halves at once
        self.linear = nn.Linear(in_units, 2 * target_units, bias=use_bias)
        self.act_fn = actfn()

    def forward(self, x):
        params = self.act_fn(self.linear(x))
        half = params.size(1) // 2
        return {"weight": params[:, :half], "bias": params[:, half:]}
class ConvGRUCell(nn.Module):
    """
    Generate a convolutional GRU cell.

    Gates are 2d convolutions over the channel-concatenation of input and hidden
    state; the spatial size is preserved (optionally the input is upsampled 2x
    first via a transposed conv).
    """
    def __init__(self, input_size, hidden_size, kernel_size,upsample=False):
        super().__init__()
        padding = kernel_size // 2
        self.input_size = input_size
        self.upsample = upsample
        self.hidden_size = hidden_size
        self.reset_gate = nn.Conv2d(input_size + hidden_size, hidden_size, kernel_size, padding=padding)
        self.update_gate = nn.Conv2d(input_size + hidden_size, hidden_size, kernel_size, padding=padding)
        self.out_gate = nn.Conv2d(input_size + hidden_size, hidden_size, kernel_size, padding=padding)
        if self.upsample:
            # doubles spatial resolution before the gates are applied
            self.up_gate = nn.ConvTranspose2d(input_size,input_size,kernel_size,2,padding=padding, output_padding=padding)
        # orthogonal weights / zero biases
        init.orthogonal_(self.reset_gate.weight)
        init.orthogonal_(self.update_gate.weight)
        init.orthogonal_(self.out_gate.weight)
        init.constant_(self.reset_gate.bias, 0.)
        init.constant_(self.update_gate.bias, 0.)
        init.constant_(self.out_gate.bias, 0.)
        if self.upsample:
            init.orthogonal_(self.up_gate.weight)
            init.constant_(self.up_gate.bias, 0.)
    def forward(self, input_, prev_state):
        """One GRU step; ``prev_state`` may be None, in which case it is zero-initialized.

        Fix: the zero state is now created on the device/dtype of ``input_`` instead of
        unconditionally on the default CUDA device whenever CUDA is available, which
        broke CPU inputs and non-default GPUs on CUDA machines.
        """
        if self.upsample:
            input_ = self.up_gate(input_)
        # get batch and spatial sizes
        batch_size = input_.data.size()[0]
        spatial_size = input_.data.size()[2:]
        # generate empty prev_state, if None is provided
        if prev_state is None:
            state_size = [batch_size, self.hidden_size] + list(spatial_size)
            prev_state = torch.zeros(state_size, device=input_.device, dtype=input_.dtype)
        # data size is [batch, channel, height, width]
        stacked_inputs = torch.cat([input_, prev_state], dim=1)
        update = torch.sigmoid(self.update_gate(stacked_inputs))
        reset = torch.sigmoid(self.reset_gate(stacked_inputs))
        out_inputs = torch.tanh(self.out_gate(torch.cat([input_, prev_state * reset], dim=1)))
        new_state = prev_state * (1 - update) + out_inputs * update
        return new_state
class ConvGRU(nn.Module):
    def __init__(self, input_size, hidden_sizes, kernel_sizes, n_layers, upsampling:list=None):
        '''
        Generates a multi-layer convolutional GRU.
        Preserves spatial dimensions across cells, only altering depth.
        Parameters
        ----------
        input_size : integer. depth dimension of input tensors.
        hidden_sizes : integer or list. depth dimensions of hidden state.
            if integer, the same hidden size is used for all cells.
        kernel_sizes : integer or list. sizes of Conv2d gate kernels.
            if integer, the same kernel size is used for all cells.
        n_layers : integer. number of chained `ConvGRUCell`.
        upsampling : optional list of booleans, one per layer; True makes that
            cell upsample its input 2x before the gates.
        '''
        super(ConvGRU, self).__init__()
        if upsampling is None:
            upsampling = [False]*n_layers
        self.input_size = input_size
        if type(hidden_sizes) != list:
            self.hidden_sizes = [hidden_sizes]*n_layers
        else:
            assert len(hidden_sizes) == n_layers, '`hidden_sizes` must have the same length as n_layers'
            self.hidden_sizes = hidden_sizes
        if type(kernel_sizes) != list:
            self.kernel_sizes = [kernel_sizes]*n_layers
        else:
            assert len(kernel_sizes) == n_layers, '`kernel_sizes` must have the same length as n_layers'
            self.kernel_sizes = kernel_sizes
        self.n_layers = n_layers
        self.cells = []
        for i in range(self.n_layers):
            # each layer's input depth is the previous layer's hidden depth
            if i == 0:
                input_dim = self.input_size
            else:
                input_dim = self.hidden_sizes[i - 1]
            self.cells.append(ConvGRUCell(input_dim, self.hidden_sizes[i], self.kernel_sizes[i],upsample=upsampling[i]))
        # nn.Sequential is used purely as a module container so the cells are
        # registered as submodules; they are never called sequentially as one module
        self.cells = nn.Sequential(*self.cells)
    def forward(self, x, hidden=None):
        '''
        Parameters
        ----------
        x : 4D input tensor. (batch, channels, height, width).
        hidden : optional list of per-layer 4D hidden states, each
            (batch, channels, height, width); None zero-initializes every layer.
        Returns
        -------
        upd_hidden : list of per-layer 4D hidden states (not a stacked tensor),
            each (batch, channels, height, width).
        '''
        if hidden is None:
            hidden = [None]*self.n_layers
        input_ = x
        upd_hidden = []
        for layer_idx in range(self.n_layers):
            cell = self.cells[layer_idx]
            cell_hidden = hidden[layer_idx]
            # pass through layer
            upd_cell_hidden = cell(input_, cell_hidden)
            upd_hidden.append(upd_cell_hidden)
            # update input_ to the last updated hidden layer for next pass
            input_ = upd_cell_hidden
        # retain tensors in list to allow different hidden sizes
        return upd_hidden
# taken from official NVLabs implementation
# Creates SPADE normalization layer based on the given configuration
# SPADE consists of two steps. First, it normalizes the activations using
# your favorite normalization method, such as Batch Norm or Instance Norm.
# Second, it applies scale and bias to the normalized output, conditioned on
# the segmentation map.
# The format of |config_text| is spade(norm)(ks), where
# (norm) specifies the type of parameter-free normalization.
# (e.g. syncbatch, batch, instance)
# (ks) specifies the size of kernel in the SPADE module (e.g. 3x3)
# Example |config_text| will be spadesyncbatch3x3, or spadeinstance5x5.
# Also, the other arguments are
# |norm_nc|: the #channels of the normalized activations, hence the output dim of SPADE
# |label_nc|: the #channels of the input semantic map, hence the input dim of SPADE
class SPADE(nn.Module):
    """Spatially-adaptive denormalization (taken from the official NVLabs
    implementation).

    First normalizes ``x`` with a parameter-free norm, then rescales/shifts it
    with per-pixel gamma/beta maps predicted from the conditioning map (resized
    to x's spatial size).

    Fix: the original final line had dataset-export metadata fused onto the
    ``return`` statement, which made the file unparsable; the metadata is removed.

    Args:
        norm_nc: channel count of the activations to be normalized.
        label_nc: channel count of the conditioning (segmentation) map.
        config: options dict; "base_norm_spade" selects "instance" (default) or "batch".
    """
    def __init__(self, norm_nc, label_nc, config):
        super().__init__()
        param_free_norm_type = config["base_norm_spade"] if "base_norm_spade" in config else "instance"
        ks = 3
        if param_free_norm_type == 'instance':
            self.param_free_norm = nn.InstanceNorm2d(norm_nc, affine=False)
        elif param_free_norm_type == 'batch':
            self.param_free_norm = nn.BatchNorm2d(norm_nc, affine=False)
        else:
            raise ValueError('%s is not a recognized param-free norm type in SPADE'
                             % param_free_norm_type)
        # The dimension of the intermediate embedding space. Yes, hardcoded.
        nhidden = 128
        pw = ks // 2
        self.mlp_shared = nn.Sequential(
            nn.Conv2d(label_nc, nhidden, kernel_size=ks, padding=pw),
            nn.ReLU()
        )
        self.mlp_gamma = nn.Conv2d(nhidden, norm_nc, kernel_size=ks, padding=pw)
        self.mlp_beta = nn.Conv2d(nhidden, norm_nc, kernel_size=ks, padding=pw)
    def forward(self, x, segmap):
        # Part 1. generate parameter-free normalized activations
        normalized = self.param_free_norm(x)
        # Part 2. produce scaling and bias conditioned on semantic map (resized to x)
        segmap = F.interpolate(segmap, size=x.size()[2:], mode='nearest')
        actv = self.mlp_shared(segmap)
        gamma = self.mlp_gamma(actv)
        beta = self.mlp_beta(actv)
        # apply scale and bias
        out = normalized * (1 + gamma) + beta
        return out
interactive-image2video-synthesis | interactive-image2video-synthesis-main/experiments/experiment.py | from abc import abstractmethod
import torch
import wandb
import os
from os import path
from glob import glob
import numpy as np
from utils.general import get_logger
WANDB_DISABLE_CODE = True
class Experiment:
    """Abstract base class for a training/testing run.

    Handles device selection, logging, global seeding, wandb setup and
    checkpoint discovery/loading. Subclasses implement `train` and `test`.
    """
    def __init__(self, config:dict, dirs: dict, device):
        # A list of devices signals multi-gpu (DataParallel-style) training.
        self.parallel = isinstance(device, list)
        self.config = config
        self.logger = get_logger(self.config["general"]["project_name"])
        self.is_debug = self.config["general"]["debug"]
        if self.is_debug:
            self.logger.info("Running in debug mode")
        if self.parallel:
            # first entry of the device list acts as the master device
            self.device = torch.device(
                f"cuda:{device[0]}" if torch.cuda.is_available() else "cpu"
            )
            self.all_devices = device
            self.logger.info("Running experiment on multiple gpus!")
        else:
            self.device = device
            self.all_devices = [device]
        self.dirs = dirs
        if torch.cuda.is_available():
            # NOTE(review): setting CUDA_VISIBLE_DEVICES after CUDA has already
            # been queried (torch.cuda.is_available() above) typically has no
            # effect on the current process — confirm intent.
            os.environ["CUDA_VISIBLE_DEVICES"] = ",".join([str(dev.index if self.parallel else dev) for dev in self.all_devices])
        if self.config["general"]["restart"]:
            self.logger.info(f'Resume training run with name "{self.config["general"]["project_name"]}" on device(s) {self.all_devices}')
        else:
            self.logger.info(f'Start new training run with name "{self.config["general"]["project_name"]}" on device(s) {self.all_devices}')
        ########## seed setting ##########
        # Seed every RNG source for reproducibility.
        torch.manual_seed(self.config["general"]["seed"])
        torch.cuda.manual_seed(self.config["general"]["seed"])
        np.random.seed(self.config["general"]["seed"])
        # random.seed(opt.seed)
        torch.backends.cudnn.deterministic = True
        torch.manual_seed(self.config["general"]["seed"])
        # NOTE(review): `rng` is never used afterwards in this class.
        rng = np.random.RandomState(self.config["general"]["seed"])
        if self.config["general"]["mode"] == "train":
            project = "visual_poking_unsupervised"
            wandb.init(
                dir=self.dirs["log"],
                project=project,
                name=self.config["general"]["project_name"],
                group=self.config["general"]["experiment"],
            )
            # log paramaters
            self.logger.info("Training parameters:")
            for key in self.config:
                if key != "testing":
                    self.logger.info(f"{key}: {self.config[key]}") # print to console
                    wandb.config.update({key: self.config[key]}) # update wandb config
    def _load_ckpt(self, key, dir=None,name=None, single_opt = True, use_best=False, load_name = "model"):
        """Locate and load a (model, optimizer) checkpoint pair.

        :param key: substring that identifies the checkpoint files of interest.
        :param dir: checkpoint directory; defaults to self.dirs["ckpt"].
        :param name: explicit checkpoint filename; if None, the latest matching
            checkpoint in `dir` is selected.
        :param single_opt: if True, expect exactly one "optimizer*" entry in
            the checkpoint; otherwise return all optimizer entries as a dict.
        :param use_best: if True, pick the checkpoint with the highest metric
            value encoded after "=" in the filename instead of the latest one.
        :param load_name: key under which the model state_dict is stored.
        :return: tuple (model_state_dict_or_None, optimizer_state_or_None).
        """
        if dir is None:
            dir = self.dirs["ckpt"]
        if name is None:
            if len(os.listdir(dir)) > 0:
                ckpts = glob(path.join(dir,"*.pt"))
                # load latest stored checkpoint
                # NOTE(review): filename filtering/parsing below assumes "/"
                # path separators and names ending in "_<iteration>.pt"
                # (or "=<metric>.pt" for best checkpoints) — POSIX-only.
                ckpts = [ckpt for ckpt in ckpts if key in ckpt.split("/")[-1]]
                if len(ckpts) == 0:
                    self.logger.info(f"*************No ckpt found****************")
                    op_ckpt = mod_ckpt = None
                    return mod_ckpt, op_ckpt
                if use_best:
                    # NOTE(review): this re-globs *all* .pt files and drops the
                    # `key` filter — confirm best-ckpt names are unambiguous.
                    ckpts = [x for x in glob(path.join(dir,"*.pt")) if "=" in x.split("/")[-1]]
                    ckpts = {float(x.split("=")[-1].split(".")[0]): x for x in ckpts}
                    ckpt = torch.load(
                        ckpts[max(list(ckpts.keys()))], map_location="cpu"
                    )
                else:
                    # latest checkpoint = largest iteration number in filename
                    ckpts = {float(x.split("_")[-1].split(".")[0]): x for x in ckpts}
                    ckpt = torch.load(
                        ckpts[max(list(ckpts.keys()))], map_location="cpu"
                    )
                mod_ckpt = ckpt[load_name] if load_name in ckpt else None
                if single_opt:
                    # NOTE(review): `key` (the parameter) is shadowed here by
                    # the optimizer entry name; later log lines report it.
                    key = [key for key in ckpt if key.startswith("optimizer")]
                    assert len(key) == 1
                    key = key[0]
                    op_ckpt = ckpt[key]
                else:
                    op_ckpt = {key: ckpt[key] for key in ckpt if "optimizer" in key}
                msg = "best model" if use_best else "model"
                if mod_ckpt is not None:
                    self.logger.info(f"*************Restored {msg} with key {key} from checkpoint****************")
                else:
                    self.logger.info(f"*************No ckpt for {msg} with key {key} found, not restoring...****************")
                if op_ckpt is not None:
                    self.logger.info(f"*************Restored optimizer with key {key} from checkpoint****************")
                else:
                    self.logger.info(f"*************No ckpt for optimizer with key {key} found, not restoring...****************")
            else:
                # empty checkpoint directory: nothing to restore
                mod_ckpt = op_ckpt = None
            return mod_ckpt, op_ckpt
        else:
            # fixme add checkpoint loading for best performing models
            ckpt_path = path.join(dir,name)
            if not path.isfile(ckpt_path):
                self.logger.info(f"*************No ckpt for model and optimizer found under {ckpt_path}, not restoring...****************")
                mod_ckpt = op_ckpt = None
            else:
                if "epoch_ckpts" in ckpt_path:
                    # epoch checkpoints store the raw state_dict directly and
                    # keep the optimizer in a sibling "opt@" file
                    mod_ckpt = torch.load(
                        ckpt_path, map_location="cpu"
                    )
                    op_path = ckpt_path.replace("model@","opt@")
                    op_ckpt = torch.load(op_path,map_location="cpu")
                    return mod_ckpt,op_ckpt
                ckpt = torch.load(ckpt_path, map_location="cpu")
                mod_ckpt = ckpt[load_name] if load_name in ckpt else None
                op_ckpt = ckpt["optimizer"] if "optimizer" in ckpt else None
                if mod_ckpt is not None:
                    self.logger.info(f"*************Restored model under {ckpt_path} ****************")
                else:
                    self.logger.info(f"*************No ckpt for model found under {ckpt_path}, not restoring...****************")
                if op_ckpt is not None:
                    self.logger.info(f"*************Restored optimizer under {ckpt_path}****************")
                else:
                    self.logger.info(f"*************No ckpt for optimizer found under {ckpt_path}, not restoring...****************")
            return mod_ckpt,op_ckpt
    @abstractmethod
    def train(self):
        """
        Run the training loop of the experiment; implemented by subclasses.
        :return:
        """
        pass
    @abstractmethod
    def test(self):
        """
        Run the prediction/evaluation of the experiment; implemented by subclasses.
        :param ckpt_path: The path where the checkpoint file to load can be found
        :return:
        """
        pass
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler
from torch.optim import Adam
from ignite.engine import Engine, Events
from ignite.handlers import ModelCheckpoint
from ignite.contrib.handlers import ProgressBar
from ignite.metrics import Average, MetricUsage
import numpy as np
import wandb
from functools import partial
from lpips import LPIPS
from tqdm import tqdm
from experiments.experiment import Experiment
from data import get_dataset
from data.samplers import FixedLengthSampler
from models.latent_flow_net import SingleScaleBaseline,SkipSequenceModel
from models.discriminator import GANTrainer
from utils.losses import PerceptualVGG,vgg_loss_agg,DynamicsLoss, style_loss
from utils.testing import make_flow_grid, make_img_grid, make_video, make_plot
from utils.metrics import metric_fid, FIDInceptionModel, metric_lpips, psnr_lightning, ssim_lightning
from utils.general import linear_var, get_member, get_patches
class FixedLengthModel(Experiment):
    """Experiment for the fixed-sequence-length image-to-video model.

    Sets up data keys, metric accumulators and GAN flags from the config;
    the heavy lifting happens in `train`.
    """
    def __init__(self, config, dirs, device):
        super().__init__(config, dirs, device)
        # keys requested from the dataset for each batch
        self.datakeys = ["images","poke"]
        if self.config["architecture"]["disentanglement"]:
            # extra appearance/shape images needed for the disentanglement losses
            self.datakeys.append("img_aT")
            self.datakeys.append("app_img_random")
        # used for efficient metrics computation
        self.fid_feats_real_per_frame = {}
        self.fid_feats_fake_per_frame = {}
        # per-metric accumulators; keys: "t" (first frame), "tk" (sequence),
        # "pl" (presumably "per-length"/patch variant — TODO confirm; only
        # written via commented-out code below)
        self.psnrs = {"t": [], "tk": [], "pl" : []}
        self.ssims = {"t": [], "tk": [], "pl" : []}
        self.lpips = {"t": [], "tk": []}
        self.use_gan = self.config["gan"]["use"]
        self.use_temp_disc = self.config["gan_temp"]["use"]
        if self.use_temp_disc:
            # a conditional temporal discriminator requires the patch variant
            if not self.config["gan_temp"]["patch_temp_disc"]:
                assert not self.config["gan_temp"]["conditional"]
        #self.pixel_decoder_loss = self.config["training"]["pixel_dynamics_weight"] > 0
        # current (annealed) learning rate of the reconstruction decoder;
        # updated every iteration in train()
        self.lr_dec_t = 0
        self.target_dev = None
        # metrics for each frame
        self.ssims_per_frame = {}
        self.lpips_per_frame = {}
        self.psnrs_per_frame = {}
        # self.ssims_per_frame_pl = {}
        # self.psnrs_per_frame_pl = {}
        self.lpips_avg = None
        # optional training switches; all default to False when absent
        self.custom_sampler = self.config["training"]["custom_sampler"] if "custom_sampler" in self.config["training"] else False
        self.poke_jump = self.config["training"]["poke_jump"] if "poke_jump" in self.config["training"] else False
        self.poke_scale_mode = self.config["architecture"]["poke_scale"] if "poke_scale" in self.config["architecture"] else False
        if self.poke_jump:
            # poke_jump and poke_scale are mutually exclusive modes
            assert not self.poke_scale_mode
def __clear_metric_arrs(self):
[self.psnrs[key].clear() for key in self.psnrs]
[self.ssims[key].clear() for key in self.ssims]
[self.lpips[key].clear() for key in self.lpips]
self.lpips_per_frame = {}
self.psnrs_per_frame = {}
self.ssims_per_frame = {}
self.fid_feats_real_per_frame = {}
self.fid_feats_fake_per_frame = {}
# self.ssims_per_frame_pl = {}
# self.psnrs_per_frame_pl = {}
def train(self):
########## checkpoints ##########
if self.config["general"]["restart"] and not self.is_debug:
mod_ckpt, op_ckpts = self._load_ckpt("reg_ckpt", single_opt=False)
op_ckpt_dis = op_ckpts["optimizer_dis"]
op_ckpt_dyn = op_ckpts["optimizer_dyn"]
else:
mod_ckpt = op_ckpt_dis = op_ckpt_dyn = None
# get datasets for training and testing
def w_init_fn(worker_id):
return np.random.seed(np.random.get_state()[1][0] + worker_id)
if not self.poke_scale_mode:
del self.config["data"]["n_ref_frames"]
dataset, transforms = get_dataset(config=self.config["data"])
train_dataset = dataset(transforms, self.datakeys, self.config["data"], train=True)
test_datakeys = self.datakeys + ["app_img_random"] if self.config["testing"]["eval_app_transfer"] and "app_img_random" not in self.datakeys else self.datakeys
test_datakeys.append("flow")
test_dataset = dataset(transforms, test_datakeys, self.config["data"], train=False)
if self.custom_sampler:
train_sampler = FixedLengthSampler(train_dataset, self.config["training"]["batch_size"],shuffle=True,
weighting=train_dataset.obj_weighting,drop_last=True,zero_poke=True, zero_poke_amount=self.config["training"]["zeropoke_amount"])
train_loader = DataLoader(train_dataset, batch_sampler=train_sampler,num_workers=0 if self.is_debug else self.config["data"]["num_workers"],
worker_init_fn=w_init_fn,)
test_sampler = FixedLengthSampler(test_dataset, batch_size=self.config["training"]["batch_size"], shuffle=True,
drop_last=True, weighting=test_dataset.obj_weighting,zero_poke=True,zero_poke_amount=self.config["training"]["zeropoke_amount"])
test_loader = DataLoader(
test_dataset,
batch_sampler=test_sampler,
num_workers=0 if self.is_debug else self.config["data"]["num_workers"], #
worker_init_fn=w_init_fn,
)
eval_sampler = FixedLengthSampler(test_dataset,batch_size=self.config["testing"]["test_batch_size"],shuffle=True,
drop_last=True,weighting=test_dataset.obj_weighting,zero_poke=False)
eval_loader = DataLoader(test_dataset,
batch_sampler=eval_sampler,
num_workers=0 if self.is_debug else self.config["data"]["num_workers"],
worker_init_fn=w_init_fn,)
self.logger.info("Using custom fixed length sampler.")
else:
self.logger.info("Using standard pytorch random sampler")
train_sampler = RandomSampler(train_dataset)
train_loader = DataLoader(
train_dataset,
sampler=train_sampler,
batch_size=self.config["training"]["batch_size"],
num_workers=0 if self.is_debug else self.config["data"]["num_workers"],
worker_init_fn=w_init_fn,
drop_last=True
)
test_sampler = RandomSampler(test_dataset,)
test_loader = DataLoader(
test_dataset,
batch_size=self.config["training"]["batch_size"],
sampler=test_sampler,
num_workers=0 if self.is_debug else self.config["data"]["num_workers"],
worker_init_fn=w_init_fn,
drop_last=True
)
# no zeropoke for evaluation as zeropoke is only to ensure no reaction when poking outside
eval_sampler = SequentialSampler(test_dataset,)
eval_loader = DataLoader(test_dataset,
sampler=eval_sampler,
batch_size=self.config["testing"]["test_batch_size"],
num_workers=0 if self.is_debug else self.config["data"]["num_workers"],
worker_init_fn=w_init_fn,
drop_last=True)
# define model
self.logger.info(f"Load model...")
#net_model = SkipSequenceModel if self.config["architecture"]["use_skip_model"] else ResidualSequenceBaseline
net = SkipSequenceModel(spatial_size=self.config["data"]["spatial_size"],config=self.config["architecture"]) if self.config["architecture"]["use_skip_model"] else \
SingleScaleBaseline(spatial_size=self.config["data"]["spatial_size"],
config=self.config["architecture"], )
self.logger.info(
f"Number of trainable parameters in model is {sum(p.numel() for p in net.parameters())}"
)
if self.config["general"]["restart"] and mod_ckpt is not None:
self.logger.info("Load pretrained paramaters and resume training.")
net.load_state_dict(mod_ckpt)
if self.parallel:
net = torch.nn.DataParallel(net, device_ids=self.all_devices)
net.cuda(self.all_devices[0])
self.logger.info("Model on gpu!")
# log weights and gradients
wandb.watch(net, log="all")
# define optimizers
# appearance and shape disentanglement
dis_params = [{"params": get_member(net,"shape_enc").parameters(), "name": "shape_encoder"},
{"params": get_member(net,"dec").parameters(), "name": "decoder"}
]
optimizer_dis = Adam(dis_params, lr=self.config["training"]["lr"])
if self.config["general"]["restart"] and op_ckpt_dis is not None:
self.logger.info("Load state_dict of optimizer.")
optimizer_dis.load_state_dict(op_ckpt_dis)
milestones = [int(self.config["training"]["n_epochs"] * t) for t in self.config["training"]["tau"]]
scheduler_dis = torch.optim.lr_scheduler.MultiStepLR(optimizer_dis, milestones=milestones, gamma=self.config["training"]["lr_reduce"])
# dynamics
dyn_params = [{"params": get_member(net,"dynamics_enc").parameters(), "name": "dynamics_encoder", },
{"params": get_member(net,"fusion_block").parameters(), "name": "fusion_block",},]
if self.config["training"]["decoder_update_tk"]:
dyn_params.append({"params": get_member(net,"dec").parameters(), "name": "decoder"})
if "singlestage" in self.config["training"] and self.config["training"]["singlestage"]:
dyn_params.append({"params": get_member(net, "shape_enc").parameters(), "name": "shape_encoder"})
optimizer_dyn = Adam(dyn_params, lr = self.config["training"]["lr"])
if self.config["general"]["restart"] and op_ckpt_dyn is not None:
self.logger.info("Load state_dict of optimizer.")
optimizer_dyn.load_state_dict(op_ckpt_dyn)
milestones = [int(self.config["training"]["n_epochs"] * t) for t in self.config["training"]["tau"]]
scheduler_dyn = torch.optim.lr_scheduler.MultiStepLR(optimizer_dyn, milestones=milestones, gamma=self.config["training"]["lr_reduce"])
# initialize disc if gan mode is enabled
if self.use_gan:
gan_trainer = GANTrainer(self.config, self._load_ckpt, self.logger,spatial_size=self.config["data"]["spatial_size"][0] ,
parallel=self.parallel, devices=self.all_devices, debug=self.is_debug)
if self.use_temp_disc:
gan_trainer_temp = GANTrainer(self.config, self._load_ckpt,self.logger,spatial_size=self.config["data"]["spatial_size"][0],
parallel=self.parallel,devices=self.all_devices, debug=self.is_debug,temporal=True, sequence_length=train_dataset.max_frames)
# set start iteration and epoch in case model training is resumed
start_it = 0
start_epoch = 0
n_epoch_train = self.config["training"]["n_epochs"]
if self.config["general"]["restart"] and op_ckpts is not None:
start_it = list(optimizer_dis.state_dict()["state"].values())[-1]["step"]
start_epoch = int(np.floor(start_it / len(train_loader)))
assert self.config["training"]["n_epochs"] > start_epoch
n_epoch_train = self.config["training"]["n_epochs"] - start_epoch
#
lr_dec_rec = partial(linear_var,start_it=0,
end_it=self.config["training"]["lr_dec_end_it"],
start_val=self.config["training"]["lr"],
end_val=self.config["training"]["lr_dec_end_val"],
clip_min=0,
clip_max=self.config["training"]["lr"],)
self.lr_dec_t = lr_dec_rec(start_it)
# losses
self.logger.info("Load VGG")
self.vgg = PerceptualVGG()
if self.parallel:
self.vgg = torch.nn.DataParallel(self.vgg,device_ids=self.all_devices)
self.vgg.cuda(self.all_devices[0])
self.logger.info("VGG on gpu")
# from torchsummary import summary
# summary(vgg.vgg,(3,224,224))
self.logger.info("Initialize persistent losses")
latent_dynamics_loss = DynamicsLoss(config=self.config["training"])
self.logger.info("Finished initializing persistent losses.")
def train_step(engine,batch):
net.train()
# prepare data
weights=None
loss_dis = 0
out_dict = {}
if train_dataset.flow_weights:
poke = batch["poke"][0].cuda(self.all_devices[0])
weights = batch["poke"][1].cuda(self.all_devices[0])
else:
poke = batch["poke"].cuda(self.all_devices[0])
x_t = batch["images"][:, 0].cuda(self.all_devices[0])
x_seq = batch["images"][:, 1:].cuda(self.all_devices[0])
if self.config["architecture"]["disentanglement"]:
shape_img = batch["img_aT"].cuda(self.all_devices[0])
# apply style loss
app_img_tr = batch["app_img_random"].cuda(self.all_devices[0])
x_trans, *_ = net(app_img_tr,x_t,poke,len=0)
loss_style = style_loss(self.vgg,app_img_tr,x_trans)
loss_dis = self.config["training"]["style_loss_weight"] * loss_style
out_dict.update({"style_loss": loss_style.item()})
else:
shape_img = x_t
x_t_hat_i, sigma_t, _ , alpha = net(x_seq[:,-1],shape_img,poke,len=0)
n_ref_frames = self.config["data"]["n_ref_frames"] - 1 if self.poke_scale_mode else train_dataset.max_frames -1
# static loss to obtain fixed image state space
if "singlestage" not in self.config["training"] or not self.config["training"]["singlestage"]:
loss_dis = loss_dis + vgg_loss_agg(self.vgg, x_t, x_t_hat_i)
#optimize parameter of appearance, shape encoders and decoder
optimizer_dis.zero_grad()
loss_dis.backward()
optimizer_dis.step()
out_dict.update({"loss_dis" : loss_dis.item()})
#optimize in alternating gradient descent as this results in equal results than training the static/dynamic model in two completely seperate stages
# however, it performs significantly better than training both models jointly with a single optimizer step (see ablations or run the model with 'singlestage' set to true)
# forward pass for training of dynamics part of the model
# dynamics losses
seq_len = x_seq.shape[1]
seq_rec, mu_delta, sigmas_hat, logstd_delta = net(x_t,shape_img,poke,len=seq_len,
poke_linear=self.poke_scale_mode,
n_zero_frames=seq_len-n_ref_frames-1, poke_jump=self.poke_jump)
sigmas_gt = []
ll_loss_dyn = []
rec_imgs = []
if weights is not None:
seq_rec = get_patches(seq_rec,weights,self.config["data"],train_dataset.weight_value_flow, logger=self.logger)
x_seq = get_patches(x_seq,weights,self.config["data"],train_dataset.weight_value_flow, logger=self.logger)
for n in range(seq_len):
x_hat_tn,s_tn,*_ = net(x_seq[:,n],x_seq[:,n],poke,len=0)
sigmas_gt.append(s_tn)
rec_imgs.append(x_hat_tn)
w = 1. if n != n_ref_frames else self.config["training"]["target_weight"]
ll_dyn_n =w * vgg_loss_agg(self.vgg,x_seq[:,n],seq_rec[:,n])
ll_loss_dyn.append(ll_dyn_n)
ll_loss_dyn = torch.stack(ll_loss_dyn,dim=0).mean()
rec_imgs = torch.stack(rec_imgs,1)
#latent dynamics
dyn_losses = []
for s_tk,s_hat_tk in zip(sigmas_gt,sigmas_hat):
dyn_losses.append(latent_dynamics_loss(s_hat_tk,s_tk,[]))
latent_loss_dyn = torch.stack(dyn_losses).mean()
loss_dyn = self.config["training"]["vgg_dyn_weight"] * ll_loss_dyn + self.config["training"]["latent_dynamics_weight"] * latent_loss_dyn
if self.use_gan and engine.state.iteration >= self.config["gan"]["start_iteration"]:
if self.config["gan"]["pixel_dynamics"]:
offsets = np.random.choice(np.arange(max(1,x_seq.shape[1]-train_dataset.max_frames)),size=x_seq.shape[0])
true_exmpls = torch.stack([seq[o:o+train_dataset.max_frames] for seq, o in zip(x_seq,offsets)],dim=0)
fake_exmpls = torch.stack([seq[o:o+train_dataset.max_frames] for seq, o in zip(seq_rec, offsets)], dim=0)
x_true = torch.cat([true_exmpls[:,1:],true_exmpls[:,:-1]],dim=2).reshape(-1,2*true_exmpls.shape[2],*true_exmpls.shape[3:])
x_fake = torch.cat([fake_exmpls[:, 1:], true_exmpls[:, :-1]], dim=2).reshape(-1, 2 * fake_exmpls.shape[2], *fake_exmpls.shape[3:])
else:
true_exmpls = np.random.choice(np.arange(x_seq.shape[0]*x_seq.shape[1]),self.config["gan"]["n_examples"])
fake_exmpls = np.random.choice(np.arange(seq_rec.shape[0]*seq_rec.shape[1]), self.config["gan"]["n_examples"])
x_true = x_seq.view(-1,*x_seq.shape[2:])[true_exmpls]
x_fake = seq_rec.view(-1,*seq_rec.shape[2:])[fake_exmpls]
disc_dict, loss_gen, loss_fmap = gan_trainer.train_step(x_true, x_fake)
loss_dyn = loss_dyn + self.config["gan"]["gen_weight"] * loss_gen + self.config["gan"]["fmap_weight"] * loss_fmap
if self.use_temp_disc and engine.state.iteration >= self.config["gan_temp"]["start_iteration"]:
seq_len_act = x_seq.shape[1]
offset = int(np.random.choice(np.arange(max(1,seq_len_act-train_dataset.max_frames)),1))
# offset_fake = int(np.random.choice(np.arange(max(1,seq_len_act-seq_len_temp_disc)), 1))
x_fake_tmp = seq_rec[:,offset:offset+train_dataset.max_frames].permute(0,2,1,3,4)
x_true_tmp = x_seq[:, offset:offset+train_dataset.max_frames].permute(0,2,1,3,4)
if self.config["gan_temp"]["conditional"]:
cond = get_patches(poke,weights,self.config["data"],test_dataset.weight_value_flow,self.logger) if test_dataset.flow_weights else poke
else:
cond = None
disc_dict_temp, loss_gen_temp, loss_fmap_temp = gan_trainer_temp.train_step(x_true_tmp,x_fake_tmp,cond)
loss_dyn = loss_dyn + self.config["gan_temp"]["gen_weight"] * loss_gen_temp + self.config["gan_temp"]["fmap_weight"] * loss_fmap_temp
# optimize parameters of dynamics part
optimizer_dyn.zero_grad()
loss_dyn.backward()
optimizer_dyn.step()
out_dict.update({"loss_dyn":loss_dyn.item() ,"vgg_loss_dyn" : ll_loss_dyn.item(), "latent_loss_dyn": latent_loss_dyn.item(), "lr_dec_t": self.lr_dec_t})
if self.use_gan and engine.state.iteration >= self.config["gan"]["start_iteration"]:
out_dict.update(disc_dict)
out_dict.update({"loss_gen_patch" :loss_gen.item(), "loss_fmap_patch": loss_fmap.item()})
if self.use_temp_disc and engine.state.iteration >= self.config["gan_temp"]["start_iteration"]:
out_dict.update(disc_dict_temp)
out_dict.update({"loss_gen_temp" :loss_gen_temp.item(), "loss_fmap_temp": loss_fmap_temp.item()})
return out_dict
self.logger.info("Initialize inception model...")
self.inception_model = FIDInceptionModel()
self.logger.info("Finished initialization of inception model...")
# note that lpips is exactly vgg-cosine similarity as proposed in the google papers and savp
self.lpips_fn = LPIPS(net="vgg")
def eval_step(engine, eval_batch):
net.eval()
out_dict = {}
with torch.no_grad():
# prepare data
weights = None
if test_dataset.flow_weights:
poke = eval_batch["poke"][0].cuda(self.all_devices[0])
weights = eval_batch["poke"][1].cuda(self.all_devices[0])
else:
poke = eval_batch["poke"].cuda(self.all_devices[0])
x_t = eval_batch["images"][:,0].cuda(self.all_devices[0])
x_seq_gt = eval_batch["images"][:,1:].cuda(self.all_devices[0])
if self.config["architecture"]["disentanglement"]:
app_img_tr = eval_batch["app_img_random"].cuda(self.all_devices[0])
x_trans, *_ = net(app_img_tr, x_t, poke,len=0)
loss_style = style_loss(self.vgg, app_img_tr, x_trans)
out_dict.update({"style_loss_eval": loss_style.item()})
n_ref_frames = self.config["data"]["n_ref_frames"] - 1 if self.poke_scale_mode else train_dataset.max_frames -1
# eval forward passes
seq_len = x_seq_gt.shape[1]
x_t_hat, sigma_t, _, alpha = net(x_t,x_t,poke,len=0)
x_seq_hat, _, sigmas_hat,_ = net(x_t, x_t, poke,len=seq_len,poke_linear=self.poke_scale_mode,
n_zero_frames=seq_len-n_ref_frames-1,poke_jump=self.poke_jump)
if weights is not None and self.config["testing"]["metrics_on_patches"]:
x_seq_hat = get_patches(x_seq_hat,weights,self.config["data"],test_dataset.weight_value_flow, logger=self.logger)
x_seq_gt = get_patches(x_seq_gt, weights, self.config["data"], test_dataset.weight_value_flow, logger=self.logger)
sigmas_gt = []
ll_loss_dyn = []
rec_imgs = []
for n in range(seq_len):
x_hat_tn, s_tn, *_ = net(x_seq_gt[:, n], x_seq_gt[:, n], poke, len=0)
sigmas_gt.append(s_tn)
rec_imgs.append(x_hat_tn)
ll_dyn_n = vgg_loss_agg(self.vgg, x_seq_gt[:, n], x_seq_hat[:, n])
ll_loss_dyn.append(ll_dyn_n)
ll_loss_tk_eval = torch.stack(ll_loss_dyn,dim=0).mean()
rec_imgs = torch.stack(rec_imgs,1)
if weights is not None and self.config["testing"]["metrics_on_patches"]:
rec_imgs = get_patches(rec_imgs, weights, self.config["data"], test_dataset.weight_value_flow, logger=self.logger)
# apply inception model for fid calculation at all timesteps
for t in range(x_seq_gt.shape[1]):
real_features_t = self.inception_model(x_seq_gt[:, t]).cpu().numpy()
fake_features_t = self.inception_model(x_seq_hat[:, t]).cpu().numpy()
if t not in self.fid_feats_fake_per_frame:
self.fid_feats_fake_per_frame.update({t: fake_features_t})
self.fid_feats_real_per_frame.update({t: real_features_t})
else:
self.fid_feats_fake_per_frame[t] = np.concatenate([self.fid_feats_fake_per_frame[t], fake_features_t], axis=0)
self.fid_feats_real_per_frame[t] = np.concatenate([self.fid_feats_real_per_frame[t], real_features_t], axis=0)
# evaluate training losses
# ll_loss_tk_eval = vgg_loss_agg(self.vgg, x_tk, x_tk_hat)
ll_loss_t_i_eval = vgg_loss_agg(self.vgg, x_t, x_t_hat)
dyn_losses = []
for s_tk, s_hat_tk in zip(sigmas_gt, sigmas_hat):
dyn_losses.append(latent_dynamics_loss(s_hat_tk, s_tk, []))
latent_loss_dyn_eval = torch.stack(dyn_losses).mean()
out_dict.update({"vgg_loss_dyn_eval": ll_loss_tk_eval.item(), "loss_dis_i_eval": ll_loss_t_i_eval.item(), "latent_loss_dyn_eval": latent_loss_dyn_eval.item()})
# compute metrics
ssim_t = ssim_lightning(x_t, x_t_hat)
psnr_t = psnr_lightning(x_t, x_t_hat)
lpips_t = metric_lpips(x_t,x_t_hat, self.lpips_fn, reduce=False)
ssim_tk, ssim_per_frame = ssim_lightning(x_seq_gt, x_seq_hat, return_per_frame=True)
psnr_tk, psnr_per_frame = psnr_lightning(x_seq_gt, x_seq_hat, return_per_frame=True)
lpips_avg, lpips_per_frame = metric_lpips(x_seq_gt, x_seq_hat,self.lpips_fn,reduce=False,return_per_frame=True)
# ssim_pl, ssim_pl_per_frame = ssim_lightning(x_seq_gt,x_seq_hat,return_per_frame=True)
# psnr_pl, psnr_pl_per_frame = psnr_lightning(x_seq_gt, x_seq_hat, return_per_frame=True)
# append to arrays
self.lpips["t"].append(lpips_t)
self.psnrs["t"].append(psnr_t)
self.ssims["t"].append(ssim_t)
self.psnrs["tk"].append(psnr_tk)
self.ssims["tk"].append(ssim_tk)
self.lpips["tk"].append(lpips_avg)
#self.ssims["pl"].append(ssim_pl)
#self.psnrs["pl"].append(psnr_pl)
# append the values of the respective sequence length
[self.ssims_per_frame[key].append(ssim_per_frame[key]) if key in self.ssims_per_frame else self.ssims_per_frame.update({key:[ssim_per_frame[key]]}) for key in ssim_per_frame]
[self.psnrs_per_frame[key].append(psnr_per_frame[key]) if key in self.psnrs_per_frame else self.psnrs_per_frame.update({key:[psnr_per_frame[key]]}) for key in psnr_per_frame]
[self.lpips_per_frame[key].append(lpips_per_frame[key]) if key in self.lpips_per_frame else self.lpips_per_frame.update({key:[lpips_per_frame[key]]}) for key in lpips_per_frame]
#[self.ssims_per_frame_pl[key].append(ssim_pl_per_frame[key]) if key in self.ssims_per_frame_pl else self.ssims_per_frame_pl.update({key: [ssim_pl_per_frame[key]]}) for key in ssim_pl_per_frame]
#[self.psnrs_per_frame_pl[key].append(psnr_pl_per_frame[key]) if key in self.psnrs_per_frame_pl else self.psnrs_per_frame_pl.update({key: [psnr_pl_per_frame[key]]}) for key in psnr_pl_per_frame]
return out_dict
# test_it steps are performed while generating test_imgs, there n_test_img is overall number divided by number of test iterations
n_test_img = int(self.config["testing"]["n_test_img"] // self.config["testing"]["test_it"])
def eval_visual(engine, eval_batch):
net.eval()
with torch.no_grad():
# prepare data
if test_dataset.flow_weights:
poke = eval_batch["poke"][0].cuda(self.all_devices[0])
weights = eval_batch["poke"][1]
else:
poke = eval_batch["poke"].cuda(self.all_devices[0])
x_t = eval_batch["images"][:, 0].cuda(self.all_devices[0])
x_seq_gt = eval_batch["images"][:, 1:].cuda(self.all_devices[0])
flow = eval_batch["flow"]
if self.config["architecture"]["disentanglement"]:
shape_img = eval_batch["img_aT"].cuda(self.all_devices[0])
else:
shape_img = x_t
n_ref_frames = self.config["data"]["n_ref_frames"] - 1 if self.poke_scale_mode else train_dataset.max_frames -1
seq_len = x_seq_gt.shape[1]
x_seq_hat, *_ = net(x_t,x_t, poke, len=seq_len,poke_linear=self.poke_scale_mode,n_zero_frames=seq_len-n_ref_frames-1, poke_jump = self.poke_jump)
x_t_hat , *_ = net(x_seq_gt[:,-1],shape_img,poke,len=0)
grid_dis = make_img_grid(x_seq_gt[:,-1],shape_img, x_t_hat,x_t, n_logged=n_test_img)
grid_dyn = make_flow_grid(x_t, poke, x_seq_hat[:,-1], x_seq_gt[:,-1], n_logged=n_test_img, flow=flow)
seq_vis_hat = torch.cat([x_t.unsqueeze(1), x_seq_hat], 1)
seq_vis_gt = torch.cat([x_t.unsqueeze(1), x_seq_gt], 1)
grid_anim = make_video(x_t,poke,seq_vis_hat,seq_vis_gt,n_logged=n_test_img,flow=flow, display_frame_nr=True)
it = engine.state.iteration
log_dict = {"Last Frame Comparison Test data": wandb.Image(grid_dyn, caption=f"Last frames test grid #{it}."),
"Disentanglement Grid Test Data": wandb.Image(grid_dis, caption=f"Test grid disentanglement #{it}."),
"Video Grid Test Data": wandb.Video(grid_anim,caption=f"Test Video Grid #{it}.",fps=5)}
if self.config["testing"]["eval_app_transfer"]:
app_img_unrelated = eval_batch["app_img_random"].cuda(self.all_devices[0])
x_transferred, *_ = net(app_img_unrelated,x_t, poke,len=0)
transfer_grid = make_img_grid(app_img_unrelated,x_t,x_transferred)
log_dict.update({"Appearance transfer grid Test Data": wandb.Image(transfer_grid, caption=f"Test_grid appearance transfer #{it}")})
wandb.log(log_dict)
return None
self.logger.info("Initialize engines...")
trainer = Engine(train_step)
evaluator = Engine(eval_step)
test_img_generator = Engine(eval_visual)
self.logger.info("Finish engine initialization...")
# checkpointing
self.logger.info("Add checkpointing and pbar...")
n_saved = 10
self.logger.info(f"Checkpoint saving window is {n_saved}")
ckpt_handler = ModelCheckpoint(self.dirs["ckpt"], "reg_ckpt", n_saved=n_saved, require_empty=False)
save_dict = {"model": net, "optimizer_dis": optimizer_dis, "optimizer_dyn": optimizer_dyn}
trainer.add_event_handler(Events.ITERATION_COMPLETED(every=self.config["testing"]["ckpt_intervall"]),
ckpt_handler,
save_dict)
if self.use_gan:
ckpt_handler_disc = ModelCheckpoint(self.dirs["ckpt"], gan_trainer.load_key, n_saved=10, require_empty=False)
save_dict_disc = {"model": gan_trainer.disc, "optimizer": gan_trainer.disc_opt}
trainer.add_event_handler(Events.ITERATION_COMPLETED(every=self.config["testing"]["ckpt_intervall"]),
ckpt_handler_disc,
save_dict_disc)
if self.use_temp_disc:
ckpt_handler_disc_temp = ModelCheckpoint(self.dirs["ckpt"], gan_trainer_temp.load_key, n_saved=10, require_empty=False)
save_dict_disc_temp = {"model": gan_trainer_temp.disc, "optimizer": gan_trainer_temp.disc_opt}
trainer.add_event_handler(Events.ITERATION_COMPLETED(every=self.config["testing"]["ckpt_intervall"]),
ckpt_handler_disc_temp,
save_dict_disc_temp)
pbar = ProgressBar(ascii=True)
pbar.attach(trainer, output_transform=lambda x: x)
pbar.attach(evaluator, output_transform=lambda x: x)
#reduce the learning rate of the decoder for the image reconstruction task, such that the model focusses more on t --> tk
@trainer.on(Events.ITERATION_COMPLETED)
def update_lr(engine):
self.lr_dec_t = lr_dec_rec(engine.state.iteration)
for g in optimizer_dis.param_groups:
if g["name"] == "decoder":
g["lr"] = self.lr_dec_t
@trainer.on(Events.ITERATION_COMPLETED(every=self.config["testing"]["log_intervall"]))
def log(engine):
    """Periodically log current losses and qualitative grids/videos to wandb."""
    it = engine.state.iteration
    wandb.log({"iteration": it})
    # log losses
    for key in engine.state.output:
        wandb.log({key: engine.state.output[key]})
    data = engine.state.batch
    # "poke" is a (poke, weights) pair when the dataset carries flow weights
    if test_dataset.flow_weights:
        poke = data["poke"][0].cuda(self.all_devices[0])
    else:
        poke = data["poke"].cuda(self.all_devices[0])
    x_t = data["images"][:, 0].cuda(self.all_devices[0])
    x_seq_gt = data["images"][:, 1:].cuda(self.all_devices[0])
    # with disentanglement enabled, a shape-transformed image drives the shape branch
    if self.config["architecture"]["disentanglement"]:
        shape_img = data["img_aT"].cuda(self.all_devices[0])
    else:
        shape_img = x_t
    n_ref_frames = self.config["data"]["n_ref_frames"] - 1 if self.poke_scale_mode else train_dataset.max_frames - 1
    net.eval()
    seq_len = x_seq_gt.shape[1]
    with torch.no_grad():
        # predict the full sequence from the start frame and the poke
        x_seq_hat, *_ = net(x_t, x_t, poke, len=seq_len, poke_linear=self.poke_scale_mode, n_zero_frames=seq_len - n_ref_frames - 1, poke_jump=self.poke_jump)
        # len=0: plain single-image reconstruction (disentanglement/invariance check)
        x_t_hat, *_ = net(x_seq_gt[:, -1], shape_img, poke, len=0)
        grid_dis_i = make_img_grid(x_seq_gt[:, -1], shape_img, x_t_hat, x_t, n_logged=n_test_img)
        grid_dyn = make_flow_grid(x_t, poke, x_seq_hat[:, -1], x_seq_gt[:, -1], n_logged=n_test_img)
        # prepend the start frame so predicted and GT videos align
        seq_vis_hat = torch.cat([x_t.unsqueeze(1), x_seq_hat], 1)
        seq_vis_gt = torch.cat([x_t.unsqueeze(1), x_seq_gt], 1)
        grid_anim = make_video(x_t, poke, seq_vis_hat, seq_vis_gt, n_logged=n_test_img, display_frame_nr=True)
        wandb.log({"Last Frame Comparison Train Data": wandb.Image(grid_dyn, caption=f"Last frames train grid after {it} train steps."),
                   "Disentanglement Grid Invariance Train Data": wandb.Image(grid_dis_i, caption=f"Invariance Disentanglement Grid on train set after {it} train steps."),
                   "Video Grid Train Data": wandb.Video(grid_anim, caption=f"Train Video Grid after {it} train steps", fps=5)})
self.logger.info("Initialize metrics...")
# compute loss average over epochs
# Average(output_transform=lambda x: x["loss_dis"]).attach(trainer, "loss_dis-epoch_avg")
if "singlestage" not in self.config["training"] or not self.config["training"]["singlestage"]:
Average(output_transform=lambda x: x["loss_dis"]).attach(trainer, "loss_dis-epoch_avg")
Average(output_transform=lambda x: x["loss_dis_i_eval"]).attach(evaluator, "loss_dis_i_eval")
Average(output_transform=lambda x: x["vgg_loss_dyn"]).attach(trainer, "vgg_loss_dyn-epoch_avg")
Average(output_transform=lambda x: x["latent_loss_dyn"]).attach(trainer, "latent_loss_dyn-epoch_avg")
if "disentanglement" in self.config["architecture"] and self.config["architecture"]["disentanglement"]:
Average(output_transform=lambda x: x["style_loss"]).attach(trainer, "style_loss-epoch_avg")
Average(output_transform=lambda x: x["style_loss_eval"]).attach(evaluator, "style_loss_eval")
if self.use_temp_disc or self.use_gan:
def gan_training_started(engine, epoch, key="gan"):
    """Ignite event filter: True once the adversarial phase for *key* has begun."""
    start_iteration = self.config[key]["start_iteration"]
    return engine.state.iteration >= start_iteration
if self.use_gan:
use_patchgan_metrics = MetricUsage(started=Events.EPOCH_STARTED(event_filter=gan_training_started),
completed=Events.EPOCH_COMPLETED(event_filter=gan_training_started),
iteration_completed=Events.ITERATION_COMPLETED(event_filter=gan_training_started))
# gan losses
Average(output_transform=lambda x: x["loss_gen_patch"]).attach(trainer, "loss_gen_patch-epoch_avg",usage=use_patchgan_metrics)
Average(output_transform=lambda x: x["loss_fmap_patch"]).attach(trainer, "loss_fmap_patch-epoch_avg",usage=use_patchgan_metrics)
Average(output_transform=lambda x: x["loss_disc_patch"]).attach(trainer, "loss_disc_patch-epoch_avg",usage=use_patchgan_metrics)
#if self.config["gan"]["gp_weighflow_video_generatort"] > 0:
Average(output_transform=lambda x: x["loss_gp_patch"]).attach(trainer, "loss_gp_patch-epoch_avg",usage=use_patchgan_metrics)
Average(output_transform=lambda x: x["p_""true_patch"]).attach(trainer, "p_true_patch-epoch_avg",usage=use_patchgan_metrics)
Average(output_transform=lambda x: x["p_fake_patch"]).attach(trainer, "p_fake_patch-epoch_avg",usage=use_patchgan_metrics)
@trainer.on(Events.EPOCH_COMPLETED(event_filter=gan_training_started))
def gan_stuff(engine):
    """Step the patch discriminator's LR scheduler once per (GAN-active) epoch."""
    scheduler = gan_trainer.disc_scheduler
    scheduler.step()
if self.use_temp_disc:
use_tmpgan_metrics = MetricUsage(started=Events.EPOCH_STARTED(event_filter=partial(gan_training_started,key="gan_temp")),
completed=Events.EPOCH_COMPLETED(event_filter=partial(gan_training_started,key="gan_temp")),
iteration_completed=Events.ITERATION_COMPLETED(event_filter=partial(gan_training_started,key="gan_temp")))
# gan losses
Average(output_transform=lambda x: x["loss_gen_temp"]).attach(trainer, "loss_gen_temp-epoch_avg",usage=use_tmpgan_metrics)
Average(output_transform=lambda x: x["loss_fmap_temp"]).attach(trainer, "loss_fmap_temp-epoch_avg",usage=use_tmpgan_metrics)
Average(output_transform=lambda x: x["loss_disc_temp"]).attach(trainer, "loss_disc_temp-epoch_avg",usage=use_tmpgan_metrics)
#if self.config["gan"]["gp_weight"] > 0:
Average(output_transform=lambda x: x["loss_gp_temp"]).attach(trainer, "loss_gp_temp-epoch_avg",usage=use_tmpgan_metrics)
Average(output_transform=lambda x: x["p_true_temp"]).attach(trainer, "p_true_temp-epoch_avg",usage=use_tmpgan_metrics)
Average(output_transform=lambda x: x["p_fake_temp"]).attach(trainer, "p_fake_temp-epoch_avg",usage=use_tmpgan_metrics)
@trainer.on(Events.EPOCH_COMPLETED(event_filter=gan_training_started))
def temp_disc_stuff(engine):
    """Step the temporal discriminator's LR scheduler once per (GAN-active) epoch."""
    scheduler = gan_trainer_temp.disc_scheduler
    scheduler.step()
# evaluation losses
Average(output_transform=lambda x: x["vgg_loss_dyn_eval"]).attach(evaluator, "vgg_loss_dyn_eval")
Average(output_transform=lambda x: x["latent_loss_dyn_eval"]).attach(evaluator, "latent_loss_dyn_eval")
self.logger.info("Finish metric initialization.")
@trainer.on(Events.EPOCH_COMPLETED(every=self.config["testing"]["n_epoch_metrics"]))
def metrics(engine):
    """Evaluate FID/SSIM/PSNR/LPIPS over (a subset of) the test set and log to wandb.

    Temporarily moves the inception and LPIPS networks to the GPU and off-loads
    the discriminators to the CPU; device placement is restored at the end.
    Also stores the average LPIPS in ``self.lpips_avg`` for checkpoint scoring.
    """
    # move metric networks onto the GPU for evaluation
    self.inception_model.eval()
    self.inception_model.cuda(self.all_devices[0])
    self.lpips_fn.cuda(self.all_devices[0])
    self.lpips_fn.eval()
    # free GPU memory by off-loading the discriminators while metrics run
    if self.config["gan_temp"]["use"]:
        gan_trainer_temp.disc.cpu()
    if self.config["gan"]["use"]:
        gan_trainer.disc.cpu()
    # compute metrics over an epoch
    self.logger.info(f"Computing metrics after epoch #{engine.state.epoch}")
    batch_size = eval_sampler.batch_size if self.config["training"]["custom_sampler"] else eval_loader.batch_size
    # cap the evaluation at roughly 8000 samples (20 batches in debug mode)
    bs = 20 if self.is_debug else (int(8000 / batch_size) if len(test_dataset) > 8000 else len(eval_loader))
    evaluator.run(eval_loader, max_epochs=1, epoch_length=bs)
    for key in evaluator.state.metrics:
        wandb.log({key: evaluator.state.metrics[key]})
    # aggregate the values accumulated by the evaluator steps
    ssim_t = np.mean(np.stack(self.ssims["t"], axis=0))
    psnr_t = np.mean(np.stack(self.psnrs["t"], axis=0))
    lpips_t = np.mean(np.concatenate(self.lpips["t"], axis=0))
    ssim_tk = np.mean(np.stack(self.ssims["tk"], axis=0))
    psnr_tk = np.mean(np.stack(self.psnrs["tk"], axis=0))
    lpips_avg = np.mean(np.concatenate(self.lpips["tk"], axis=0))
    self.lpips_avg = lpips_avg
    fid_per_frame = {}
    for key in tqdm(self.fid_feats_real_per_frame, desc="Computing FID per frame"):
        fid_per_frame[key] = metric_fid(self.fid_feats_real_per_frame[key], self.fid_feats_fake_per_frame[key])
    fid_avg = np.mean([fid_per_frame[key] for key in fid_per_frame])
    log_dict = {"ssim-t": ssim_t, "psnr-t": psnr_t, "lpips-t": lpips_t, "ssim-tk": ssim_tk, "psnr-tk": psnr_tk, "fid-tk": fid_avg, "lpips-avg": lpips_avg}
    # reduce per-frame metric lists (keyed by frame index) to scalar means
    self.lpips_per_frame = {key: np.concatenate(self.lpips_per_frame[key], axis=0).mean() for key in self.lpips_per_frame}
    self.ssims_per_frame = {key: np.stack(self.ssims_per_frame[key], axis=0).mean() for key in self.ssims_per_frame}
    self.psnrs_per_frame = {key: np.stack(self.psnrs_per_frame[key], axis=0).mean() for key in self.psnrs_per_frame}
    # frame indices are 0-based; plot them 1-based
    x = [k + 1 for k in self.lpips_per_frame]
    make_plot(x, list(self.lpips_per_frame.values()), "LPIPS of predicted frames", ylabel="Average LPIPS")
    make_plot(x, list(self.ssims_per_frame.values()), "SSIM of predicted frames", ylabel="Average SSIM")
    make_plot(x, list(self.psnrs_per_frame.values()), "PSNR of predicted frames", ylabel="Average PSNR")
    make_plot(x, list(fid_per_frame.values()), "FIDs of predicted frames", ylabel="FID")
    wandb.log(log_dict)
    # clear collection arrays for the next evaluation round
    self.__clear_metric_arrs()
    # restore device placement
    self.inception_model.cpu()
    self.lpips_fn.cpu()
    if self.config["gan_temp"]["use"]:
        gan_trainer_temp.disc.cuda(self.all_devices[0])
    if self.config["gan"]["use"]:
        gan_trainer.disc.cuda(self.all_devices[0])
@trainer.on(Events.ITERATION_COMPLETED(every=self.config["testing"]["test_img_intervall"]))
def make_test_grid(engine):
    """Periodically render qualitative grids on test data via the image generator engine."""
    n_batches = self.config["testing"]["test_it"]
    test_img_generator.run(test_loader, max_epochs=1, epoch_length=n_batches)
@trainer.on(Events.EPOCH_COMPLETED)
def log_train_avg(engine):
    """Log epoch-averaged training metrics to wandb and step the LR schedulers."""
    wandb.log({"epoch": engine.state.epoch})
    # plain loop instead of a side-effect list comprehension
    for key in engine.state.metrics:
        wandb.log({key: engine.state.metrics[key]})
    # also perform scheduler step
    scheduler_dis.step()
    scheduler_dyn.step()
def score_fn(engine):
    """Checkpoint score: negated average LPIPS, so 'higher is better' holds."""
    lpips_avg = self.lpips_avg
    assert lpips_avg is not None
    return -lpips_avg
# define best ckpt
best_ckpt_handler = ModelCheckpoint(self.dirs["ckpt"],filename_prefix="ckpt_metric" ,score_function=score_fn,score_name="lpips",n_saved=5,require_empty=False)
trainer.add_event_handler(Events.EPOCH_COMPLETED(every=self.config["testing"]["n_epoch_metrics"]),best_ckpt_handler,save_dict)
@trainer.on(Events.STARTED)
def set_start_it(engine):
    """Restore iteration/epoch counters when resuming from a checkpoint."""
    self.logger.info(f'Engine starting from iteration {start_it}, epoch {start_epoch}')
    state = engine.state
    state.iteration = start_it
    state.epoch = start_epoch
# run everything
n_step_per_epoch = 10 if self.is_debug else len(train_loader)
self.logger.info("Start training...")
trainer.run(train_loader, max_epochs=n_epoch_train, epoch_length=n_step_per_epoch)
self.logger.info("End training.")
def test(self):
from tqdm import tqdm
import cv2
from os import makedirs,path
mod_ckpt, _ = self._load_ckpt("reg_ckpt", single_opt=False)
dataset, transforms = get_dataset(config=self.config["data"])
test_dataset = dataset(transforms, self.datakeys, self.config["data"], train=False)
# get datasets for training and testing
def w_init_fn(worker_id):
    """Seed numpy per dataloader worker so each worker draws a distinct, reproducible stream."""
    base_seed = np.random.get_state()[1][0]
    return np.random.seed(base_seed + worker_id)
if self.custom_sampler:
test_sampler = FixedLengthSampler(test_dataset, batch_size=self.config["testing"]["test_batch_size"], shuffle=True,
drop_last=True, weighting=test_dataset.obj_weighting, zero_poke=False)
test_loader = DataLoader(
test_dataset,
batch_sampler=test_sampler,
num_workers=0 if self.is_debug else self.config["data"]["num_workers"], #
worker_init_fn=w_init_fn,
)
self.logger.info("Using custom data sampler")
else:
test_sampler = RandomSampler(test_dataset, )
test_loader = DataLoader(test_dataset,
sampler=test_sampler,
batch_size=16,
num_workers=self.config["data"]["num_workers"],
worker_init_fn=w_init_fn,
drop_last=True)
self.logger.info("Using common torch sampler")
# define model
self.logger.info(f"Sequence length is {test_dataset.max_frames}")
self.logger.info(f"Load model...")
net_model = SkipSequenceModel if self.config["architecture"]["use_skip_model"] else SingleScaleBaseline
net = net_model(spatial_size=self.config["data"]["spatial_size"],
config=self.config["architecture"], )
weights = [5, 5, 0]
self.logger.info(
f"Number of trainable parameters in model is {sum(p.numel() for p in net.parameters())}"
)
net.load_state_dict(mod_ckpt)
net.cuda(self.all_devices[0])
self.logger.info("Model on gpu!")
net.eval()
if self.config["testing"]["mode"] == "metrics":
fid_feats_real_per_frame = {}
fid_feats_fake_per_frame = {}
def metric_step(engine, eval_batch):
    """Evaluation step: predict a sequence and accumulate per-frame metric values.

    Inception features (FID), SSIM, PSNR and LPIPS values are appended to the
    enclosing accumulators; returns an empty dict as ignite step output.
    """
    net.eval()
    out_dict = {}
    with torch.no_grad():
        # prepare data; "poke" is a (poke, weights) pair when flow weights exist
        weights = None
        if test_dataset.flow_weights:
            poke = eval_batch["poke"][0].cuda(self.all_devices[0])
            weights = eval_batch["poke"][1].cuda(self.all_devices[0])
        else:
            poke = eval_batch["poke"].cuda(self.all_devices[0])
        x_t = eval_batch["images"][:, 0].cuda(self.all_devices[0])
        x_seq_gt = eval_batch["images"][:, 1:].cuda(self.all_devices[0])
        n_ref_frames = self.config["data"]["n_ref_frames"] - 1 if "n_ref_frames" in self.config["data"] else self.config["data"]["max_frames"]
        # eval forward passes (len=0: single-frame reconstruction; len=seq_len: rollout)
        seq_len = x_seq_gt.shape[1]
        x_t_hat, sigma_t, _, alpha = net(x_t, x_t, poke, len=0)
        x_seq_hat, _, sigmas_hat, _ = net(x_t, x_t, poke, len=seq_len, poke_linear=self.poke_scale_mode,
                                          n_zero_frames=seq_len - n_ref_frames - 1, poke_jump=self.poke_jump)
        # optionally restrict metrics to the flow-weighted image patches
        if weights is not None and self.config["testing"]["metrics_on_patches"]:
            x_seq_hat = get_patches(x_seq_hat, weights, self.config["data"], test_dataset.weight_value_flow, logger=self.logger)
            x_seq_gt = get_patches(x_seq_gt, weights, self.config["data"], test_dataset.weight_value_flow, logger=self.logger)
        # apply inception model per frame and grow the FID feature banks
        for t in range(x_seq_gt.shape[1]):
            real_features_t = self.inception_model(x_seq_gt[:, t]).cpu().numpy()
            fake_features_t = self.inception_model(x_seq_hat[:, t]).cpu().numpy()
            if t not in fid_feats_fake_per_frame:
                fid_feats_fake_per_frame[t] = fake_features_t
                fid_feats_real_per_frame[t] = real_features_t
            else:
                fid_feats_fake_per_frame[t] = np.concatenate([fid_feats_fake_per_frame[t], fake_features_t], axis=0)
                fid_feats_real_per_frame[t] = np.concatenate([fid_feats_real_per_frame[t], real_features_t], axis=0)
        ssim_tk, ssim_per_frame = ssim_lightning(x_seq_gt, x_seq_hat, return_per_frame=True)
        psnr_tk, psnr_per_frame = psnr_lightning(x_seq_gt, x_seq_hat, return_per_frame=True)
        lpips_avg, lpips_per_frame = metric_lpips(x_seq_gt, x_seq_hat, self.lpips_fn, reduce=False, return_per_frame=True)
        # append to arrays
        self.psnrs["tk"].append(psnr_tk)
        self.ssims["tk"].append(ssim_tk)
        self.lpips["tk"].append(lpips_avg)
        # append the per-frame values (plain loops instead of side-effect comprehensions)
        for key in ssim_per_frame:
            self.ssims_per_frame.setdefault(key, []).append(ssim_per_frame[key])
        for key in psnr_per_frame:
            self.psnrs_per_frame.setdefault(key, []).append(psnr_per_frame[key])
        for key in lpips_per_frame:
            self.lpips_per_frame.setdefault(key, []).append(lpips_per_frame[key])
    return out_dict
evaluator = Engine(metric_step)
self.logger.info("Initialize inception model...")
self.inception_model = FIDInceptionModel()
self.logger.info("Finished initialization of inception model...")
# note that lpips is exactly vgg-cosine similarity as proposed in the google papers and savp
self.lpips_fn = LPIPS(net="vgg")
pbar = ProgressBar(ascii=True)
pbar.attach(evaluator, output_transform=lambda x: x)
# set incpetion model to cpu
self.inception_model.eval()
self.inception_model.cuda(self.all_devices[0])
self.lpips_fn.cuda(self.all_devices[0])
self.lpips_fn.eval()
# compute metrics over an epoch
self.logger.info(f"Start metrics computation.")
batch_size = test_sampler.batch_size if self.custom_sampler else test_loader.batch_size
el = (int(8000 / batch_size) if len(test_dataset) > 8000 else len(test_loader))
evaluator.run(test_loader, max_epochs=1, epoch_length=el)
# [wandb.log({key: evaluator.state.metrics[key]}) for key in evaluator.state.metrics]
# compute metrics
ssim_tk = np.mean(np.stack(self.ssims["tk"], axis=0))
psnr_tk = np.mean(np.stack(self.psnrs["tk"], axis=0))
lpips_avg = np.mean(np.concatenate(self.lpips["tk"], axis=0))
assert list(fid_feats_real_per_frame.keys()) == list(fid_feats_fake_per_frame.keys())
fid_per_frame = {}
for key in tqdm(fid_feats_real_per_frame, desc="Computing FID per frame"):
fid_per_frame[key] = metric_fid(fid_feats_real_per_frame[key], fid_feats_fake_per_frame[key])
# fid_tk = metric_fid(self.features_real_fid["tk"], self.features_fake_fid["tk"])
fid_avg = np.mean([fid_per_frame[key] for key in fid_per_frame])
log_dict = {"ssim-avg-temp": ssim_tk, "psnr-avg_temp": psnr_tk, "fid-avg_temp": fid_avg, "lpips-avg-temp": lpips_avg}
# add histograms for per-frame-metrics
self.lpips_per_frame = {key: np.concatenate(self.lpips_per_frame[key], axis=0).mean() for key in self.lpips_per_frame}
self.ssims_per_frame = {key: np.stack(self.ssims_per_frame[key], axis=0).mean() for key in self.ssims_per_frame}
self.psnrs_per_frame = {key: np.stack(self.psnrs_per_frame[key], axis=0).mean() for key in self.psnrs_per_frame}
savedir = path.join(self.dirs["generated"], "metric_summaries")
makedirs(savedir, exist_ok=True)
x = [k + 1 for k in self.lpips_per_frame]
make_plot(x, list(self.lpips_per_frame.values()), "LPIPS of predicted frames", ylabel="Average LPIPS", savename=path.join(savedir, "lpips.svg"))
make_plot(x, list(self.ssims_per_frame.values()), "SSIM of predicted frames", ylabel="Average SSIM", savename=path.join(savedir, "ssim.svg"))
make_plot(x, list(self.psnrs_per_frame.values()), "PSNR of predicted frames", ylabel="Average PSNR", savename=path.join(savedir, "psnr.svg"))
make_plot(x, list(fid_per_frame.values()), "FIDs of predicted frames", ylabel="FID", savename=path.join(savedir, "fid.svg"))
self.logger.info("Averaged metrics: ")
for key in log_dict:
self.logger.info(f'{key}: {log_dict[key]}')
elif self.config["testing"]["mode"] == "fvd":
batch_size = test_sampler.batch_size if self.custom_sampler else test_loader.batch_size
el = (int(1000 / batch_size) if len(test_dataset) > 1000 else len(test_loader))
real_samples = []
fake_samples = []
real_samples_out = []
def generate_vids(engine, eval_batch):
    """Collect real and predicted sequences as uint8 videos for FVD computation."""
    net.eval()
    with torch.no_grad():
        # prepare data; "poke" is a (poke, weights) pair when flow weights exist
        if test_dataset.flow_weights:
            poke = eval_batch["poke"][0].cuda(self.all_devices[0])
        else:
            poke = eval_batch["poke"].cuda(self.all_devices[0])
        if engine.state.iteration < el:
            # first `el` iterations: run the model and store real/fake pairs
            x_t = eval_batch["images"][:, 0].cuda(self.all_devices[0])
            x_seq_gt = eval_batch["images"][:, 1:].cuda(self.all_devices[0])
            n_ref_frames = self.config["data"]["n_ref_frames"] - 1 if "n_ref_frames" in self.config["data"] else self.config["data"]["max_frames"]
            # eval forward passes
            seq_len = x_seq_gt.shape[1]
            x_seq_hat, *_ = net(x_t, x_t, poke, len=seq_len, poke_linear=self.poke_scale_mode,
                                n_zero_frames=seq_len - n_ref_frames - 1, poke_jump=self.poke_jump)
            # map [-1, 1] float tensors to uint8 frames in (B, T, H, W, C) layout
            real_batch = ((x_seq_gt + 1.) * 127.5).permute(0, 1, 3, 4, 2).cpu().numpy().astype(np.uint8)
            fake_batch = ((x_seq_hat + 1.) * 127.5).permute(0, 1, 3, 4, 2).cpu().numpy().astype(np.uint8)
            real_samples.append(real_batch)
            fake_samples.append(fake_batch)
        else:
            # remaining iterations: only collect additional real sequences
            real_batch = ((eval_batch["images"][:, 1:] + 1.) * 127.5).permute(0, 1, 3, 4, 2).cpu().numpy().astype(np.uint8)
            real_samples_out.append(real_batch)
generator = Engine(generate_vids)
pbar = ProgressBar(ascii=True)
pbar.attach(generator, output_transform=lambda x: x)
self.logger.info(f"Start collecting sequences for fvd computation...")
generator.run(test_loader, max_epochs=1, epoch_length=el)
savedir = path.join(self.dirs["generated"], "samples_fvd")
savedir_exmpls = path.join(savedir,"vid_examples")
makedirs(savedir, exist_ok=True)
makedirs(savedir_exmpls, exist_ok=True)
real_samples = np.stack(real_samples, axis=0)
fake_samples = np.stack(fake_samples, axis=0)
real_samples_out = np.stack(real_samples_out, axis=0)
n_ex = 0
self.logger.info(f"Generating example videos")
for i,(r,f) in enumerate(zip(real_samples,fake_samples)):
savename = path.join(savedir_exmpls,f"sample{i}.mp4")
r = np.concatenate([v for v in r],axis=2)
f = np.concatenate([v for v in f],axis=2)
all = np.concatenate([r,f],axis=1)
writer = cv2.VideoWriter(
savename,
cv2.VideoWriter_fourcc(*"MP4V"),
5,
(all.shape[2], all.shape[1]),
)
# writer = vio.FFmpegWriter(savename,inputdict=inputdict,outputdict=outputdict)
for frame in all:
frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
writer.write(frame)
writer.release()
n_ex+=1
if n_ex > 20:
break
self.logger.info(f"Saving samples to {savedir}")
np.save(path.join(savedir, "real_samples.npy"), real_samples)
np.save(path.join(savedir, "fake_samples.npy"), fake_samples)
self.logger.info(f'Finish generation of vid samples.')
# File: interactive-image2video-synthesis-main/experiments/sequence_model.py
import torch
from torch.utils.data import DataLoader
from torch.optim import Adam
from ignite.engine import Engine, Events
from ignite.handlers import ModelCheckpoint
from ignite.contrib.handlers import ProgressBar
from ignite.metrics import Average, MetricUsage
import numpy as np
import wandb
from functools import partial
from lpips import LPIPS
from tqdm import tqdm
from experiments.experiment import Experiment
from data import get_dataset
from data.samplers import SequenceLengthSampler
from models.latent_flow_net import ResidualSequenceBaseline,SkipSequenceModel
from models.discriminator import GANTrainer
from utils.losses import PerceptualVGG,vgg_loss_agg,DynamicsLoss, pixel_triplet_loss, style_loss, PixelDynamicsLoss, kl_loss
from utils.testing import make_flow_grid, make_img_grid, make_video, make_hist, make_plot
from utils.metrics import metric_fid, FIDInceptionModel, metric_lpips, psnr_lightning, ssim_lightning
from utils.general import linear_var, get_member, get_patches
class SequencePokeModel(Experiment):
def __init__(self, config, dirs, device):
    """Set up data keys, metric accumulators and training flags from *config*."""
    super().__init__(config, dirs, device)
    self.datakeys = ["images", "poke"]
    # disentanglement training additionally needs appearance-transfer images
    if self.config["architecture"]["disentanglement"]:
        self.datakeys.append("img_aT")
        self.datakeys.append("app_img_random")
    # a conditional temporal discriminator is conditioned on optical flow
    if self.config["gan_temp"]["conditional"]:
        self.datakeys.append("flow")
    # used for efficient metrics computation
    self.fid_feats_fake_per_frame = {}
    self.fid_feats_real_per_frame = {}
    self.psnrs = {"t": [], "tk": [], "pl": []}
    self.ssims = {"t": [], "tk": [], "pl": []}
    self.lpips = {"t": [], "tk": []}
    self.fvd_vids_real = []
    self.fvd_vids_fake = []
    self.use_gan = self.config["gan"]["use"]
    self.use_temp_disc = self.config["gan_temp"]["use"]
    # decoder LR for the reconstruction task; annealed during training
    self.lr_dec_t = 0
    self.target_dev = None
    # metrics for each frame
    self.ssims_per_frame = {}
    self.lpips_per_frame = {}
    self.psnrs_per_frame = {}
    # average LPIPS of the last evaluation; used as checkpoint score (None until first eval)
    self.lpips_avg = None
    self.use_norm_loss = self.config["training"]["norm_loss_weight"] > 0 if "norm_loss_weight" in self.config["training"] else False
    if self.use_norm_loss:
        # NOTE(review): norm loss appears incompatible with stochastic dynamics — confirm
        assert not self.config["architecture"]["dynamics_var"]
def __clear_metric_arrs(self):
    """Reset all metric accumulators between evaluation runs."""
    # plain loops instead of side-effect list comprehensions
    for metric_dict in (self.psnrs, self.ssims, self.lpips):
        for key in metric_dict:
            metric_dict[key].clear()
    self.fvd_vids_real.clear()
    self.fvd_vids_fake.clear()
    self.lpips_per_frame = {}
    self.psnrs_per_frame = {}
    self.ssims_per_frame = {}
    self.fid_feats_fake_per_frame = {}
    self.fid_feats_real_per_frame = {}
def train(self):
########## checkpoints ##########
if self.config["general"]["restart"] and not self.is_debug:
mod_ckpt, op_ckpts = self._load_ckpt("reg_ckpt", single_opt=not self.config["training"]["two_stage"])
if self.config["training"]["two_stage"]:
op_ckpt_dis = op_ckpts["optimizer_dis"]
op_ckpt_dyn = op_ckpts["optimizer_dyn"]
else:
op_ckpt_dyn = op_ckpts
else:
mod_ckpt = op_ckpt_dis = op_ckpt_dyn = None
# get datasets for training and testing
def w_init_fn(worker_id):
    """Seed numpy per dataloader worker so each worker draws a distinct, reproducible stream."""
    base_seed = np.random.get_state()[1][0]
    return np.random.seed(base_seed + worker_id)
dataset, transforms = get_dataset(config=self.config["data"])
train_dataset = dataset(transforms, self.datakeys, self.config["data"], train=True)
test_datakeys = self.datakeys + ["app_img_random"] if self.config["testing"]["eval_app_transfer"] and "app_img_random" not in self.datakeys else self.datakeys
test_datakeys.append("flow")
test_dataset = dataset(transforms, test_datakeys, self.config["data"], train=False)
train_sampler = SequenceLengthSampler(train_dataset,
batch_size=self.config["training"]["batch_size"],
shuffle=True,
drop_last=True,
zero_poke=self.config["data"]["include_zeropoke"])
train_loader = DataLoader(
train_dataset,
batch_sampler=train_sampler,
num_workers=0 if self.is_debug else self.config["data"]["num_workers"],
worker_init_fn=w_init_fn
)
test_sampler = SequenceLengthSampler(test_dataset,
batch_size=self.config["training"]["batch_size"],
shuffle=True,
drop_last=True,
zero_poke=self.config["data"]["include_zeropoke"])
test_loader = DataLoader(
test_dataset,
batch_sampler=test_sampler,
num_workers=0 if self.is_debug else self.config["data"]["num_workers"],
worker_init_fn=w_init_fn
)
# n_eval_frames = int(test_dataset.max_frames/2)
# if int(test_dataset.min_frames + n_eval_frames) % 2 != 0:
# n_eval_frames+=1
# no zeropoke for evaluation as zeropoke is only to ensure no reaction when poking outside
eval_sampler = SequenceLengthSampler(test_dataset,
batch_size=self.config["testing"]["test_batch_size"],
shuffle=False,
drop_last=True,
zero_poke = False
)
eval_loader = DataLoader(test_dataset,
batch_sampler=eval_sampler,
num_workers=0 if self.is_debug else self.config["data"]["num_workers"],
worker_init_fn=w_init_fn )
# define model
self.logger.info(f"Load model...")
net_model = SkipSequenceModel if self.config["architecture"]["use_skip_model"] else ResidualSequenceBaseline
net = net_model(spatial_size=self.config["data"]["spatial_size"],
config=self.config["architecture"],)
self.logger.info(
f"Number of trainable parameters in model is {sum(p.numel() for p in net.parameters())}"
)
if self.config["general"]["restart"] and mod_ckpt is not None:
self.logger.info("Load pretrained paramaters and resume training.")
net.load_state_dict(mod_ckpt)
if self.parallel:
net = torch.nn.DataParallel(net, device_ids=self.all_devices)
net.cuda(self.all_devices[0])
self.logger.info("Model on gpu!")
# log weights and gradients
wandb.watch(net, log="all")
# define optimizers
# appearance and shape disentanglement
if self.config["training"]["two_stage"]:
dis_params = [{"params": get_member(net,"shape_enc").parameters(), "name": "shape_encoder"},
{"params": get_member(net,"dec").parameters(), "name": "decoder"}
]
if self.config["architecture"]["disentanglement"]:
dis_params.append({"params": net.appearance_enc.parameters(), "name": "appearance_encoder", },)
optimizer_dis = Adam(dis_params, lr=self.config["training"]["lr"])
if self.config["general"]["restart"] and op_ckpt_dis is not None:
self.logger.info("Load state_dict of optimizer.")
optimizer_dis.load_state_dict(op_ckpt_dis)
milestones = [int(self.config["training"]["n_epochs"] * t) for t in self.config["training"]["tau"]]
scheduler_dis = torch.optim.lr_scheduler.MultiStepLR(optimizer_dis, milestones=milestones, gamma=self.config["training"]["lr_reduce"])
# dynamics
dyn_params = [{"params": get_member(net,"dynamics_enc").parameters(), "name": "dynamics_encoder", },
{"params": get_member(net,"fusion_block").parameters(), "name": "fusion_block",},]
if self.config["training"]["decoder_update_tk"] or not self.config["training"]["two_stage"]:
dyn_params.append({"params": get_member(net,"dec").parameters(), "name": "decoder"})
if not self.config["training"]["two_stage"]:
dyn_params.append({"params": get_member(net,"shape_enc").parameters(), "name": "shape_encoder"})
optimizer_dyn = Adam(dyn_params, lr = self.config["training"]["lr"])
if self.config["general"]["restart"] and op_ckpt_dyn is not None:
self.logger.info("Load state_dict of optimizer.")
optimizer_dyn.load_state_dict(op_ckpt_dyn)
milestones = [int(self.config["training"]["n_epochs"] * t) for t in self.config["training"]["tau"]]
scheduler_dyn = torch.optim.lr_scheduler.MultiStepLR(optimizer_dyn, milestones=milestones, gamma=self.config["training"]["lr_reduce"])
# initialize disc if gan mode is enabled
if self.use_gan:
gan_trainer = GANTrainer(self.config, self._load_ckpt, self.logger,spatial_size=self.config["data"]["spatial_size"][0] ,
parallel=self.parallel, devices=self.all_devices, debug=self.is_debug)
if self.use_temp_disc:
gan_trainer_temp = GANTrainer(self.config, self._load_ckpt,self.logger,spatial_size=self.config["data"]["spatial_size"][0],
parallel=self.parallel,devices=self.all_devices, debug=self.is_debug,temporal=True, sequence_length=train_dataset.min_frames)
# set start iteration and epoch in case model training is resumed
start_it = 0
start_epoch = 0
n_epoch_train = self.config["training"]["n_epochs"]
n_epoch_overall = self.config["training"]["n_epochs"]
if self.config["general"]["restart"] and op_ckpts is not None:
start_it = list(optimizer_dyn.state_dict()["state"].values())[-1]["step"]
start_epoch = int(np.floor(start_it / len(train_loader)))
assert self.config["training"]["n_epochs"] > start_epoch
n_epoch_train = self.config["training"]["n_epochs"] - start_epoch
#
lr_dec_rec = partial(linear_var,start_it=0,
end_it=self.config["training"]["lr_dec_end_it"],
start_val=self.config["training"]["lr"],
end_val=self.config["training"]["lr_dec_end_val"],
clip_min=0,
clip_max=self.config["training"]["lr"],)
self.lr_dec_t = lr_dec_rec(start_it)
# losses
self.logger.info("Load VGG")
self.vgg = PerceptualVGG()
if self.parallel:
self.vgg = torch.nn.DataParallel(self.vgg,device_ids=self.all_devices)
self.vgg.cuda(self.all_devices[0])
self.logger.info("VGG on gpu")
# from torchsummary import summary
# summary(vgg.vgg,(3,224,224))
self.logger.info("Initialize persistent losses")
latent_dynamics_loss = DynamicsLoss(config=self.config["training"])
pixel_dynamics_loss = partial(pixel_triplet_loss,vgg = self.vgg, diff_pp=self.config["training"]["pixel_dyn_spatial"]) if self.config["training"]["pixel_dynamics_vgg"] else PixelDynamicsLoss()
self.logger.info("Finished initializing persistent losses.")
def train_step(engine,batch):
net.train()
# prepare data
weights=None
loss_dis = 0
out_dict = {}
if train_dataset.flow_weights:
poke = batch["poke"][0].cuda(self.all_devices[0])
weights = batch["poke"][1].cuda(self.all_devices[0])
else:
poke = batch["poke"].cuda(self.all_devices[0])
x_t = batch["images"][:, 0].cuda(self.all_devices[0])
x_seq = batch["images"][:, 1:].cuda(self.all_devices[0])
if self.config["architecture"]["disentanglement"]:
shape_img = batch["img_aT"].cuda(self.all_devices[0])
# apply style loss
app_img_tr = batch["app_img_random"].cuda(self.all_devices[0])
x_trans, *_ = net(app_img_tr,x_t,poke,len=0)
loss_style = style_loss(self.vgg,app_img_tr,x_trans)
loss_dis = self.config["training"]["style_loss_weight"] * loss_style
out_dict.update({"style_loss": loss_style.item()})
else:
shape_img = x_t
x_t_hat_i, sigma_t, _, alpha = net(x_seq[:, -1], shape_img, poke, len=0)
# disentanglement loss
loss_dis = loss_dis + vgg_loss_agg(self.vgg, x_t, x_t_hat_i)
if engine.state.epoch <= self.config["training"]["stop_seq_stat"] and self.config["training"]["two_stage"]:
#optimize parameter of appearance, shape encoders and decoder
optimizer_dis.zero_grad()
loss_dis.backward()
optimizer_dis.step()
out_dict.update({"loss_dis" : loss_dis.item()})
# forward pass for training of dynamics part of the model
# dynamics losses
seq_len = x_seq.shape[1]
seq_rec, mu_delta, sigmas_hat, logstd_delta = net(x_t,shape_img,poke,len=seq_len)
sigmas_gt = []
ll_loss_dyn = []
rec_imgs = []
if weights is not None:
seq_rec = get_patches(seq_rec,weights,self.config["data"],train_dataset.weight_value_flow, logger=self.logger)
x_seq = get_patches(x_seq,weights,self.config["data"],train_dataset.weight_value_flow, logger=self.logger)
for n in range(seq_len):
x_hat_tn,s_tn,*_ = net(x_seq[:,n],x_seq[:,n],poke,len=0)
sigmas_gt.append(s_tn)
rec_imgs.append(x_hat_tn)
ll_dyn_n = vgg_loss_agg(self.vgg,x_seq[:,n],seq_rec[:,n])
ll_loss_dyn.append(ll_dyn_n)
ll_loss_dyn = torch.stack(ll_loss_dyn,dim=0).mean()
rec_imgs = torch.stack(rec_imgs,1)
if weights is not None:
rec_imgs = get_patches(rec_imgs,weights,self.config["data"],train_dataset.weight_value_flow, logger=self.logger)
#latent dynamics
dyn_losses = []
for s_tk,s_hat_tk in zip(sigmas_gt,sigmas_hat):
dyn_losses.append(latent_dynamics_loss(s_hat_tk,s_tk,[]))
latent_loss_dyn = torch.stack(dyn_losses).mean()
loss_dyn = self.config["training"]["vgg_dyn_weight"] * ll_loss_dyn + self.config["training"]["latent_dynamics_weight"] * latent_loss_dyn
if self.use_norm_loss:
poke_norms = []
for p in poke:
magns = p.norm(dim=0)
ids = magns.nonzero(as_tuple=True)
if ids[0].shape[0] > 0:
poke_norms.append(magns[ids].mean().unsqueeze(0))
else:
poke_norms.append(torch.zeros(1).cuda(self.all_devices[0]))
poke_norms = torch.cat(poke_norms, 0)
norm_loss = ((poke_norms - mu_delta.reshape(poke_norms.shape[0], -1).norm(dim=-1)) ** 2).mean()
loss_dyn = loss_dyn + self.config["training"]["norm_loss_weight"] * norm_loss
out_dict.update({"norm_loss": norm_loss.item()})
# kl loss for
if self.config["architecture"]["dynamics_var"]:
kl_dyn = kl_loss(mu_delta,logstd_delta)
loss_dyn = self.config["training"]["kl_weight"] * kl_dyn
out_dict.update({"kl_dyn": kl_dyn.item()})
# pixel dynamics
loss_dec_dyn = []
for n in range(seq_len-1):
loss_dec_dyn_tn = pixel_dynamics_loss(x_seq[:,n],x_seq[:,n+1],rec_imgs[:,n],seq_rec[:,n+1])
loss_dec_dyn.append(loss_dec_dyn_tn)
loss_dec_dyn = torch.stack(loss_dec_dyn,dim=0).mean()
loss_dyn = loss_dyn + self.config["training"]["pixel_dynamics_weight"] * loss_dec_dyn
if self.use_gan and engine.state.iteration >= self.config["gan"]["start_iteration"]:
if self.config["gan"]["pixel_dynamics"]:
offsets = np.random.choice(np.arange(max(1,x_seq.shape[1]-train_dataset.min_frames)),size=x_seq.shape[0])
true_exmpls = torch.stack([seq[o:o+train_dataset.min_frames] for seq, o in zip(x_seq,offsets)],dim=0)
fake_exmpls = torch.stack([seq[o:o+train_dataset.min_frames] for seq, o in zip(seq_rec, offsets)], dim=0)
x_true = torch.cat([true_exmpls[:,1:],true_exmpls[:,:-1]],dim=2).reshape(-1,2*true_exmpls.shape[2],*true_exmpls.shape[3:])
x_fake = torch.cat([fake_exmpls[:, 1:], true_exmpls[:, :-1]], dim=2).reshape(-1, 2 * fake_exmpls.shape[2], *fake_exmpls.shape[3:])
else:
true_exmpls = np.random.choice(np.arange(x_seq.shape[0]*x_seq.shape[1]),self.config["gan"]["n_examples"])
fake_exmpls = np.random.choice(np.arange(seq_rec.shape[0]*seq_rec.shape[1]), self.config["gan"]["n_examples"])
x_true = x_seq.view(-1,*x_seq.shape[2:])[true_exmpls]
x_fake = seq_rec.view(-1,*seq_rec.shape[2:])[fake_exmpls]
disc_dict, loss_gen, loss_fmap = gan_trainer.train_step(x_true, x_fake)
loss_dyn = loss_dyn + self.config["gan"]["gen_weight"] * loss_gen + self.config["gan"]["fmap_weight"] * loss_fmap
if self.use_temp_disc and engine.state.iteration >= self.config["gan_temp"]["start_iteration"]:
seq_len_act = x_seq.shape[1]
offset = int(np.random.choice(np.arange(max(1,seq_len_act-train_dataset.min_frames)),1))
# offset_fake = int(np.random.choice(np.arange(max(1,seq_len_act-seq_len_temp_disc)), 1))
x_fake_tmp = seq_rec[:,offset:offset+train_dataset.min_frames].permute(0,2,1,3,4)
x_true_tmp = x_seq[:, offset:offset+train_dataset.min_frames].permute(0,2,1,3,4)
if self.config["gan_temp"]["conditional"]:
flow = batch["flow"].cuda(self.all_devices[0])
cond = get_patches(flow,weights,self.config["data"],test_dataset.weight_value_flow,self.logger) if test_dataset.flow_weights else flow
else:
cond = None
disc_dict_temp, loss_gen_temp, loss_fmap_temp = gan_trainer_temp.train_step(x_true_tmp,x_fake_tmp,cond)
loss_dyn = loss_dyn + self.config["gan_temp"]["gen_weight"] * loss_gen_temp + self.config["gan_temp"]["fmap_weight"] * loss_fmap_temp
# optimize parameters of dynamics part
optimizer_dyn.zero_grad()
loss_dyn.backward()
optimizer_dyn.step()
out_dict.update({"loss_dyn":loss_dyn.item() ,"vgg_loss_dyn" : ll_loss_dyn.item(), "latent_loss_dyn": latent_loss_dyn.item(), "lr_dec_t": self.lr_dec_t})
if self.use_gan and engine.state.iteration >= self.config["gan"]["start_iteration"]:
out_dict.update(disc_dict)
out_dict.update({"loss_gen_patch" :loss_gen.item(), "loss_fmap_patch": loss_fmap.item()})
if self.use_temp_disc and engine.state.iteration >= self.config["gan_temp"]["start_iteration"]:
out_dict.update(disc_dict_temp)
out_dict.update({"loss_gen_temp" :loss_gen_temp.item(), "loss_fmap_temp": loss_fmap_temp.item()})
#if self.pixel_decoder_loss:
out_dict.update({"pixel_loss_dec": loss_dec_dyn.item()})
return out_dict
self.logger.info("Initialize inception model...")
self.inception_model = FIDInceptionModel()
self.logger.info("Finished initialization of inception model...")
# note that lpips is exactly vgg-cosine similarity as proposed in the google papers and savp
self.lpips_fn = LPIPS(net="vgg")
def eval_step(engine, eval_batch):
    """One evaluation iteration (ignite Engine step).

    Reconstructs the source frame, predicts the full target sequence,
    accumulates per-frame FID features and SSIM/PSNR/LPIPS into the
    trainer's collection dicts, and returns the evaluation losses for
    metric averaging.  `engine` is the ignite evaluator; `eval_batch`
    is a dataloader batch (dict with "poke", "images", optionally
    "app_img_random" — assumed shapes: images (B, T, C, H, W); TODO confirm).
    """
    net.eval()
    out_dict = {}
    with torch.no_grad():
        # prepare data
        weights = None
        if test_dataset.flow_weights:
            # "poke" is a (poke_tensor, patch_weights) pair when flow weights are enabled
            poke = eval_batch["poke"][0].cuda(self.all_devices[0])
            weights = eval_batch["poke"][1].cuda(self.all_devices[0])
        else:
            poke = eval_batch["poke"].cuda(self.all_devices[0])
        # first frame is the conditioning image, the rest is the ground-truth target sequence
        x_t = eval_batch["images"][:,0].cuda(self.all_devices[0])
        x_seq_gt = eval_batch["images"][:,1:].cuda(self.all_devices[0])
        if self.config["architecture"]["disentanglement"]:
            # appearance transfer check: synthesize with an unrelated appearance image
            app_img_tr = eval_batch["app_img_random"].cuda(self.all_devices[0])
            x_trans, *_ = net(app_img_tr, x_t, poke,len=0)
            loss_style = style_loss(self.vgg, app_img_tr, x_trans)
            out_dict.update({"style_loss_eval": loss_style.item()})
        # eval forward passes
        # len=0 -> single-image reconstruction; len=seq_len -> sequence prediction
        seq_len = x_seq_gt.shape[1]
        x_t_hat, sigma_t, _, alpha = net(x_t,x_t,poke,len=0)
        x_seq_hat, mu_delta, sigmas_hat,_ = net(x_t, x_t, poke,len=seq_len)
        if weights is not None and self.config["testing"]["metrics_on_patches"]:
            # restrict metric computation to the poked image regions
            x_seq_hat = get_patches(x_seq_hat,weights,self.config["data"],test_dataset.weight_value_flow, logger=self.logger)
            x_seq_gt = get_patches(x_seq_gt, weights, self.config["data"], test_dataset.weight_value_flow, logger=self.logger)
        sigmas_gt = []
        ll_loss_dyn = []
        rec_imgs = []
        for n in range(seq_len):
            # per-frame reconstruction pass to obtain "ground-truth" latents s_tn
            x_hat_tn, s_tn, *_ = net(x_seq_gt[:, n], x_seq_gt[:, n], poke, len=0)
            sigmas_gt.append(s_tn)
            rec_imgs.append(x_hat_tn)
            # perceptual (VGG) loss between ground truth and predicted frame n
            ll_dyn_n = vgg_loss_agg(self.vgg, x_seq_gt[:, n], x_seq_hat[:, n])
            ll_loss_dyn.append(ll_dyn_n)
        ll_loss_tk_eval = torch.stack(ll_loss_dyn,dim=0).mean()
        rec_imgs = torch.stack(rec_imgs,1)
        if self.use_norm_loss:
            # mean poke magnitude per sample (only over non-zero poke locations)
            poke_norms = []
            for p in poke:
                magns = p.norm(dim=0)
                ids = magns.nonzero(as_tuple=True)
                if ids[0].shape[0] > 0:
                    poke_norms.append(magns[ids].mean().unsqueeze(0))
                else:
                    # zero-poke sample: contribute a zero norm
                    poke_norms.append(torch.zeros(1).cuda(self.all_devices[0]))
            poke_norms = torch.cat(poke_norms, 0)
            # penalize mismatch between poke magnitude and latent shift magnitude
            norm_loss = ((poke_norms - mu_delta.reshape(poke_norms.shape[0], -1).norm(dim=-1)) ** 2).mean()
            out_dict.update({"norm_loss": norm_loss.item()})
        if weights is not None and self.config["testing"]["metrics_on_patches"]:
            rec_imgs = get_patches(rec_imgs, weights, self.config["data"], test_dataset.weight_value_flow, logger=self.logger)
        # apply inception model for fid calculation at time t+k
        for t in range(x_seq_gt.shape[1]):
            real_features_t = self.inception_model(x_seq_gt[:, t]).cpu().numpy()
            fake_features_t = self.inception_model(x_seq_hat[:, t]).cpu().numpy()
            if t not in self.fid_feats_fake_per_frame:
                self.fid_feats_fake_per_frame.update({t: fake_features_t})
                self.fid_feats_real_per_frame.update({t: real_features_t})
            else:
                self.fid_feats_fake_per_frame[t] = np.concatenate([self.fid_feats_fake_per_frame[t], fake_features_t], axis=0)
                self.fid_feats_real_per_frame[t] = np.concatenate([self.fid_feats_real_per_frame[t], real_features_t], axis=0)
        # evaluate training losses
        # ll_loss_tk_eval = vgg_loss_agg(self.vgg, x_tk, x_tk_hat)
        ll_loss_t_i_eval = vgg_loss_agg(self.vgg, x_t, x_t_hat)
        # latent dynamics loss between predicted and per-frame "ground-truth" latents
        dyn_losses = []
        for s_tk, s_hat_tk in zip(sigmas_gt, sigmas_hat):
            dyn_losses.append(latent_dynamics_loss(s_hat_tk, s_tk, []))
        latent_loss_dyn_eval = torch.stack(dyn_losses).mean()
        out_dict.update({"vgg_loss_dyn_eval": ll_loss_tk_eval.item(), "loss_dis_i_eval": ll_loss_t_i_eval.item(), "latent_loss_dyn_eval": latent_loss_dyn_eval.item()})
        #if self.pixel_decoder_loss:
        #x_t_hat_dec = net.dec(sigma_t, alpha)
        #loss_dec_dyn = (vgg_loss_agg(self.vgg, x_t_hat_dec, x_tk_hat) - vgg_loss_agg(self.vgg, x_t, x_tk)) ** 2
        # pixel dynamics loss over consecutive frame pairs
        loss_dec_dyn = []
        for n in range(seq_len - 1):
            loss_dec_dyn_tn = pixel_dynamics_loss(x_seq_gt[:, n], x_seq_gt[:, n + 1], rec_imgs[:,n], x_seq_hat[:, n + 1])
            loss_dec_dyn.append(loss_dec_dyn_tn)
        loss_dec_dyn = torch.stack(loss_dec_dyn, dim=0).mean()
        out_dict.update({"pixel_loss_dec_eval": loss_dec_dyn.item()})
        # compute metrics
        ssim_t = ssim_lightning(x_t, x_t_hat)
        psnr_t = psnr_lightning(x_t, x_t_hat)
        lpips_t = metric_lpips(x_t,x_t_hat, self.lpips_fn, reduce=False)
        ssim_tk, ssim_per_frame = ssim_lightning(x_seq_gt, x_seq_hat, return_per_frame=True)
        psnr_tk, psnr_per_frame = psnr_lightning(x_seq_gt, x_seq_hat, return_per_frame=True)
        lpips_avg, lpips_per_frame = metric_lpips(x_seq_gt, x_seq_hat,self.lpips_fn,reduce=False,return_per_frame=True)
        # ssim_pl, ssim_pl_per_frame = ssim_lightning(x_seq_gt,x_seq_hat,return_per_frame=True)
        # psnr_pl, psnr_pl_per_frame = psnr_lightning(x_seq_gt, x_seq_hat, return_per_frame=True)
        # append to arrays (aggregated later in the `metrics` handler)
        self.lpips["t"].append(lpips_t)
        self.psnrs["t"].append(psnr_t)
        self.ssims["t"].append(ssim_t)
        self.psnrs["tk"].append(psnr_tk)
        self.ssims["tk"].append(ssim_tk)
        self.lpips["tk"].append(lpips_avg)
        #self.ssims["pl"].append(ssim_pl)
        #self.psnrs["pl"].append(psnr_pl)
        # append the values of the respective sequence length; keys are frame
        # indices, created lazily on first occurrence
        [self.ssims_per_frame[key].append(ssim_per_frame[key]) if key in self.ssims_per_frame else self.ssims_per_frame.update({key:[ssim_per_frame[key]]}) for key in ssim_per_frame]
        [self.psnrs_per_frame[key].append(psnr_per_frame[key]) if key in self.psnrs_per_frame else self.psnrs_per_frame.update({key:[psnr_per_frame[key]]}) for key in psnr_per_frame]
        [self.lpips_per_frame[key].append(lpips_per_frame[key]) if key in self.lpips_per_frame else self.lpips_per_frame.update({key:[lpips_per_frame[key]]}) for key in lpips_per_frame]
        #[self.ssims_per_frame_pl[key].append(ssim_pl_per_frame[key]) if key in self.ssims_per_frame_pl else self.ssims_per_frame_pl.update({key: [ssim_pl_per_frame[key]]}) for key in ssim_pl_per_frame]
        #[self.psnrs_per_frame_pl[key].append(psnr_pl_per_frame[key]) if key in self.psnrs_per_frame_pl else self.psnrs_per_frame_pl.update({key: [psnr_pl_per_frame[key]]}) for key in psnr_pl_per_frame]
        return out_dict
# test_it steps are performed while generating test_imgs, there n_test_img is overall number divided by number of test iterations
n_test_img = int(self.config["testing"]["n_test_img"] // self.config["testing"]["test_it"])
def eval_visual(engine, eval_batch):
    """Generate qualitative test grids/videos and push them to wandb.

    Used by the `test_img_generator` engine; returns None (no metrics).
    """
    net.eval()
    with torch.no_grad():
        # prepare data
        if test_dataset.flow_weights:
            poke = eval_batch["poke"][0].cuda(self.all_devices[0])
            weights = eval_batch["poke"][1]
        else:
            poke = eval_batch["poke"].cuda(self.all_devices[0])
        x_t = eval_batch["images"][:, 0].cuda(self.all_devices[0])
        x_seq_gt = eval_batch["images"][:, 1:].cuda(self.all_devices[0])
        flow = eval_batch["flow"]
        if self.config["architecture"]["disentanglement"]:
            # shape source differs from appearance source when disentangling
            shape_img = eval_batch["img_aT"].cuda(self.all_devices[0])
        else:
            shape_img = x_t
        seq_len = x_seq_gt.shape[1]
        # predicted sequence and single-frame disentangled reconstruction
        x_seq_hat, *_ = net(x_t,x_t, poke, len=seq_len)
        x_t_hat , *_ = net(x_seq_gt[:,-1],shape_img,poke,len=0)
        grid_dis = make_img_grid(x_seq_gt[:,-1],shape_img, x_t_hat,x_t, n_logged=n_test_img)
        grid_dyn = make_flow_grid(x_t, poke, x_seq_hat[:,-1], x_seq_gt[:,-1], n_logged=n_test_img, flow=flow)
        # prepend the conditioning frame so videos start from the source image
        seq_vis_hat = torch.cat([x_t.unsqueeze(1), x_seq_hat], 1)
        seq_vis_gt = torch.cat([x_t.unsqueeze(1), x_seq_gt], 1)
        grid_anim = make_video(x_t,poke,seq_vis_hat,seq_vis_gt,n_logged=n_test_img,flow=flow)
        it = engine.state.iteration
        log_dict = {"Last Frame Comparison Test data": wandb.Image(grid_dyn, caption=f"Last frames test grid #{it}."),
                    "Disentanglement Grid Test Data": wandb.Image(grid_dis, caption=f"Test grid disentanglement #{it}."),
                    "Video Grid Test Data": wandb.Video(grid_anim,caption=f"Test Video Grid #{it}.",fps=5)}
        if self.config["testing"]["eval_app_transfer"]:
            # additionally visualize appearance transfer onto an unrelated image
            app_img_unrelated = eval_batch["app_img_random"].cuda(self.all_devices[0])
            x_transferred, *_ = net(app_img_unrelated,x_t, poke,len=0)
            transfer_grid = make_img_grid(app_img_unrelated,x_t,x_transferred)
            log_dict.update({"Appearance transfer grid Test Data": wandb.Image(transfer_grid, caption=f"Test_grid appearance transfer #{it}")})
        wandb.log(log_dict)
        return None
self.logger.info("Initialize engines...")
trainer = Engine(train_step)
evaluator = Engine(eval_step)
test_img_generator = Engine(eval_visual)
self.logger.info("Finish engine initialization...")
# checkpointing
ckpt_handler = ModelCheckpoint(self.dirs["ckpt"], "reg_ckpt", n_saved=10, require_empty=False)
if self.config["training"]["two_stage"]:
save_dict = {"model": net, "optimizer_dis": optimizer_dis, "optimizer_dyn": optimizer_dyn}
else:
save_dict = {"model": net, "optimizer_dyn": optimizer_dyn}
trainer.add_event_handler(Events.ITERATION_COMPLETED(every=self.config["testing"]["ckpt_intervall"]),
ckpt_handler,
save_dict)
if self.use_gan:
ckpt_handler_disc = ModelCheckpoint(self.dirs["ckpt"], gan_trainer.load_key, n_saved=10, require_empty=False)
save_dict_disc = {"model": gan_trainer.disc, "optimizer": gan_trainer.disc_opt}
trainer.add_event_handler(Events.ITERATION_COMPLETED(every=self.config["testing"]["ckpt_intervall"]),
ckpt_handler_disc,
save_dict_disc)
if self.use_temp_disc:
ckpt_handler_disc_temp = ModelCheckpoint(self.dirs["ckpt"], gan_trainer_temp.load_key, n_saved=10, require_empty=False)
save_dict_disc_temp = {"model": gan_trainer_temp.disc, "optimizer": gan_trainer_temp.disc_opt}
trainer.add_event_handler(Events.ITERATION_COMPLETED(every=self.config["testing"]["ckpt_intervall"]),
ckpt_handler_disc_temp,
save_dict_disc_temp)
pbar = ProgressBar(ascii=True)
pbar.attach(trainer, output_transform=lambda x: x)
pbar.attach(evaluator, output_transform=lambda x: x)
#reduce the learning rate of the decoder for the image reconstruction task, such that the model focusses more on t --> tk
if self.config["training"]["two_stage"]:
@trainer.on(Events.ITERATION_COMPLETED)
def update_lr(engine):
    # Anneal the learning rate of the decoder parameter group for the pure
    # image-reconstruction task so the model focuses more on dynamics
    # (t -> t+k); `lr_dec_rec` maps the iteration count to the new lr.
    self.lr_dec_t = lr_dec_rec(engine.state.iteration)
    for g in optimizer_dis.param_groups:
        # only the group registered under the name "decoder" is annealed
        if g["name"] == "decoder":
            g["lr"] = self.lr_dec_t
@trainer.on(Events.ITERATION_COMPLETED(every=self.config["testing"]["log_intervall"]))
def log(engine):
    """Periodic wandb logging: scalar losses plus qualitative grids/videos
    rendered from the current *training* batch."""
    it = engine.state.iteration
    wandb.log({"iteration": it})
    # log losses
    for key in engine.state.output:
        wandb.log({key: engine.state.output[key]})
    data = engine.state.batch
    # NOTE(review): flag is read from test_dataset although this is a train
    # batch — presumably both datasets share the same flow_weights setting; verify.
    if test_dataset.flow_weights:
        poke = data["poke"][0].cuda(self.all_devices[0])
    else:
        poke = data["poke"].cuda(self.all_devices[0])
    x_t = data["images"][:, 0].cuda(self.all_devices[0])
    x_seq_gt = data["images"][:, 1:].cuda(self.all_devices[0])
    if self.config["architecture"]["disentanglement"]:
        shape_img = data["img_aT"].cuda(self.all_devices[0])
    else:
        shape_img = x_t
    net.eval()
    seq_len = x_seq_gt.shape[1]
    with torch.no_grad():
        x_seq_hat, *_ = net(x_t, x_t, poke, len=seq_len)
        x_t_hat, *_ = net(x_seq_gt[:,-1], shape_img, poke,len=0)
        #x_t_hat_e, *_ = net(img_aT, img_sT, poke)
    grid_dis_i = make_img_grid(x_seq_gt[:,-1], shape_img, x_t_hat, x_t, n_logged=n_test_img)
    grid_dyn = make_flow_grid(x_t, poke, x_seq_hat[:,-1], x_seq_gt[:,-1], n_logged=n_test_img)
    # prepend the conditioning frame to both video sequences
    seq_vis_hat = torch.cat([x_t.unsqueeze(1),x_seq_hat],1)
    seq_vis_gt = torch.cat([x_t.unsqueeze(1), x_seq_gt], 1)
    grid_anim = make_video(x_t,poke,seq_vis_hat,seq_vis_gt,n_logged=n_test_img)
    wandb.log({"Last Frame Comparison Train Data": wandb.Image(grid_dyn, caption=f"Last frames train grid after {it} train steps."),
               "Disentanglement Grid Invariance Train Data": wandb.Image(grid_dis_i, caption=f"Invariance Disentanglement Grid on train set after {it} train steps."),
               "Video Grid Train Data": wandb.Video(grid_anim, caption=f"Train Video Grid after {it} train steps",fps=5)})
    #"Disentanglement Grid Equivariance Train Data": wandb.Image(grid_dis_e, caption=f"Eqiuvariance Disentanglement Grid on train set after {it} train steps.")
self.logger.info("Initialize metrics...")
# compute loss average over epochs
# Average(output_transform=lambda x: x["loss_dis"]).attach(trainer, "loss_dis-epoch_avg")
Average(output_transform=lambda x: x["loss_dis"]).attach(trainer, "loss_dis-epoch_avg")
Average(output_transform=lambda x: x["vgg_loss_dyn"]).attach(trainer, "vgg_loss_dyn-epoch_avg")
Average(output_transform=lambda x: x["latent_loss_dyn"]).attach(trainer, "latent_loss_dyn-epoch_avg")
if self.config["architecture"]["disentanglement"]:
Average(output_transform=lambda x: x["style_loss"]).attach(trainer, "style_loss-epoch_avg")
Average(output_transform=lambda x: x["style_loss_eval"]).attach(evaluator, "style_loss_eval")
if self.use_norm_loss:
Average(output_transform=lambda x: x["norm_loss"]).attach(trainer, "norm_loss-epoch_avg")
Average(output_transform=lambda x: x["norm_loss"]).attach(evaluator, "norm_loss_eval")
if self.config["architecture"]["dynamics_var"]:
Average(output_transform=lambda x: x["kl_dyn"]).attach(trainer, "kl_dyn_loss-epoch_avg")
if self.use_temp_disc or self.use_gan:
def gan_training_started(engine, epoch, key="gan"):
    """Ignite event filter: True once the adversarial phase for `key` has begun.

    `epoch` is the event payload mandated by the filter signature; it is unused.
    """
    start_iteration = self.config[key]["start_iteration"]
    return start_iteration <= engine.state.iteration
if self.use_gan:
use_patchgan_metrics = MetricUsage(started=Events.EPOCH_STARTED(event_filter=gan_training_started),
completed=Events.EPOCH_COMPLETED(event_filter=gan_training_started),
iteration_completed=Events.ITERATION_COMPLETED(event_filter=gan_training_started))
# gan losses
Average(output_transform=lambda x: x["loss_gen_patch"]).attach(trainer, "loss_gen_patch-epoch_avg",usage=use_patchgan_metrics)
Average(output_transform=lambda x: x["loss_fmap_patch"]).attach(trainer, "loss_fmap_patch-epoch_avg",usage=use_patchgan_metrics)
Average(output_transform=lambda x: x["loss_disc_patch"]).attach(trainer, "loss_disc_patch-epoch_avg",usage=use_patchgan_metrics)
#if self.config["gan"]["gp_weighflow_video_generatort"] > 0:
Average(output_transform=lambda x: x["loss_gp_patch"]).attach(trainer, "loss_gp_patch-epoch_avg",usage=use_patchgan_metrics)
Average(output_transform=lambda x: x["p_""true_patch"]).attach(trainer, "p_true_patch-epoch_avg",usage=use_patchgan_metrics)
Average(output_transform=lambda x: x["p_fake_patch"]).attach(trainer, "p_fake_patch-epoch_avg",usage=use_patchgan_metrics)
@trainer.on(Events.EPOCH_COMPLETED(event_filter=gan_training_started))
def gan_stuff(engine):
    # Step the patch discriminator's lr scheduler once per epoch, but only
    # after its adversarial phase has started (default filter key "gan").
    gan_trainer.disc_scheduler.step()
if self.use_temp_disc:
use_tmpgan_metrics = MetricUsage(started=Events.EPOCH_STARTED(event_filter=partial(gan_training_started,key="gan_temp")),
completed=Events.EPOCH_COMPLETED(event_filter=partial(gan_training_started,key="gan_temp")),
iteration_completed=Events.ITERATION_COMPLETED(event_filter=partial(gan_training_started,key="gan_temp")))
# gan losses
Average(output_transform=lambda x: x["loss_gen_temp"]).attach(trainer, "loss_gen_temp-epoch_avg",usage=use_tmpgan_metrics)
Average(output_transform=lambda x: x["loss_fmap_temp"]).attach(trainer, "loss_fmap_temp-epoch_avg",usage=use_tmpgan_metrics)
Average(output_transform=lambda x: x["loss_disc_temp"]).attach(trainer, "loss_disc_temp-epoch_avg",usage=use_tmpgan_metrics)
#if self.config["gan"]["gp_weight"] > 0:
Average(output_transform=lambda x: x["loss_gp_temp"]).attach(trainer, "loss_gp_temp-epoch_avg",usage=use_tmpgan_metrics)
Average(output_transform=lambda x: x["p_true_temp"]).attach(trainer, "p_true_temp-epoch_avg",usage=use_tmpgan_metrics)
Average(output_transform=lambda x: x["p_fake_temp"]).attach(trainer, "p_fake_temp-epoch_avg",usage=use_tmpgan_metrics)
@trainer.on(Events.EPOCH_COMPLETED(event_filter=partial(gan_training_started, key="gan_temp")))
def temp_disc_stuff(engine):
    """Step the temporal discriminator's lr scheduler once per epoch.

    Bugfix: the event filter previously used `gan_training_started` with its
    default key "gan", i.e. the *patch* discriminator's start_iteration, so
    the temporal scheduler could be stepped before (or after) the temporal
    GAN actually started.  It is now gated on the temporal GAN's own
    `start_iteration`, consistent with the temporal metric usage defined
    directly above via `partial(gan_training_started, key="gan_temp")`.
    """
    gan_trainer_temp.disc_scheduler.step()
# if self.pixel_decoder_loss:
Average(output_transform=lambda x: x["pixel_loss_dec"]).attach(trainer, "pixel_loss_dec-epoch_avg")
Average(output_transform=lambda x: x["pixel_loss_dec_eval"]).attach(evaluator, "pixel_loss_dec_eval")
# evaluation losses
Average(output_transform=lambda x: x["vgg_loss_dyn_eval"]).attach(evaluator, "vgg_loss_dyn_eval")
Average(output_transform=lambda x: x["loss_dis_i_eval"]).attach(evaluator, "loss_dis_i_eval")
Average(output_transform=lambda x: x["latent_loss_dyn_eval"]).attach(evaluator, "latent_loss_dyn_eval")
self.logger.info("Finish metric initialization.")
@trainer.on(Events.EPOCH_COMPLETED(every=self.config["testing"]["n_epoch_metrics"]))
def metrics(engine):
    """Run the evaluator over (a subset of) the test set, aggregate the
    collected SSIM/PSNR/LPIPS/FID statistics, log them to wandb and reset
    the collection arrays."""
    # move metric networks to the gpu; free gpu memory by parking the
    # discriminators on the cpu for the duration of the evaluation
    self.inception_model.eval()
    self.inception_model.cuda(self.all_devices[0])
    self.lpips_fn.cuda(self.all_devices[0])
    self.lpips_fn.eval()
    if self.use_temp_disc:
        gan_trainer_temp.disc.cpu()
    if self.use_gan:
        gan_trainer.disc.cpu()
    # compute metrics over an epoch
    self.logger.info(f"Computing metrics after epoch #{engine.state.epoch}")
    # cap evaluation at ~8000 samples (20 batches when debugging)
    bs = 20 if self.is_debug else (int(8000 / eval_sampler.batch_size) if len(test_dataset) > 8000 else len(eval_loader))
    evaluator.run(eval_loader, max_epochs=1, epoch_length=bs)
    [wandb.log({key: evaluator.state.metrics[key]}) for key in evaluator.state.metrics]
    # aggregate the per-batch values collected by eval_step
    ssim_t = np.mean(np.stack(self.ssims["t"], axis=0))
    psnr_t = np.mean(np.stack(self.psnrs["t"], axis=0))
    lpips_t = np.mean(np.concatenate(self.lpips["t"], axis=0))
    ssim_tk = np.mean(np.stack(self.ssims["tk"], axis=0))
    psnr_tk = np.mean(np.stack(self.psnrs["tk"], axis=0))
    lpips_avg = np.mean(np.concatenate(self.lpips["tk"], axis=0))
    # stored on self so score_fn can rank checkpoints by it
    self.lpips_avg = lpips_avg
    fid_per_frame = {}
    for key in tqdm(self.fid_feats_real_per_frame, desc="Computing FID per frame"):
        fid_per_frame[key] = metric_fid(self.fid_feats_real_per_frame[key], self.fid_feats_fake_per_frame[key])
    # fid_tk = metric_fid(self.features_real_fid["tk"], self.features_fake_fid["tk"])
    fid_avg = np.mean([fid_per_frame[key] for key in fid_per_frame])
    log_dict = {"ssim-t": ssim_t, "psnr-t": psnr_t, "fid-avg": fid_avg, "lpips-t": lpips_t,"ssim-tk": ssim_tk, "psnr-tk": psnr_tk, "lpips-avg": lpips_avg}
    # add histograms for per-frame-metrics
    self.lpips_per_frame = {key: np.concatenate(self.lpips_per_frame[key], axis=0).mean() for key in self.lpips_per_frame}
    self.ssims_per_frame = {key: np.stack(self.ssims_per_frame[key], axis=0).mean() for key in self.ssims_per_frame}
    self.psnrs_per_frame = {key: np.stack(self.psnrs_per_frame[key], axis=0).mean() for key in self.psnrs_per_frame}
    # self.ssims_per_frame_pl = {key: np.stack(self.ssims_per_frame_pl[key], axis=0).mean() for key in self.ssims_per_frame_pl}
    # self.psnrs_per_frame_pl = {key: np.stack(self.psnrs_per_frame_pl[key], axis=0).mean() for key in self.psnrs_per_frame_pl}
    # x axis: 1-based frame indices
    x = [k+1 for k in self.lpips_per_frame]
    make_plot(x,list(self.lpips_per_frame.values()),"LPIPS of predicted frames", ylabel="Average LPIPS",)
    make_plot(x, list(self.ssims_per_frame.values()), "SSIM of predicted frames", ylabel="Average SSIM",)
    make_plot(x, list(self.psnrs_per_frame.values()), "PSNR of predicted frames", ylabel="Average PSNR",)
    make_plot(x, list(fid_per_frame.values()), "FIDs of predicted frames", ylabel="FID")
    wandb.log(log_dict)
    # clear collection arrays
    self.__clear_metric_arrs()
    # release gpu memory again and restore the discriminators
    self.inception_model.cpu()
    self.lpips_fn.cpu()
    if self.use_temp_disc:
        gan_trainer_temp.disc.cuda(self.all_devices[0])
    if self.use_gan:
        gan_trainer.disc.cuda(self.all_devices[0])
@trainer.on(Events.ITERATION_COMPLETED(every=self.config["testing"]["test_img_intervall"]))
def make_test_grid(engine):
    # Periodically run the visualization engine over `test_it` test batches.
    test_img_generator.run(test_loader, max_epochs=1, epoch_length=self.config["testing"]["test_it"])
@trainer.on(Events.EPOCH_COMPLETED)
def log_train_avg(engine):
    # Log the epoch-averaged training metrics attached to the trainer.
    wandb.log({"epoch": engine.state.epoch})
    [wandb.log({key: engine.state.metrics[key]}) for key in engine.state.metrics]
    # also perform scheduler step (disentanglement scheduler only in two-stage mode)
    if self.config["training"]["two_stage"]:
        scheduler_dis.step()
    scheduler_dyn.step()
def score_fn(engine):
    """Checkpoint score: negated average LPIPS, so higher means better."""
    lpips_value = self.lpips_avg
    assert lpips_value is not None
    return -lpips_value
# define best ckpt
best_ckpt_handler = ModelCheckpoint(self.dirs["ckpt"],filename_prefix="ckpt_metric" ,score_function=score_fn,score_name="lpips",n_saved=5,require_empty=False)
trainer.add_event_handler(Events.EPOCH_COMPLETED(every=self.config["testing"]["n_epoch_metrics"]),best_ckpt_handler,save_dict)
@trainer.on(Events.STARTED)
def set_start_it(engine):
    # Resume support: fast-forward the engine state to the checkpointed
    # iteration/epoch before the first training step runs.
    self.logger.info(f'Engine starting from iteration {start_it}, epoch {start_epoch}')
    engine.state.iteration = start_it
    engine.state.epoch = start_epoch
# run everything
n_step_per_epoch = 10 if self.is_debug else len(train_loader)
self.logger.info("Start training...")
trainer.run(train_loader, max_epochs=n_epoch_overall, epoch_length=n_step_per_epoch)
self.logger.info("End training.")
def test(self):
from tqdm import tqdm
import cv2
from os import makedirs,path
# load checkpoint
mod_ckpt, _ = self._load_ckpt("reg_ckpt", single_opt=False,use_best=self.config["testing"]["best_ckpt"],)
#dir="/export/data/ablattma/visual_poking/final_models/final_models/var_length/iper/",name="baseline_wo_upsampling.pt")
dataset, transforms = get_dataset(config=self.config["data"])
test_dataset = dataset(transforms, self.datakeys, self.config["data"], train=False)
# get datasets for training and testing
def w_init_fn(worker_id):
    """Re-seed numpy inside each dataloader worker.

    Derives a per-worker seed from the current global numpy RNG state so
    that workers produce different, yet reproducible, random streams.
    """
    base_seed = np.random.get_state()[1][0]
    return np.random.seed(base_seed + worker_id)
test_sampler = SequenceLengthSampler(test_dataset,
batch_size=self.config["testing"]["test_batch_size"] if self.config["testing"]["test_batch_size"] > 16 else 16,
shuffle=False,
drop_last=True,
zero_poke=False)
test_loader = DataLoader(
test_dataset,
batch_sampler=test_sampler,
num_workers=0 if self.is_debug else self.config["data"]["num_workers"],
worker_init_fn=w_init_fn
)
# define model
self.logger.info(f"Load model...")
net_model = SkipSequenceModel if self.config["architecture"]["use_skip_model"] else ResidualSequenceBaseline
net = net_model(spatial_size=self.config["data"]["spatial_size"],
config=self.config["architecture"],)
weights = [5,5,0]
self.logger.info(
f"Number of trainable parameters in model is {sum(p.numel() for p in net.parameters())}"
)
net.load_state_dict(mod_ckpt)
net.cuda(self.all_devices[0])
self.logger.info("Model on gpu!")
net.eval()
if self.config["testing"]["mode"] == "noise_comp":
target_dir = path.join(self.dirs["generated"], "test_decoder_noise")
makedirs(target_dir, exist_ok=True)
n_gen = 0
for i,batch in enumerate(test_loader):
if n_gen > 100:
break
if test_dataset.flow_weights:
poke = batch["poke"][0].cuda(self.all_devices[0])
else:
poke = batch["poke"].cuda(self.all_devices[0])
img = batch["images"][:,0].cuda(self.all_devices[0])
with torch.no_grad():
x_rec, sigmas, *_ = net(img,img,poke,len=0)
# add noise to sigmas
for n in tqdm(range(x_rec.shape[0]),desc=f'Generating noise xs for batch #{i}'):
simgs_noise = [weights[k] * torch.randn((self.config["testing"]["n_examples_noise"],*sigm[n].shape)).cuda(self.all_devices[0])+sigm[n].unsqueeze(0) for k,sigm in enumerate(sigmas)]
# xs = torch.stack([img[n]]*self.config["testing"]["n_examples"])
simgs_noise.reverse()
xs_noise = net.dec(simgs_noise,None,del_shape=True)
xs_disp = torch.cat([x_rec[n].unsqueeze(0),xs_noise],dim=0)
xs_disp = np.concatenate([((x.permute(1,2,0).cpu().numpy()+ 1.)*127.5).astype(np.uint8) for x in xs_disp],axis=1)
n_gen += 1
xs_disp = cv2.cvtColor(xs_disp,cv2.COLOR_RGB2BGR)
cv2.imwrite(path.join(target_dir,f"noise_imgs_{n_gen}.png"),xs_disp)
elif self.config["testing"]["mode"] == "metrics":
fid_feats_real_per_frame = {}
fid_feats_fake_per_frame = {}
def metric_step(engine, eval_batch):
    """Test-time metric iteration: predicts a sequence, accumulates FID
    features per frame, collects FVD clips and SSIM/PSNR/LPIPS values in
    the enclosing scope's containers.  Returns an empty dict (all results
    are accumulated as side effects)."""
    net.eval()
    out_dict = {}
    with torch.no_grad():
        # prepare data
        weights = None
        if test_dataset.flow_weights:
            poke = eval_batch["poke"][0].cuda(self.all_devices[0])
            weights = eval_batch["poke"][1].cuda(self.all_devices[0])
        else:
            poke = eval_batch["poke"].cuda(self.all_devices[0])
        x_t = eval_batch["images"][:, 0].cuda(self.all_devices[0])
        x_seq_gt = eval_batch["images"][:, 1:].cuda(self.all_devices[0])
        # eval forward passes
        seq_len = x_seq_gt.shape[1]
        # NOTE(review): the single-image pass' outputs (x_t_hat, sigma_t,
        # alpha) are not used for the metrics below — presumably kept for
        # parity with eval_step; verify before removing.
        x_t_hat, sigma_t, _, alpha = net(x_t, x_t, poke, len=0)
        x_seq_hat, _, sigmas_hat, _ = net(x_t, x_t, poke, len=seq_len)
        if weights is not None and self.config["testing"]["metrics_on_patches"]:
            # restrict metric computation to the poked image regions
            x_seq_hat = get_patches(x_seq_hat, weights, self.config["data"], test_dataset.weight_value_flow, logger=self.logger)
            x_seq_gt = get_patches(x_seq_gt, weights, self.config["data"], test_dataset.weight_value_flow, logger=self.logger)
        # apply inception model for fid calculation at time t+k
        for t in range(x_seq_gt.shape[1]):
            real_features_t = self.inception_model(x_seq_gt[:, t]).cpu().numpy()
            fake_features_t = self.inception_model(x_seq_hat[:, t]).cpu().numpy()
            if t not in fid_feats_fake_per_frame:
                fid_feats_fake_per_frame.update({t:fake_features_t})
                fid_feats_real_per_frame.update({t: real_features_t})
            else:
                fid_feats_fake_per_frame[t] = np.concatenate([fid_feats_fake_per_frame[t],fake_features_t], axis=0)
                fid_feats_real_per_frame[t] = np.concatenate([fid_feats_real_per_frame[t], real_features_t], axis=0)
        #self.features_real_fid["tk"].append(real_features_tk)
        #self.features_fake_fid["tk"].append(fake_features_tk)
        # collect full clips (on cpu) for FVD computation
        self.fvd_vids_real.append(x_seq_gt.cpu())
        self.fvd_vids_fake.append(x_seq_hat.cpu())
        ssim_tk, ssim_per_frame = ssim_lightning(x_seq_gt, x_seq_hat, return_per_frame=True)
        psnr_tk, psnr_per_frame = psnr_lightning(x_seq_gt, x_seq_hat, return_per_frame=True)
        lpips_avg, lpips_per_frame = metric_lpips(x_seq_gt, x_seq_hat, self.lpips_fn, reduce=False, return_per_frame=True)
        # append to arrays
        self.psnrs["tk"].append(psnr_tk)
        self.ssims["tk"].append(ssim_tk)
        self.lpips["tk"].append(lpips_avg)
        # append the values of the respective sequence length; per-frame dicts
        # are created lazily on first occurrence of each frame index
        [self.ssims_per_frame[key].append(ssim_per_frame[key]) if key in self.ssims_per_frame else self.ssims_per_frame.update({key: [ssim_per_frame[key]]}) for key in ssim_per_frame]
        [self.psnrs_per_frame[key].append(psnr_per_frame[key]) if key in self.psnrs_per_frame else self.psnrs_per_frame.update({key: [psnr_per_frame[key]]}) for key in psnr_per_frame]
        [self.lpips_per_frame[key].append(lpips_per_frame[key]) if key in self.lpips_per_frame else self.lpips_per_frame.update({key: [lpips_per_frame[key]]}) for key in lpips_per_frame]
        return out_dict
evaluator = Engine(metric_step)
self.logger.info("Initialize inception model...")
self.inception_model = FIDInceptionModel()
self.logger.info("Finished initialization of inception model...")
# note that lpips is exactly vgg-cosine similarity as proposed in the google papers and savp
self.lpips_fn = LPIPS(net="vgg")
pbar = ProgressBar(ascii=True)
pbar.attach(evaluator, output_transform=lambda x: x)
# set incpetion model to cpu
self.inception_model.eval()
self.inception_model.cuda(self.all_devices[0])
self.lpips_fn.cuda(self.all_devices[0])
self.lpips_fn.eval()
# compute metrics over an epoch
self.logger.info(f"Start metrics computation.")
el = (int(8000 / test_sampler.batch_size) if len(test_dataset) > 8000 else len(test_loader))
evaluator.run(test_loader, max_epochs=1, epoch_length=el)
# [wandb.log({key: evaluator.state.metrics[key]}) for key in evaluator.state.metrics]
# compute metrics
ssim_tk = np.mean(np.stack(self.ssims["tk"], axis=0))
psnr_tk = np.mean(np.stack(self.psnrs["tk"], axis=0))
lpips_avg = np.mean(np.concatenate(self.lpips["tk"], axis=0))
assert list(fid_feats_real_per_frame.keys()) == list(fid_feats_fake_per_frame.keys())
fid_per_frame = {}
for key in tqdm(fid_feats_real_per_frame, desc="Computing FID per frame"):
fid_per_frame[key] = metric_fid(fid_feats_real_per_frame[key], fid_feats_fake_per_frame[key])
#fid_tk = metric_fid(self.features_real_fid["tk"], self.features_fake_fid["tk"])
fid_avg = np.mean([fid_per_frame[key] for key in fid_per_frame])
log_dict = {"ssim-avg-temp": ssim_tk, "psnr-avg_temp": psnr_tk, "fid-avg_temp": fid_avg, "lpips-avg-temp": lpips_avg}
# add histograms for per-frame-metrics
self.lpips_per_frame = {key: np.concatenate(self.lpips_per_frame[key], axis=0).mean() for key in self.lpips_per_frame}
self.ssims_per_frame = {key: np.stack(self.ssims_per_frame[key], axis=0).mean() for key in self.ssims_per_frame}
self.psnrs_per_frame = {key: np.stack(self.psnrs_per_frame[key], axis=0).mean() for key in self.psnrs_per_frame}
savedir = path.join(self.dirs["generated"],"metric_summaries")
makedirs(savedir, exist_ok=True)
x = [k+1 for k in self.lpips_per_frame]
make_plot(x,list(self.lpips_per_frame.values()),"LPIPS of predicted frames", ylabel="Average LPIPS",savename=path.join(savedir,"lpips.svg"))
make_plot(x, list(self.ssims_per_frame.values()), "SSIM of predicted frames", ylabel="Average SSIM", savename=path.join(savedir, "ssim.svg"))
make_plot(x, list(self.psnrs_per_frame.values()), "PSNR of predicted frames", ylabel="Average PSNR", savename=path.join(savedir, "psnr.svg"))
make_plot(x,list(fid_per_frame.values()), "FIDs of predicted frames", ylabel="FID", savename=path.join(savedir, "fid.svg"))
self.logger.info("Averaged metrics: ")
for key in log_dict:
self.logger.info(f'{key}: {log_dict[key]}')
elif self.config["testing"]["mode"] == "fvd":
test_sampler = SequenceLengthSampler(test_dataset,16,shuffle=True, drop_last=True, n_frames=10,zero_poke=False)
test_loader = DataLoader(test_dataset,batch_sampler=test_sampler,num_workers=self.config["data"]["num_workers"],worker_init_fn=w_init_fn)
real_samples = []
fake_samples = []
def generate_vids(engine,eval_batch):
net.eval()
with torch.no_grad():
# prepare data
if test_dataset.flow_weights:
poke = eval_batch["poke"][0].cuda(self.all_devices[0])
else:
poke = eval_batch["poke"].cuda(self.all_devices[0])
x_t = eval_batch["images"][:, 0].cuda(self.all_devices[0])
x_seq_gt = eval_batch["images"][:, 1:].cuda(self.all_devices[0])
# eval forward passes
seq_len = x_seq_gt.shape[1]
x_seq_hat, *_ = net(x_t, x_t, poke, len=seq_len)
real_batch = ((x_seq_gt + 1.) * 127.5).permute(0, 1, 3, 4, 2).cpu().numpy().astype(np.uint8)
fake_batch = ((x_seq_hat + 1.) * 127.5).permute(0, 1, 3, 4, 2).cpu().numpy().astype(np.uint8)
real_samples.append(real_batch)
fake_samples.append(fake_batch)
generator = Engine(generate_vids)
pbar = ProgressBar(ascii=True)
pbar.attach(generator, output_transform=lambda x: x)
self.logger.info(f"Start collecting sequences for fvd computation...")
el = (int(1000 / test_sampler.batch_size) if len(test_dataset) > 1000 else len(test_loader))
generator.run(test_loader,max_epochs=1,epoch_length=el)
savedir = path.join(self.dirs["generated"],"samples_fvd")
makedirs(savedir,exist_ok=True)
real_samples = np.stack(real_samples,axis=0)
fake_samples = np.stack(fake_samples, axis=0)
np.save(path.join(savedir,"real_samples.npy"),real_samples)
np.save(path.join(savedir,"fake_samples.npy"),fake_samples)
self.logger.info(f'Finish generation of vid samples.')
else:
raise ValueError(f'Specified testing mode "{self.config["testing"]["mode"]}" does not exist.') | 58,565 | 53.581547 | 210 | py |
interactive-image2video-synthesis | interactive-image2video-synthesis-main/utils/losses.py | import torch
from torch import nn
from torchvision.models import vgg19
from collections import namedtuple
from operator import mul
from functools import reduce
from utils.general import get_member
# Named feature bundles returned by PerceptualVGG.forward():
# VGGOutput carries the normalized input plus the relu activations used for
# the perceptual loss; StyleLayers the activations used for the gram-matrix
# style loss.  Field order defines the iteration order in the losses below.
VGGOutput = namedtuple(
    "VGGOutput",
    ["input", "relu1_2", "relu2_2", "relu3_2", "relu4_2", "relu5_2"],
)
StyleLayers = namedtuple("StyleLayers",["relu1_2","relu2_2","relu3_3", "relu4_3"])
class PerceptualVGG(nn.Module):
    """Pretrained VGG19 wrapper exposing intermediate activations.

    Serves as the feature extractor for the perceptual and style losses in
    this module.  ``forward`` expects images in [-1, 1] and returns a dict
    mapping layer names (plus ``"input"``) to the corresponding activations.
    """

    def __init__(self, weights=None):
        super().__init__()
        self.vgg = vgg19(pretrained=True)
        self.vgg.eval()
        self.vgg_layers = self.vgg.features
        # ImageNet statistics, shaped (1, 3, 1, 1) so they broadcast over
        # (B, C, H, W) image batches.
        self.register_buffer(
            "mean",
            torch.tensor([0.485, 0.456, 0.406], dtype=torch.float).reshape(1, 3, 1, 1),
        )
        self.register_buffer(
            "std",
            torch.tensor([0.229, 0.224, 0.225], dtype=torch.float).reshape(1, 3, 1, 1),
        )
        # module index inside vgg.features -> readable layer name
        self.target_layers = {
            "3": "relu1_2",
            "8": "relu2_2",
            "13": "relu3_2",
            "15": "relu3_3",
            "22": "relu4_2",
            "24": "relu4_3",
            "31": "relu5_2",
        }
        if weights is None:
            layer_names = ["input", "relu1_2", "relu2_2", "relu3_2",
                           "relu3_3", "relu4_2", "relu4_3", "relu5_2"]
            self.loss_weights = {name: 1. for name in layer_names}
        else:
            # NOTE(review): this assert compares against the *indices* in
            # target_layers while the default weights are keyed by layer
            # names -- kept unchanged to preserve behavior; verify intent.
            assert isinstance(weights, dict) and list(weights.keys()) == list(self.target_layers.keys()), f"The weights passed to PerceptualVGG have to be a dict with the keys {list(self.target_layers.keys())}"
            self.loss_weights = weights

    def forward(self, x):
        """Return ``{"input": normalized x, <layer name>: activation, ...}``."""
        # IMPORTANT: input is assumed to be in [-1, 1]; map to [0, 1] first,
        # then standardize with the ImageNet statistics.
        x = (x + 1.0) / 2.0
        x = (x - self.mean) / self.std
        out = {"input": x}
        for idx, layer in self.vgg_layers._modules.items():
            x = layer(x)
            if idx in self.target_layers:
                out[self.target_layers[idx]] = x
        return out
def vgg_loss(custom_vgg:PerceptualVGG, target, pred, weights=None):
    """
    Implements a vgg based perceptual loss, as extensively used for image/video generation tasks.

    :param custom_vgg: The vgg feature extractor for the perceptual loss, definition see above
    :param target: ground-truth image batch in [-1, 1]
    :param pred: predicted image batch in [-1, 1]
    :param weights: optional per-pixel weight map; only applied to the raw
        pixel ("input") term, the deeper feature terms stay unweighted
    :return: dict mapping layer name -> weighted L1 loss of shape (1,)
    """
    target_feats = custom_vgg(target)
    pred_feats = custom_vgg(pred)
    target_feats = VGGOutput(**{key: target_feats[key] for key in VGGOutput._fields})
    pred_feats = VGGOutput(**{key: pred_feats[key] for key in VGGOutput._fields})
    names = list(pred_feats._asdict().keys())
    loss_weights = get_member(custom_vgg, "loss_weights")
    if weights is None:
        losses = {}
        for i, (tf, pf) in enumerate(zip(target_feats, pred_feats)):
            loss = loss_weights[VGGOutput._fields[i]] * torch.mean(
                torch.abs(tf - pf)
            ).unsqueeze(dim=-1)
            losses.update({names[i]: loss})
    else:
        # pixel-space term gets the spatial weighting
        losses = {
            names[0]: loss_weights[VGGOutput._fields[0]]
            * torch.mean(weights * torch.abs(target_feats[0] - pred_feats[0]))
            .unsqueeze(dim=-1)
            .to(torch.float)
        }
        for i, (tf, pf) in enumerate(zip(target_feats[1:], pred_feats[1:])):
            # FIX: loss_weights is keyed by layer names, not integer indices;
            # the previous ``loss_weights[i + 1]`` raised a KeyError whenever
            # a pixel weight map was supplied.
            loss = loss_weights[VGGOutput._fields[i + 1]] * torch.mean(
                torch.abs(tf - pf)
            ).unsqueeze(dim=-1)
            losses.update({names[i + 1]: loss})
    return losses
def vgg_loss_agg(vgg, target, pred, weights=None):
    """Aggregate the per-layer VGG losses into a single scalar.

    :param vgg: PerceptualVGG feature extractor
    :param target: ground-truth batch in [-1, 1]
    :param pred: prediction batch in [-1, 1]
    :param weights: optional pixel weight map, forwarded to ``vgg_loss``
    :return: scalar tensor, sum over all layer losses
    """
    per_layer = vgg_loss(vgg, target, pred, weights)
    stacked = torch.stack(list(per_layer.values()), dim=0)
    return stacked.sum()
class PixelDynamicsLoss(nn.Module):
    """Matches the magnitude of temporal change of prediction and target.

    Compares |x_t - x_{t+k}| of the ground truth with the same quantity of
    the prediction (the prediction at time t is detached) and penalizes the
    squared mismatch.  With ``diff_pp`` the comparison is made per spatial
    location before averaging, otherwise the means are compared.
    """

    def __init__(self, diff_pp=False):
        super().__init__()
        self.diff_pp = diff_pp

    def forward(self, target_t, target_tk, pred_t, pred_tk):
        gt_change = (target_t - target_tk).abs()
        # gradient only flows through pred_tk
        pred_change = (pred_t.detach() - pred_tk).abs()
        if self.diff_pp:
            # per-pixel difference of changes, averaged, then squared
            return (gt_change - pred_change).mean() ** 2
        # difference of the *average* changes, squared
        return (gt_change.mean() - pred_change.mean()) ** 2
def pixel_triplet_loss(target_t,target_tk,pred_t, pred_tk,vgg:PerceptualVGG,layerwise = True, detach=True, diff_pp=False):
    """
    Perceptual dynamics loss: penalizes the squared mismatch between the
    feature-space change from frame t to frame t+k of the ground truth and
    the same change of the prediction.

    :param vgg: PerceptualVGG feature extractor
    :param target_t: ground-truth frame at time t
    :param target_tk: ground-truth frame at time t+k
    :param pred_t: predicted frame at time t
    :param pred_tk: predicted frame at time t+k
    :param layerwise: compute the squared difference per VGG layer and sum,
        instead of on the aggregated perceptual losses
    :param detach: whether or not to detach the predicted feats at time t
    :param diff_pp: whether to consider differences for each spatial location in each channel or average over all (default average)
    :return: scalar loss tensor
    """
    if layerwise:
        losses = {}
        # features at timestep t
        # FIX: removed a stray ``target_t.cuda()`` here -- it crashed CPU-only
        # runs and, unlike every other tensor in this function, silently moved
        # the tensor away from its incoming device.
        target_feats_t = vgg(target_t)
        pred_feats_t = vgg(pred_t.detach() if detach else pred_t)
        target_feats_t = VGGOutput(**{key: target_feats_t[key] for key in VGGOutput._fields})
        pred_feats_t = VGGOutput(**{key: pred_feats_t[key] for key in VGGOutput._fields})
        # features at timestep t+k
        target_feats_tk = vgg(target_tk)
        pred_feats_tk = vgg(pred_tk)
        target_feats_tk = VGGOutput(**{key: target_feats_tk[key] for key in VGGOutput._fields})
        pred_feats_tk = VGGOutput(**{key: pred_feats_tk[key] for key in VGGOutput._fields})
        names = list(pred_feats_t._asdict().keys())
        for i, (tft, pft, tftk, pftk) in enumerate(zip(target_feats_t, pred_feats_t,target_feats_tk, pred_feats_tk)):
            if diff_pp:
                loss = get_member(vgg,"loss_weights")[VGGOutput._fields[i]] * torch.mean((torch.abs(tft - tftk) - torch.abs(pft - pftk)) ** 2).unsqueeze(dim=-1)
            else:
                loss = get_member(vgg,"loss_weights")[VGGOutput._fields[i]] * (torch.mean(torch.abs(tft - tftk)).unsqueeze(dim=-1) - torch.mean(torch.abs(pft - pftk)).unsqueeze(dim=-1))**2
            losses.update({names[i]: loss})
        loss_tensor = torch.stack([losses[key] for key in losses], dim=0, )
        ptl = loss_tensor.sum()
    else:
        # NOTE(review): the aggregated branch always detaches pred_t
        # regardless of ``detach`` -- kept as-is to preserve behavior.
        ptl = (vgg_loss_agg(vgg, pred_t.detach(), pred_tk) - vgg_loss_agg(vgg, target_t, target_tk)) ** 2
    return ptl
def style_loss(vgg, style_target, pred):
    """Gram-matrix style loss over the StyleLayers feature maps.

    For every style layer, the Gram matrices of prediction and target are
    compared via a squared Frobenius norm normalized by the feature-map
    size; the per-layer terms are summed.
    """
    target_feats = vgg(style_target)
    pred_feats = vgg(pred)
    target_feats = StyleLayers(**{key: target_feats[key] for key in StyleLayers._fields})
    pred_feats = StyleLayers(**{key: pred_feats[key] for key in StyleLayers._fields})

    layer_losses = []
    for tf, pf in zip(target_feats, pred_feats):
        shape = pf.shape
        pf_flat = pf.reshape(*shape[:2], -1)
        tf_flat = tf.reshape(*shape[:2], -1)
        # difference of Gram matrices, normalized by C*H*W
        gram_diff = 1. / (shape[1] * shape[2] * shape[3]) * (
            torch.matmul(pf_flat, pf_flat.permute(0, 2, 1))
            - torch.matmul(tf_flat, tf_flat.permute(0, 2, 1))
        )
        layer_losses.append((torch.norm(gram_diff, p="fro", dim=[1, 2]) ** 2).mean())
    return torch.stack(layer_losses).sum()
class DynamicsLoss(nn.Module):
    """
    Triplet loss
    Takes embeddings of an anchor sample, a positive sample and a negative sample
    """

    def __init__(self, config):
        super().__init__()
        # config is accepted for interface compatibility but unused here
        self.mse = nn.MSELoss()

    def forward(self, anchor, positive, negative, ):
        # list inputs: average the MSE over corresponding pairs
        if isinstance(anchor, list) and isinstance(positive, list):
            pair_losses = [self.mse(a, p) for a, p in zip(anchor, positive)]
            return torch.stack(pair_losses).mean()
        # NOTE(review): ``negative`` is accepted but never used here
        return self.mse(anchor, positive)
def kl_loss_check(latents):
    """
    Estimates a gaussian from the latents and returns the kl_divergence between this gaussian and the standard normal
    :param latents: tensor whose channel dimension is split into (mu, sigma)
    :return: scalar KL loss
    """
    half = int(latents.shape[1] / 2)
    mu = latents[:, :half]
    sigma = latents[:, half:]
    # reparameterize: squash the raw sigma logits into (0, 1) as log-std
    logstd = nn.Sigmoid()(sigma)
    return kl_loss(mu, logstd)
def kl_loss(mu, logstd):
    """KL divergence between N(mu, exp(logstd)^2) and the standard normal,
    averaged over the batch.  Non-2D inputs are flattened per sample."""
    if mu.dim() != 2:
        mu = mu.reshape(mu.shape[0], -1)
        logstd = logstd.reshape(mu.shape[0], -1)
    dim = mu.shape[1]
    std = torch.exp(logstd)
    per_sample = torch.sum(-logstd + 0.5 * (std ** 2 + mu ** 2), dim=-1) - (0.5 * dim)
    return per_sample.mean()
| 9,190 | 33.423221 | 210 | py |
interactive-image2video-synthesis | interactive-image2video-synthesis-main/utils/metric_fvd.py | import numpy as np
import argparse
from os import path
import torch
import ssl
from glob import glob
from natsort import natsorted
ssl._create_default_https_context = ssl._create_unverified_context
import cv2
from utils.metrics import compute_fvd
from utils.general import get_logger
if __name__ == '__main__':
    # CLI tool: loads real/fake video sample arrays from --source (as written
    # by the "fvd" testing mode) and computes the FVD between them.
    parser = argparse.ArgumentParser()
    parser.add_argument("-s", "--source", type=str,
                        required=True,
                        help="Source directory where the data is stored.")
    parser.add_argument("--gpu",type=int, required=True, help="The target device.")
    parser.add_argument("-v","--visualize",default=False,action="store_true")
    args = parser.parse_args()
    if not path.isdir(args.source):
        raise NotADirectoryError(f'The specified, data-holding directory {args.source} is not existing...')
    file = path.basename(__file__)
    logger = get_logger(file)
    logger.info("Read in data...")
    # Prefer per-configuration sample files (real_samples_<...>.npy); fall
    # back to the single real_samples.npy / fake_samples.npy pair.
    real_samples_list = natsorted(glob(path.join(args.source, "real_samples_*.npy")))
    fake_samples_list = natsorted(glob(path.join(args.source, "fake_samples_*.npy")))
    if len(real_samples_list) == 0:
        fake_samples_list = [path.join(args.source, "fake_samples.npy")]
        real_samples_list = [path.join(args.source, "real_samples.npy")]
    for i,(real_samples, fake_samples) in enumerate(zip(real_samples_list,fake_samples_list)):
        # File names may encode sequence length and number of context frames
        # (real_samples_<length>_..._<context>.npy); plain files don't.
        try:
            length = int(real_samples.split("/")[-1].split(".")[0].split("_")[2])
            context = int(real_samples.split("/")[-1].split(".npy")[0].split("_")[-1])
            logger.info(f"processing samples of length {length} with {context} context frames.")
        except:
            logger.info(f"Processing standard samples")
        real_samples = np.load(real_samples)
        fake_samples = np.load(fake_samples)
        if args.visualize:
            # Dump the first real/fake sequence as mp4 files for eyeballing.
            vis_real = real_samples[0,0]
            vis_fake = fake_samples[0,0]
            # visualize
            writer = cv2.VideoWriter(
                path.join(args.source, "test_vid_fake.mp4"),
                cv2.VideoWriter_fourcc(*"MP4V"),
                5,
                (vis_fake.shape[2], vis_fake.shape[1]),
            )
            # writer = vio.FFmpegWriter(savename,inputdict=inputdict,outputdict=outputdict)
            for frame in vis_fake:
                frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
                writer.write(frame)
            writer.release()
            writer = cv2.VideoWriter(
                path.join(args.source, "test_vid_real.mp4"),
                cv2.VideoWriter_fourcc(*"MP4V"),
                5,
                (vis_real.shape[2], vis_real.shape[1]),
            )
            # writer = vio.FFmpegWriter(savename,inputdict=inputdict,outputdict=outputdict)
            for frame in vis_real:
                frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
                writer.write(frame)
            writer.release()
        # cap the number of evaluated batches at 62 per file
        real_samples = real_samples[:62] if real_samples.shape[0] > 62 else real_samples
        fake_samples = fake_samples[:62] if fake_samples.shape[0] > 62 else fake_samples
        logger.info(f'Number of samples: {len(fake_samples)}')
        target_device = args.gpu
        # compute_fvd expects lists of torch tensors
        real_samples = list(real_samples)
        fake_samples = list(fake_samples)
        real_samples = [torch.from_numpy(r) for r in real_samples]
        fake_samples = [torch.from_numpy(r) for r in fake_samples]
        fvd_val = compute_fvd(real_samples,fake_samples,target_device,logger)
| 3,569 | 30.59292 | 107 | py |
interactive-image2video-synthesis | interactive-image2video-synthesis-main/utils/testing.py | import numpy as np
import torch
from skimage.metrics import structural_similarity as ssim
import cv2
import math
import imutils
import matplotlib.pyplot as plt
import wandb
from os import path
import math
def make_flow_grid(src, poke, pred, tgt, n_logged, flow=None):
    """
    Build a static image grid for logging: source row, poke-flow row,
    prediction row, target row, an SSIM map row, and optionally the
    complete source flow.

    :param src: source image batch in [-1, 1], (B, 3, H, W)
    :param poke: poke flow maps, (B, 2, H, W)
    :param pred: prediction; either an image batch (3 channels) or a flow map
    :param tgt: target image batch in [-1, 1]
    :param n_logged: number of examples to include in the grid
    :param flow: optional complete source flow to display as an extra row
    :return: uint8 image grid, rows stacked vertically
    """
    src = ((src.permute(0, 2, 3, 1).cpu().numpy() + 1.) * 127.5).astype(np.uint8)[:n_logged]
    # poke = poke.permute(0, 2, 3, 1).cpu().numpy()[:n_logged]
    # poke -= poke.min()
    # poke /= poke.max()
    # poke = (poke * 255.0).astype(np.uint8)
    # poke = np.concatenate([poke, np.expand_dims(np.zeros_like(poke).sum(-1), axis=-1)], axis=-1).astype(np.uint8)
    poke = vis_flow(poke[:n_logged])
    poke = np.concatenate(poke,axis=1)
    # if prediction is image, just take the premuted image
    if pred.shape[1] == 3:
        pred = ((pred.permute(0, 2, 3, 1).cpu().numpy() + 1.) * 127.5).astype(
            np.uint8)[:n_logged]
    else:
        # if prediction is flow, you need treat it like that
        pred = pred.permute(0, 2, 3, 1).cpu().numpy()[:n_logged]
        pred -= pred.min()
        pred /= pred.max()
        pred = (pred * 255.0).astype(np.uint8)
        # pad a zero third channel so the flow can be shown as an RGB image
        pred = np.concatenate([pred, np.expand_dims(np.zeros_like(pred).sum(-1), axis=-1)], axis=-1).astype(np.uint8)
    tgt = ((tgt.permute(0, 2, 3, 1).cpu().numpy() + 1.) * 127.5).astype(
        np.uint8)[:n_logged]
    # per-example SSIM maps between grayscale target and prediction
    tgt_gr = [cv2.cvtColor(t,cv2.COLOR_RGB2GRAY) for t in tgt]
    pred_gr = [cv2.cvtColor(t,cv2.COLOR_RGB2GRAY) for t in pred]
    ssim_imgs = [ssim(rimg, fimg, multichannel=True, data_range=255, gaussian_weights=True, use_sample_covariance=False, full=True)[1] for rimg, fimg in zip(tgt_gr, pred_gr)]
    additional = [np.concatenate([cv2.cvtColor((s * 255.).astype(np.uint8),cv2.COLOR_GRAY2RGB) for s in ssim_imgs],axis=1)]
    if flow is not None:
        # if provided, use additional flow information (in case of poking, that's the entire flow src --> tgt
        # add = flow.permute(0, 2, 3, 1).cpu().numpy()[:n_logged]
        # add -= add.min()
        # add /= add.max()
        # add = (add * 255.0).astype(np.uint8)
        # add = np.concatenate([add, np.expand_dims(np.zeros_like(add).sum(-1), axis=-1)], axis=-1).astype(np.uint8)
        # additional = additional + [np.concatenate([a for a in add], axis=1)]
        add = vis_flow(flow[:n_logged])
        add = np.concatenate(add,axis=1)
        additional = additional + [add]
    # compute ssim_img in grayscale
    src = np.concatenate([s for s in src], axis=1)
    #poke = np.concatenate([f for f in poke], axis=1)
    pred = np.concatenate([p for p in pred], axis=1)
    tgt = np.concatenate([t for t in tgt], axis=1)
    grid = np.concatenate([src,poke,pred,tgt,*additional],axis=0)
    return grid
def vis_flow(flow_map, normalize=False):
    """Render a batch of 2-channel flow maps as RGB images via HSV encoding.

    Hue encodes the flow direction, value the magnitude.

    :param flow_map: (B, 2, H, W) tensor or ndarray of optical flow
    :param normalize: additionally min/max-normalize each RGB image
    :return: list of (H, W, 3) uint8 RGB images, one per batch element
    """
    if isinstance(flow_map,torch.Tensor):
        flow_map = flow_map.cpu().numpy()
    flows_vis = []
    for flow in flow_map:
        hsv = np.zeros((*flow.shape[1:],3),dtype=np.uint8)
        hsv[...,1] = 255
        mag, ang = cv2.cartToPolar(flow[0], flow[1])
        # since 360 is not valid for uint8, 180° corresponds to 360° for opencv hsv representation. Therefore, we're dividing the angle by 2 after conversion to degrees
        hsv[...,0] = ang * 180 / np.pi / 2
        hsv[...,2] = cv2.normalize(mag,None,alpha=0,beta=255, norm_type=cv2.NORM_MINMAX)
        as_rgb = cv2.cvtColor(hsv,cv2.COLOR_HSV2RGB)
        if normalize:
            # FIX: ``np.float`` was removed in NumPy 1.24 -- use np.float64
            as_rgb = as_rgb.astype(np.float64) - as_rgb.min(axis=(0,1),keepdims=True)
            as_rgb = (as_rgb / as_rgb.max(axis=(0,1),keepdims=True)*255.).astype(np.uint8)
        flows_vis.append(as_rgb)
    return flows_vis
def vis_flow_dense(flow_map, **kwargs):
    """Alternative flow visualization; returns a BGR uint8 image per sample.

    Hue encodes the (shifted) flow angle, value the magnitude.
    """
    if isinstance(flow_map, torch.Tensor):
        flow_map = flow_map.cpu().numpy()
    rendered = []
    for flow in flow_map:
        height, width = flow.shape[1:]
        fx = flow[0]
        fy = flow[1]
        angle = np.arctan2(fy, fx) + np.pi
        magnitude = np.sqrt(fx * fx + fy * fy)
        hsv = np.zeros((height, width, 3), np.uint8)
        hsv[..., 0] = angle * (180 / np.pi / 2)
        hsv[..., 1] = 255
        hsv[..., 2] = cv2.normalize(magnitude, None, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX)
        rendered.append(cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR))
    return rendered
def make_trf_video(img1,img2,v12,v21,poke,n_logged,logwandb=True,length_divisor=5):
    """
    Build a video grid for transfer experiments: the two source images (with
    the mean poke direction drawn as an arrow) stacked over the two generated
    videos with swapped foreground/background.

    :param img1: first source image batch in [-1, 1]
    :param img2: second source image batch in [-1, 1]
    :param v12: generated video with foreground of img1 and background of img2
    :param v21: generated video with foreground of img2 and background of img1
    :param poke: poke flow maps driving the generation
    :param n_logged: number of examples to include
    :param logwandb: if True, move channels to wandb's (T, C, H, W) layout
    :param length_divisor: divisor for the drawn arrow length
    :return: uint8 video grid
    """
    seq_len = v12.shape[1]
    pokes = vis_flow(poke[:n_logged])
    img1 = ((img1.permute(0, 2, 3, 1).cpu().numpy() + 1.) * 127.5).astype(np.uint8)[:n_logged]
    img2 = ((img2.permute(0, 2, 3, 1).cpu().numpy() + 1.) * 127.5).astype(np.uint8)[:n_logged]
    img1_with_arrow = []
    img2_with_arrow = []
    eps = 1e-6
    for i, (poke_p, img1_i, img2_i) in enumerate(zip(poke[:n_logged], img1, img2)):
        poke_points = np.nonzero(pokes[i].any(-1) > 0)
        if poke_points[0].size == 0:
            img1_with_arrow.append(img1_i)
            img2_with_arrow.append(img2_i)
        else:
            min_y = np.amin(poke_points[0])
            max_y = np.amax(poke_points[0])
            min_x = np.amin(poke_points[1])
            max_x = np.amax(poke_points[1])
            # plot mean direction of flow in poke region
            avg_flow = np.mean(poke_p[:, min_y:max_y, min_x:max_x].cpu().numpy(), axis=(1, 2))
            arrow_dir = avg_flow / (np.linalg.norm(avg_flow) + eps) * (poke_p.shape[1] / length_divisor)
            if not math.isnan(arrow_dir[0]) or not math.isnan(arrow_dir[1]):
                arrow_start = (int((min_x + max_x) / 2), int((min_y + max_y) / 2))
                arrow_end = (arrow_start[0] + int(arrow_dir[0]), arrow_start[1] + int(arrow_dir[1]))
                img1_with_arrow.append(cv2.UMat.get(cv2.arrowedLine(cv2.UMat(img1_i), arrow_start, arrow_end, (255, 0, 0), max(int(img1_i.shape[0] / 64), 1))))
                img2_with_arrow.append(cv2.UMat.get(cv2.arrowedLine(cv2.UMat(img2_i), arrow_start, arrow_end, (255, 0, 0), max(int(img2_i.shape[0] / 64), 1))))
            else:
                # FIX: previously appended to the numpy arrays img1/img2 (which
                # raised AttributeError); the arrow-less images belong in the
                # *_with_arrow lists used below.
                img1_with_arrow.append(img1_i)
                img2_with_arrow.append(img2_i)
    vid_st1 = np.concatenate(img1_with_arrow, axis=1)
    vid_st2 = np.concatenate(img2_with_arrow, axis=1)
    vid_st1 = put_text_to_video_row(np.stack([vid_st1] * seq_len, axis=0), "Image 1", color=(255, 0, 0))
    vid_st2 = put_text_to_video_row(np.stack([vid_st2] * seq_len, axis=0), "Image 2", color=(255, 0, 0))
    v12 = ((v12.permute(0, 1, 3, 4, 2).cpu().numpy() + 1.) * 127.5).astype(np.uint8)[:n_logged]
    v12 = np.concatenate(list(v12), axis=2)
    v12 = put_text_to_video_row(v12, "Vid: FG1-BG2")
    v21 = ((v21.permute(0, 1, 3, 4, 2).cpu().numpy() + 1.) * 127.5).astype(np.uint8)[:n_logged]
    v21 = np.concatenate(list(v21), axis=2)
    v21 = put_text_to_video_row(v21, "Vid: FG2-BG1")
    full = np.concatenate([vid_st1, vid_st2, v12, v21], axis=1)
    if logwandb:
        full = np.moveaxis(full, [0, 1, 2, 3], [0, 2, 3, 1])
    return full
def draw_arrow(traj):
    """Draw per-pixel flow vectors of a trajectory batch as red arrows.

    :param traj: (B, 2, H, W) array of flow vectors; arrows are drawn only at
        locations where the (uint8-cast) flow is non-zero
    :return: the B arrow images concatenated horizontally, (H, B*W, 3) uint8
    """
    arrow_imgs = []
    for c, t in enumerate(traj):
        active_points = np.nonzero(t.astype(np.uint8).any(0) > 0)
        img = np.zeros((*t.shape[1:], 3), dtype=np.uint8)
        if active_points[0].size > 0:
            for i in range(active_points[0].shape[0]):
                y = active_points[0][i]
                x = active_points[1][i]
                arrow_dir = t[:, y, x]
                if not math.isnan(arrow_dir[0]) or not math.isnan(arrow_dir[1]):
                    arrow_start = (x, y)
                    # FIX: clip x against the image *width* (shape[1]); the
                    # previous code clipped both coordinates against the
                    # height, which is wrong for non-square frames.
                    arrow_end = (int(np.clip(x + int(arrow_dir[0]), 0, img.shape[1])), int(np.clip(y + int(arrow_dir[1]), 0, img.shape[0])))
                    img = cv2.arrowedLine(img, arrow_start, arrow_end, (255, 0, 0), max(int(traj.shape[1] / 64), 1))
        arrow_imgs.append(img)
    arrow_imgs = np.concatenate(arrow_imgs, axis=1)
    return arrow_imgs
def img_grid_ci(src, traj, pred, tgt, n_logged):
    """Static image grid: source row, arrow row, prediction row, target row.

    Inputs are expected in [0, 1] here (scaled by 255), unlike the [-1, 1]
    convention used by the other grid builders in this module.
    """
    def to_uint8(batch):
        return ((batch.permute(0, 2, 3, 1).cpu().numpy()) * 255.).astype(np.uint8)[:n_logged]

    src_row = np.concatenate(list(to_uint8(src)), axis=1)
    pred_row = np.concatenate(list(to_uint8(pred)), axis=1)
    tgt_row = np.concatenate(list(to_uint8(tgt)), axis=1)
    arrow_row = draw_arrow(traj[:n_logged].cpu().numpy())
    return np.concatenate([src_row, arrow_row, pred_row, tgt_row], axis=0)
def make_video_ci(src,traj,pred,tgt,n_logged,logwandb=True, display_frame_nr=True):
    """
    Build an annotated video grid with per-frame flow-vector visualizations.

    :param src: source image batch in [0, 1], (B, 3, H, W)
    :param traj: per-frame flow vectors, (B, T, 2, H, W)
    :param pred: predicted video in [0, 1], (B, T, 3, H, W)
    :param tgt: ground-truth video in [0, 1], same shape as pred
    :param n_logged: number of examples to include
    :param logwandb: if True, move channels to wandb's (T, C, H, W) layout
    :param display_frame_nr: overlay the frame index on every frame
    :return: uint8 video grid
    """
    seq_len = tgt.shape[1]
    srcs = np.concatenate([s for s in ((src.permute(0, 2, 3, 1).cpu().numpy()) * 255.).astype(np.uint8)[:n_logged]],axis=1)
    # one arrow image per frame of the trajectory
    traj_vis = []
    for t in range(traj.shape[1]):
        arrows = draw_arrow(traj[:n_logged,t].cpu().numpy())
        traj_vis.append(arrows)
    traj_vis = np.stack(traj_vis,axis=0)
    traj_vis = put_text_to_video_row(traj_vis, "Flow Vectors", display_frame_nr=display_frame_nr)
    srcs = cv2.UMat.get(cv2.putText(cv2.UMat(srcs), f"Sequence length {seq_len}", (int(srcs.shape[1] // 3), int(srcs.shape[0] / 6)), cv2.FONT_HERSHEY_SIMPLEX,
                                    float(srcs.shape[0] / 256), (255, 0, 0), int(srcs.shape[0] / 128)))
    # repeat the (static) source row over the whole sequence length
    srcs = np.stack([srcs] * seq_len, axis=0)
    srcs = put_text_to_video_row(srcs, "Input Image", display_frame_nr=display_frame_nr)
    pred = ((pred.permute(0, 1, 3, 4, 2).cpu().numpy()) * 255).astype(np.uint8)[:n_logged]
    pred = np.concatenate(list(pred), axis=2)
    pred = put_text_to_video_row(pred, "Predicted Video", display_frame_nr=display_frame_nr)
    tgt = ((tgt.permute(0, 1, 3, 4, 2).cpu().numpy()) * 255).astype(np.uint8)[:n_logged]
    tgt = np.concatenate(list(tgt), axis=2)
    tgt = put_text_to_video_row(tgt, "Groundtruth Video", display_frame_nr=display_frame_nr)
    full = np.concatenate([srcs, pred, tgt, traj_vis], axis=1)
    if logwandb:
        # wandb requires (time, channels, height, width)
        full = np.moveaxis(full, [0, 1, 2, 3], [0, 2, 3, 1])
    return full
def make_video(src,poke,pred,tgt,n_logged,flow=None,length_divisor=5,logwandb=True,flow_weights= None, display_frame_nr=False,invert_poke = False):
    """
    Assemble an annotated video grid: input image (with the poke drawn as an
    arrow), predicted video, ground-truth video and the flow visualizations
    stacked vertically.

    :param src: src image
    :param poke: poke, also input to the network
    :param pred: predicted video of the network
    :param tgt: target video the network was trained to reconstruct
    :param n_logged: number of logged examples
    :param flow: src flow from which the poke is originating
    :param length_divisor: divisor for the length of the arrow, that's drawn to visualize the mean direction of the flow within the poke patch
    :param logwandb: whether the output video grid is intended to be logged with wandb or not (in this case the grid channels have to be changed)
    :param flow_weights: Optional weights for the flow which are also displayed if they are not None.
    :param display_frame_nr: overlay the frame index on every frame
    :param invert_poke: show the inverted poke direction for the second half of the sequence
    :return: uint8 video grid; (T, C, H, W) if logwandb else (T, H, W, C)
    """
    seq_len = tgt.shape[1]
    src = ((src.permute(0, 2, 3, 1).cpu().numpy() + 1.) * 127.5).astype(np.uint8)[:n_logged]
    pokes = vis_flow(poke[:n_logged])
    flows_vis = None
    if flow is not None:
        # visualize the complete source flow and mark the poke patch with a rectangle
        flows = vis_flow(flow[:n_logged])
        flows_with_rect = []
        for i,(poke_p,flow) in enumerate(zip(pokes,flows)):
            poke_points = np.nonzero(poke_p.any(-1) > 0)
            if poke_points[0].size == 0:
                flows_with_rect.append(np.zeros_like(flow))
            else:
                min_y = np.amin(poke_points[0])
                max_y = np.amax(poke_points[0])
                min_x = np.amin(poke_points[1])
                max_x = np.amax(poke_points[1])
                # draw rect
                flow_with_rect = cv2.rectangle(flow,(min_x,min_y),(max_x,max_y),(255,255,255),max(1,int(flow.shape[0]//64)))
                # flow_with_rect = cv2.UMat.get(cv2.putText(cv2.UMat(flow_with_rect), f"Flow Complete",(int(flow_with_rect.shape[1] // 3), int(5 * flow_with_rect.shape[0] / 6) ), cv2.FONT_HERSHEY_SIMPLEX,
                #                    float(flow_with_rect.shape[0] / 256), (255, 255, 255), int(flow_with_rect.shape[0] / 128)))
                flows_with_rect.append(flow_with_rect)
        flow_cat = np.concatenate(flows_with_rect,axis=1)
        flows_vis= [np.stack([flow_cat]*seq_len,axis=0)]
        flows_vis[0] = put_text_to_video_row(flows_vis[0], "Flow Complete", color=(255, 255, 255))
    if flow_weights is not None:
        # render the flow weighting masks as a heat-map row
        flow_weights = flow_weights.cpu().numpy()
        heatmaps = []
        for i, weight in enumerate(flow_weights):
            weight_map = ((weight - weight.min()) / weight.max() * 255.).astype(np.uint8)
            heatmap = cv2.applyColorMap(weight_map, cv2.COLORMAP_HOT)
            heatmap = cv2.cvtColor(heatmap, cv2.COLOR_RGB2BGR)
            heatmaps.append(heatmap)
        heatmaps = np.concatenate(heatmaps, axis=1)
        heatmaps = np.stack([heatmaps]*seq_len, axis=0)
        heatmaps = put_text_to_video_row(heatmaps, "Flow Weights", color=(255,255,255))
        if flows_vis is None:
            flows_vis = [heatmaps]
        else:
            flows_vis.insert(0,heatmaps)
    srcs_with_arrow = []
    pokes_with_arrow = []
    if invert_poke:
        srcs_with_arrow_inv = []
        pokes_with_arrow_inv = []
    eps = 1e-6
    for i, (poke_p,src_i) in enumerate(zip(poke[:n_logged],src)):
        poke_points = np.nonzero(pokes[i].any(-1) > 0)
        if poke_points[0].size==0:
            pokes_with_arrow.append(np.zeros_like(pokes[i]))
            srcs_with_arrow.append(src_i)
        else:
            min_y = np.amin(poke_points[0])
            max_y = np.amax(poke_points[0])
            min_x = np.amin(poke_points[1])
            max_x = np.amax(poke_points[1])
            # plot mean direction of flow in poke region
            avg_flow = np.mean(poke_p[:, min_y:max_y, min_x:max_x].cpu().numpy(), axis=(1, 2))
            arrow_dir = avg_flow / (np.linalg.norm(avg_flow) + eps) * (poke_p.shape[1] / length_divisor)
            if not math.isnan(arrow_dir[0]) or not math.isnan(arrow_dir[1]):
                arrow_start = (int((min_x + max_x) / 2), int((min_y + max_y) / 2))
                arrow_end = (arrow_start[0] + int(arrow_dir[0]), arrow_start[1] + int(arrow_dir[1]))
                test = pokes[i]
                # test = cv2.UMat.get(cv2.putText(cv2.UMat(test), f"Poke", (int(test.shape[1] // 3), int(5 * test .shape[0] / 6)), cv2.FONT_HERSHEY_SIMPLEX,
                #                    float(test.shape[0] / 256), (255, 255, 255), int(test.shape[0] / 128)))
                pokes_with_arrow.append(cv2.arrowedLine(test, arrow_start, arrow_end, (255, 0, 0), max(int(src_i.shape[0] / 64),1)))
                srcs_with_arrow.append(cv2.UMat.get(cv2.arrowedLine(cv2.UMat(src_i), arrow_start, arrow_end, (255, 0, 0), max(int(src_i.shape[0] / 64),1))))
                if invert_poke:
                    # second arrow pointing the opposite way, drawn in green
                    arrow_end_inv = (arrow_start[0] - int(arrow_dir[0]), arrow_start[1] - int(arrow_dir[1]))
                    pokes_with_arrow_inv.append(cv2.arrowedLine(test, arrow_start, arrow_end_inv, (0, 255, 0), max(int(src_i.shape[0] / 64), 1)))
                    srcs_with_arrow_inv.append(cv2.UMat.get(cv2.arrowedLine(cv2.UMat(src_i), arrow_start, arrow_end, (0, 255, 0), max(int(src_i.shape[0] / 64), 1))))
            else:
                pokes_with_arrow.append(np.zeros_like(pokes[i]))
                srcs_with_arrow.append(src_i)
    poke = np.concatenate(pokes_with_arrow, axis=1)
    if invert_poke:
        # first half of the sequence shows the forward poke, second half the inverted one
        poke_inv = np.concatenate(pokes_with_arrow_inv, axis=1)
        poke = put_text_to_video_row(np.stack([*[poke] * int(math.ceil(float(seq_len)/2)),*[poke_inv]*int(seq_len/2)], axis=0),"Pokes",color=(255,255,255))
    else:
        poke = put_text_to_video_row(np.stack([poke] * seq_len, axis=0),"Poke",color=(255,255,255))
    if flows_vis is None:
        flows_vis = [poke]
    else:
        flows_vis.append(poke)
    srcs = np.concatenate(srcs_with_arrow,axis=1)
    srcs = cv2.UMat.get(cv2.putText(cv2.UMat(srcs), f"Sequence length {seq_len}", (int(srcs.shape[1] // 3), int(srcs.shape[0]/6)), cv2.FONT_HERSHEY_SIMPLEX,
                                    float(srcs.shape[0] / 256), (255, 0, 0), int(srcs.shape[0] / 128)))
    if invert_poke:
        srcs_inv = np.concatenate(srcs_with_arrow_inv, axis=1)
        srcs_inv = cv2.UMat.get(cv2.putText(cv2.UMat(srcs_inv), f"Sequence length {seq_len}", (int(srcs_inv.shape[1] // 3), int(srcs_inv.shape[0] / 6)), cv2.FONT_HERSHEY_SIMPLEX,
                                            float(srcs_inv.shape[0] / 256), (255, 0, 0), int(srcs_inv.shape[0] / 128)))
        srcs = np.stack([*[srcs] * int(math.ceil(float(seq_len)/2)),*[srcs_inv]*int(seq_len/2)],axis=0)
    else:
        srcs = np.stack([srcs]*seq_len,axis=0)
    srcs = put_text_to_video_row(srcs,"Input Image",display_frame_nr=display_frame_nr)
    pred = ((pred.permute(0, 1, 3, 4, 2).cpu().numpy() + 1.) * 127.5).astype(np.uint8)[:n_logged]
    pred = np.concatenate(list(pred),axis=2)
    pred = put_text_to_video_row(pred, "Predicted Video",display_frame_nr=display_frame_nr)
    tgt = ((tgt.permute(0, 1, 3, 4, 2).cpu().numpy() + 1.) * 127.5).astype(np.uint8)[:n_logged]
    tgt = np.concatenate(list(tgt),axis=2)
    tgt = put_text_to_video_row(tgt,"Groundtruth Video",display_frame_nr=display_frame_nr)
    full = np.concatenate([srcs,pred,tgt,*flows_vis],axis=1)
    if logwandb:
        # wandb requires (time, channels, height, width)
        full = np.moveaxis(full,[0,1,2,3],[0,2,3,1])
    return full
def put_text_to_video_row(video_row, text, color=None, display_frame_nr=False):
    """Overlay a caption (and optionally the frame number) on every frame.

    :param video_row: (T, H, W, 3) uint8 array, one row of a video grid
    :param text: caption drawn near the bottom of each frame
    :param color: color tuple; defaults to (255, 0, 0)
    :param display_frame_nr: additionally draw the 1-based frame index
    :return: (T, H, W, 3) uint8 array with the overlays burnt in
    """
    chosen_color = (255, 0, 0) if color is None else color
    annotated = []
    for idx, frame in enumerate(video_row):
        font_scale = float(frame.shape[0] / 256)
        thickness = int(frame.shape[0] / 128)
        y_pos = frame.shape[0] - int(frame.shape[0] / 6)
        labeled = cv2.UMat.get(cv2.putText(cv2.UMat(frame), text, (int(frame.shape[1] // 3), y_pos), cv2.FONT_HERSHEY_SIMPLEX,
                                           font_scale, chosen_color, thickness))
        if display_frame_nr:
            labeled = cv2.UMat.get(cv2.putText(cv2.UMat(labeled), str(idx + 1), (int(frame.shape[1] / 32), y_pos), cv2.FONT_HERSHEY_SIMPLEX,
                                               font_scale, chosen_color, thickness))
        annotated.append(labeled)
    return np.stack(annotated)
def make_animated_grid(src, poke, pred, tgt, n_logged, flow=None, length_divisor=5,logwandb=True):
    """
    Build a 3-frame animated grid (source / predicted / target) plus the
    poke visualization and, optionally, the complete source flow.

    NOTE(review): unlike ``make_video``, this assumes a non-empty poke
    patch -- an all-zero poke would make ``np.amin`` fail on an empty index
    array; confirm callers guarantee this.

    :param src: source image batch in [-1, 1]
    :param poke: poke flow maps
    :param pred: predicted image batch in [-1, 1]
    :param tgt: target image batch in [-1, 1]
    :param n_logged: number of examples to include
    :param flow: optional complete source flow to display
    :param length_divisor: divisor for the drawn arrow length
    :param logwandb: if True, move channels to wandb's (T, C, H, W) layout
    :return: uint8 animation array usable by wandb.Video()
    """
    # visualize flows
    pokes = vis_flow(poke[:n_logged])
    pokes_with_arrow = []
    for i,poke_p in enumerate(poke[:n_logged]):
        poke_points = np.nonzero(pokes[i].any(-1) > 0)
        min_y = np.amin(poke_points[0])
        max_y = np.amax(poke_points[0])
        min_x = np.amin(poke_points[1])
        max_x = np.amax(poke_points[1])
        # plot mean direction of flow in poke region
        avg_flow = np.mean(poke_p[:,min_y:max_y,min_x:max_x].cpu().numpy(), axis=(1, 2))
        arrow_dir = avg_flow / np.linalg.norm(avg_flow) * (poke_p.shape[1] / length_divisor)
        arrow_start = (int((min_x+max_x)/2),int((min_y+max_y)/2))
        arrow_end = (arrow_start[0]+int(arrow_dir[0]),arrow_start[1]+int(arrow_dir[1]))
        test = pokes[i]
        pokes_with_arrow.append(cv2.arrowedLine(test,arrow_start,arrow_end,(0,0,255),2))
    poke = np.concatenate(pokes_with_arrow, axis=1)
    # repeat each static row 3 times so it animates alongside the images
    flows_vis = [np.stack([poke]*3,axis=0)]
    if flow is not None:
        flows = vis_flow(flow[:n_logged])
        flows_with_rect = []
        for i,(poke_p,flow) in enumerate(zip(pokes,flows)):
            poke_points = np.nonzero(poke_p.any(-1) > 0)
            min_y = np.amin(poke_points[0])
            max_y = np.amax(poke_points[0])
            min_x = np.amin(poke_points[1])
            max_x = np.amax(poke_points[1])
            # draw rect
            flow_with_rect = cv2.rectangle(flow,(min_x,min_y),(max_x,max_y),(255,255,255),max(1,int(flow.shape[0]//64)))
            flows_with_rect.append(flow_with_rect)
        flow_cat = np.concatenate(flows_with_rect,axis=1)
        flows_vis.insert(0,np.stack([flow_cat]*3,axis=0))
    # visualize images
    src = ((src.permute(0, 2, 3, 1).cpu().numpy() + 1.) * 127.5).astype(np.uint8)[:n_logged]
    pred = ((pred.permute(0, 2, 3, 1).cpu().numpy() + 1.) * 127.5).astype(np.uint8)[:n_logged]
    tgt = ((tgt.permute(0, 2, 3, 1).cpu().numpy() + 1.) * 127.5).astype(np.uint8)[:n_logged]
    src = np.concatenate(list(src),axis=1)
    pred = np.concatenate(list(pred), axis=1)
    tgt = np.concatenate(list(tgt), axis=1)
    src = cv2.UMat.get(cv2.putText(cv2.UMat(src), "Source", (int(src.shape[0] // 4), 30), cv2.FONT_HERSHEY_SIMPLEX,
                                   float(src.shape[0] / 256), (0, 0, 0), int(src.shape[0] / 128)))
    pred = cv2.UMat.get(cv2.putText(cv2.UMat(pred), "Predicted", (int(pred.shape[0] // 4), 30), cv2.FONT_HERSHEY_SIMPLEX,
                                    float(pred.shape[0] / 256), (0, 0, 0), int(pred.shape[0] / 128)))
    tgt = cv2.UMat.get(cv2.putText(cv2.UMat(tgt), "Target", (int(tgt.shape[0] // 4), 30), cv2.FONT_HERSHEY_SIMPLEX,
                                   float(tgt.shape[0] / 256), (0, 0, 0), int(tgt.shape[0] / 128)))
    animation = np.stack([src,pred,tgt],axis=0)
    # this generates a video grid which can be used by wandb.Video()
    full = np.concatenate([animation,*flows_vis],axis=1)
    # wandb requires video to have shape (time, channels, height, width)
    if logwandb:
        full = np.moveaxis(full,[0,1,2,3],[0,2,3,1])
    return full
def make_generic_grid(data, dtype, n_logged):
    """Build a visualization grid from heterogeneous batches.

    Each entry of `data` becomes one row of the grid; `dtype[i]` selects how
    `data[i]` is converted into an uint8 image row.

    :param data: list of tensors (or (generated, target) pairs for the diff types)
    :param dtype: list of tags, one per entry of `data`: "flow", "flow_3D",
        "img", "diff_flow_amplitude", "diff_flow_direction",
        "diff_flow_clipped" or "diff_scaled"
    :param n_logged: number of examples taken from each batch
    :return: uint8 grid; one row per entry of `data`, examples side by side
    :raises ValueError: on an unknown dtype tag (previously an UnboundLocalError)
    """
    from utils.visualizer import FlowVisualizer
    visualizer = FlowVisualizer()
    final_data = []
    assert (len(data) == len(dtype))
    for i, batch in enumerate(data):
        if dtype[i] == "flow":
            # min/max-normalize the 2-channel flow, pad a zero third channel
            add = batch.permute(0, 2, 3, 1).cpu().numpy()[:n_logged]
            add -= add.min()
            add /= add.max()
            add = (add * 255.0).astype(np.uint8)
            image = np.concatenate(
                [add, np.expand_dims(np.zeros_like(add).sum(-1), axis=-1)], axis=-1).astype(np.uint8)
        elif dtype[i] == "flow_3D":
            # 3-channel flow: min/max-normalize only
            add = batch.permute(0, 2, 3, 1).cpu().numpy()[:n_logged]
            add -= add.min()
            add /= add.max()
            add = (add * 255.0).astype(np.uint8)
            image = add
        elif dtype[i] == "img":
            # images come in [-1, 1]
            image = ((batch.permute(0, 2, 3, 1).cpu().numpy() + 1.) * 127.5).astype(np.uint8)[:n_logged]
        elif dtype[i] == "diff_flow_amplitude":
            generated = batch[0][:n_logged].detach().cpu()
            target = batch[1][:n_logged].detach().cpu()
            image = visualizer.create_diff_amplitude(
                visualizer.make_3d_to_2d(generated), visualizer.make_3d_to_2d(target))
            image = (image * 255).astype(np.uint8)[:, None].repeat(3, axis=1).transpose(0, 2, 3, 1)
        elif dtype[i] == "diff_flow_direction":
            generated = batch[0][:n_logged].detach().cpu()
            target = batch[1][:n_logged].detach().cpu()
            image = visualizer.create_diff_direction(
                visualizer.make_3d_to_2d(generated), visualizer.make_3d_to_2d(target))
            image = (image * 255).astype(np.uint8)[:, None].repeat(3, axis=1).transpose(0, 2, 3, 1)
        elif dtype[i] == "diff_flow_clipped":
            generated = batch[0][:n_logged].permute(0, 2, 3, 1).cpu().numpy()
            target = batch[1][:n_logged].permute(0, 2, 3, 1).cpu().numpy()
            image = np.sum(np.abs(generated - target), axis=-1)
            # BUGFIX: clip BEFORE the uint8 cast; the original cast first, so
            # values > 255 wrapped around and the subsequent clip was a no-op
            image = np.clip(image[:, :, :, None], 0, 255).astype(np.uint8)
            image = np.repeat(image, 3, axis=-1)
        elif dtype[i] == "diff_scaled":
            generated = batch[0][:n_logged].permute(0, 2, 3, 1).cpu().numpy()
            target = batch[1][:n_logged].permute(0, 2, 3, 1).cpu().numpy()
            image = np.sum(np.abs(generated - target), axis=-1)
            # NOTE(review): max over axis=0 normalizes per pixel across the
            # batch, not per example -- confirm this is intended
            image /= image.max(axis=0)
            image = (image[:, :, :, None] * 255.0).astype(np.uint8)
            image = np.repeat(image, 3, axis=-1)
        else:
            raise ValueError(f"make_generic_grid: unknown dtype '{dtype[i]}'")
        # lay the examples of this batch out side by side
        image = np.concatenate([s for s in image], axis=1)
        final_data.append(image)
    grid = np.concatenate(final_data, axis=0)
    return grid
def make_img_grid(appearance, shape, pred, tgt= None, n_logged=4, target_label="Target Images",
                  label_app = "Appearance Images", label_gen = "Generated Images", label_shape = "Shape Images"):
    """Stack labeled image rows (appearance / shape / generated [/ target]) into one grid.

    All inputs are float tensors of shape (B, C, H, W) in [-1, 1].

    :param appearance: appearance conditioning images
    :param shape: shape conditioning images
    :param pred: generated images
    :param tgt: optional ground-truth images; adds a fourth row when given
    :param n_logged: number of examples per row
    :param target_label: caption drawn on the target row
    :param label_app: caption drawn on the appearance row
    :param label_gen: caption drawn on the generated row
    :param label_shape: caption drawn on the shape row
    :return: uint8 grid with 3 (or 4) labeled rows
    """
    def _labeled_row(batch, label):
        # [-1,1] (B,C,H,W) tensor -> uint8 (H, B*W, C) row with a red caption;
        # this replaces four copies of the same conversion/putText sequence
        row = ((batch.permute(0, 2, 3, 1).cpu().numpy() + 1.) * 127.5).astype(np.uint8)[:n_logged]
        row = np.concatenate(list(row), axis=1)
        return cv2.UMat.get(cv2.putText(cv2.UMat(row), label,
                                        (int(row.shape[1] // 3), row.shape[0] - int(row.shape[0] / 6)),
                                        cv2.FONT_HERSHEY_SIMPLEX,
                                        float(row.shape[0] / 256), (255, 0, 0), int(row.shape[0] / 128)))

    rows = [_labeled_row(appearance, label_app),
            _labeled_row(shape, label_shape),
            _labeled_row(pred, label_gen)]
    if tgt is not None:
        rows.append(_labeled_row(tgt, target_label))
    return np.concatenate(rows, axis=0)
def scale_img(img):
    """Map an image from [-1, 1] to integer pixel values in [0, 255].

    Accepts either a torch tensor or a numpy array and returns the
    corresponding uint8 container.
    """
    rescaled = (img + 1.) * 127.5
    if isinstance(rescaled, torch.Tensor):
        return rescaled.to(torch.uint8)
    # anything else is assumed to be a numpy array
    return rescaled.astype(np.uint8)
def human_graph_cut_map(img, poke_size):
    """Segment the foreground of `img` with OpenCV GrabCut.

    :param img: HxWx3 uint8 image (as read by cv2)
    :param poke_size: vertical margin (pixels) excluded at the top and bottom
        of the initialization rectangle
    :return: tuple of (row_indices, col_indices) of foreground pixels,
        as returned by np.where
    """
    import cv2
    # make background/foreground segmentation: init rectangle is centered
    # horizontally and inset vertically by poke_size
    mask = np.zeros(img.shape[:2], np.uint8)
    rect = (int(img.shape[1] / 5),
            poke_size,
            int(3. * img.shape[1] / 5),
            int(img.shape[0] - 2 * poke_size))
    fgm = np.zeros((1, 65), dtype=np.float64)
    bgm = np.zeros((1, 65), dtype=np.float64)
    mask2, fgm, bgm = cv2.grabCut(img, mask, rect, fgm, bgm, 5, cv2.GC_INIT_WITH_RECT)
    # labels 0/2 = (probable) background, 1/3 = (probable) foreground;
    # BUGFIX: `np.bool` was removed in NumPy 1.24 -- use the builtin `bool`
    mask3 = np.where((mask2 == 2) | (mask2 == 0), 0, 1).astype(bool)
    tuples = np.where(mask3[:, :])
    return tuples
    # (unreachable matplotlib debug-plot code that followed the return,
    # and the then-unused matplotlib import, have been removed)
# human_NN_map_weights = "/export/home/jsieber/poking/models/mask-rcnn-coco/frozen_inference_graph.pb"
# human_NN_map_classes = "/export/home/jsieber/poking/models/mask-rcnn-coco/object_detection_classes_coco.txt"
# human_NN_map_config = "/export/home/jsieber/poking/models/mask-rcnn-coco/mask_rcnn_inception_v2_coco_2018_01_28.pbtxt"
# human_NN_map_LABELS = open(human_NN_map_classes).read().strip().split("\n")
#
# human_NN_map_net = cv2.dnn.readNetFromTensorflow(human_NN_map_weights, human_NN_map_config)
# def human_NN_map(frame, conf=0.5, threshold=0.3):
#
#
# # modified from https://www.pyimagesearch.com/2018/11/26/instance-segmentation-with-opencv/
# before_width = frame.shape[1]
# frame = imutils.resize(frame, width=600)
# (H, W) = frame.shape[:2]
#
# # construct a blob from the input image and then perform a
# # forward pass of the Mask R-CNN, giving us (1) the bounding
# # box coordinates of the objects in the image along with (2)
# # the pixel-wise segmentation for each specific object
# blob = cv2.dnn.blobFromImage(frame, swapRB=True, crop=False)
# human_NN_map_net.setInput(blob)
# (boxes, masks) = human_NN_map_net.forward(["detection_out_final",
# "detection_masks"])
#
# # sort the indexes of the bounding boxes in by their corresponding
# # prediction probability (in descending order)
# idxs = np.argsort(boxes[0, 0, :, 2])[::-1]
#
# # initialize the mask, ROI, and coordinates of the person for the
# # current frame
# mask = None
# roi = None
# coords = None
#
# # loop over the indexes
# for i in idxs:
# # extract the class ID of the detection along with the
# # confidence (i.e., probability) associated with the
# # prediction
# classID = int(boxes[0, 0, i, 1])
# confidence = boxes[0, 0, i, 2]
#
# # if the detection is not the 'person' class, ignore it
# if human_NN_map_LABELS[classID] != "person":
# continue
# # filter out weak predictions by ensuring the detected
# # probability is greater than the minimum probability
# if confidence > conf:
# # scale the bounding box coordinates back relative to the
# # size of the image and then compute the width and the
# # height of the bounding box
# box = boxes[0, 0, i, 3:7] * np.array([W, H, W, H])
# (startX, startY, endX, endY) = box.astype("int")
# coords = (startX, startY, endX, endY)
# boxW = endX - startX
# boxH = endY - startY
#
# # extract the pixel-wise segmentation for the object,
# # resize the mask such that it's the same dimensions of
# # the bounding box, and then finally threshold to create
# # a *binary* mask
# mask = masks[i, classID]
# mask = cv2.resize(mask, (boxW, boxH),
# interpolation=cv2.INTER_NEAREST)
# mask = (mask > threshold)
#
# # extract the ROI and break from the loop (since we make
# # the assumption there is only *one* person in the frame
# # who is also the person with the highest prediction
# # confidence)
# roi = frame[startY:endY, startX:endX][mask]
# break
#
# # initialize our output frame
# output = frame.copy()
#
# # if the mask is not None *and* we are in privacy mode, then we
# # know we can apply the mask and ROI to the output image
# if mask is not None:
# # blur the output frame
# output = np.zeros_like(output)
#
# # add the ROI to the output frame for only the masked region
# (startX, startY, endX, endY) = coords
# roi = np.ones_like(roi)*255
# output[startY:endY, startX:endX][mask] = roi
# output = imutils.resize(output, width=before_width)
#
# tuples = np.where(output[:, :, 0] > 0)
# return tuples
#
# new_img = np.zeros_like(output)
# for t in tuples:
# for i in range(3):
# new_img[t[0], t[1], i] = 255
# # show the output frame
# plt.imshow(new_img)
# plt.show()
def make_hist(hist, title, ylabel, xlabel="Frame number", bins_edges = None):
    """Render `hist` as a histogram figure and log it to wandb under `title`.

    :param hist: sequence of bin counts/weights
    :param title: wandb key the figure is logged under
    :param ylabel: y-axis label
    :param xlabel: x-axis label
    :param bins_edges: optional bin edges; must have len(hist) + 1 entries.
        Defaults to unit-width bins starting at 0.
    """
    plt.ioff()
    if bins_edges is None:
        # BUGFIX: `np.float` was removed in NumPy 1.24; the builtin `float`
        # is the documented replacement for the alias
        bins_edges = np.arange(0, len(hist) + 1).astype(float)
    else:
        assert len(list(bins_edges)) == len(list(hist)) + 1
    # plot bin centers weighted by the given counts
    centroids = (bins_edges[1:] + bins_edges[:-1]) / 2
    hist_, bins_, _ = plt.hist(
        centroids,
        bins=len(hist),
        weights=np.asarray(hist),
        range=(min(bins_edges), max(bins_edges)),
    )
    plt.xlabel(xlabel)
    plt.ylabel(ylabel)
    wandb.log({title: wandb.Image(plt)})
    plt.close()
def make_plot(x,y,title,ylabel,xlabel="frame idx", savename=None):
    """Plot y over x as a red line with triangle markers.

    The figure is logged to wandb under `title`, unless `savename` is given,
    in which case it is written to disk instead.
    """
    plt.ioff()
    figure, axes = plt.subplots()
    axes.plot(x, y, 'rv-')
    axes.set(xlabel=xlabel, ylabel=ylabel, title=title)
    axes.grid()
    if savename is not None:
        figure.savefig(savename)
    else:
        wandb.log({title: wandb.Image(plt)})
    plt.close()
if __name__=="__main__":
    # ad-hoc benchmark of the segmentation helpers on one iPER frame
    frame_path = "/export/data/ablattma/Datasets/iPER/processed/001_10_1/frame_277.png"
    frame = cv2.imread(frame_path)
    frame = imutils.resize(frame, width=128)
    import time
    # time the GrabCut-based segmentation
    for i in range(3):
        start_time = time.time()
        human_graph_cut_map(frame, 15)
        print("--- %s seconds ---" % (time.time() - start_time))
    # BUGFIX: human_NN_map is defined only in commented-out code above, so the
    # unconditional call raised a NameError; only benchmark it when it exists
    if "human_NN_map" in globals():
        for i in range(3):
            start_time = time.time()
            human_NN_map(frame)
            print("--- %s secondss ---" % (time.time() - start_time))
| 33,806 | 43.424442 | 204 | py |
interactive-image2video-synthesis | interactive-image2video-synthesis-main/utils/flownet_loader.py | import torch
from torch.nn import functional as F
from PIL import Image
from models.flownet2.models import *
from torchvision import transforms
import matplotlib.pyplot as plt
import argparse
from utils.general import get_gpu_id_with_lowest_memory
class FlownetPipeline:
    """Thin wrapper around FlowNet2: checkpoint loading, input preprocessing,
    flow prediction and quick visualization."""

    def __init__(self):
        super(FlownetPipeline, self).__init__()

    def load_flownet(self, args, device):
        """
        Load the pretrained FlowNet2 checkpoint and move it to `device`.

        :param args: args from argparser (FlowNet2 reads e.g. rgb_max / fp16)
        :param device: torch device the model is placed on
        :return: The flownet pytorch model, in eval mode
        """
        # load model savefile (hard-coded cluster path)
        save = torch.load(
            "/export/scratch/compvis/datasets/plants/pretrained_models/FlowNet2_checkpoint.pth.tar")
        model = FlowNet2(args, batchNorm=False)
        untrained_statedict = model.state_dict()  # NOTE(review): unused
        # load it into proper clean model
        model.load_state_dict(save["state_dict"])
        model.eval()
        return model.to(device)

    def preprocess_image(self, img, img2, channelOrder="RGB", spatial_size=None):
        """ This preprocesses the images for FlowNet input. Preserves the height and width order!

        :param channelOrder: RGB(A) or BGR
        :param img: The first image in form of (W x H x RGBA) or (H x W x RGBA)
        :param img2: The second image in form of (W x H x RGBA) or (H x W x RGBA)
        :param spatial_size: optional square size both images are resized to first
        :return: The preprocessed input for the prediction (BGR x Img# x W x H) or (BGR x Img# x H x W)
        """
        # ToTensor transforms from (H x W x C) => (C x H x W)
        # also automatically casts into range [0, 1]
        if spatial_size is None:
            img, img2 = transforms.ToTensor()(img)[:3], transforms.ToTensor()(img2)[:3]
        else:
            ts = transforms.Compose([transforms.ToPILImage(), transforms.Resize(size=spatial_size, interpolation=Image.BILINEAR), transforms.ToTensor()])
            img, img2 = ts(img)[:3], ts(img2)[:3]
        if channelOrder == "RGB":
            # FlowNet2 expects BGR input, so swap the channel order
            img, img2 = img[[2, 1, 0]], img2[[2, 1, 0]]
        # crop the spatial dims down to multiples of 64 (FlowNet2 requirement)
        # Cast to proper shape (Batch x BGR x #Img x H x W)
        s = img.shape
        img, img2 = img[:, :int(s[1] / 64) * 64, :int(s[2] / 64) * 64], \
                    img2[:, :int(s[1] / 64) * 64, :int(s[2] / 64) * 64]
        stacked = torch.cat([img[:, None], img2[:, None]], dim=1)
        return stacked

    def predict(self, model, stacked, spatial_size=None):
        """
        Run FlowNet2 on one preprocessed image pair.

        :param model: the loaded FlowNet2 model
        :param stacked: The two input images. (Batch x BGR x Img# x H x W)
        :param spatial_size: optional size the flow is resized to afterwards
        :return: The flow result (2 x W x H)
        """
        # predict
        model.eval()
        prediction = model(stacked)
        out_size = float(prediction.shape[-1])
        if spatial_size is not None:
            prediction = F.interpolate(
                prediction.cpu(), size=(spatial_size, spatial_size), mode="bilinear"
            )
            # rescale to make it fit to new shape (not grave, if this is skipped as flow is normalized anyways later)
            prediction = prediction / (out_size / spatial_size)
        flow = prediction[0]
        return flow

    def show_results(self, prediction, with_ampl=False):
        """
        Plot a predicted flow with matplotlib.

        :param prediction: The predicted flow (2 x W x H)
        :param with_ampl: if True, show the squared flow magnitude in
            grayscale; otherwise show the two flow channels as red/green
        :return: plots
        """
        zeros = torch.zeros((1, prediction.shape[1], prediction.shape[2]))
        if with_ampl:
            ampl = torch.sum(prediction * prediction, dim=0)
            ampl = ampl.squeeze()
        else:
            # pad a zero blue channel so the flow can be shown as an RGB image
            ampl = torch.cat([prediction, zeros], dim=0)
        # min/max-normalize for display
        ampl -= ampl.min()
        ampl /= ampl.max()
        # show image
        im = transforms.ToPILImage()(ampl)
        if with_ampl:
            plt.imshow(im, cmap='gray')
        else:
            plt.imshow(im)
if __name__ == "__main__":
    # smoke test: compute and display the flow between two plant frames
    # parse args
    parser = argparse.ArgumentParser(description='Process some integers.')
    # always 1.0, because pytorch toTensor automatically converts into range [0.0, 1.0]
    parser.add_argument("--rgb_max", type=float, default=1.)
    parser.add_argument('--fp16', action='store_true', help='Run model in pseudo-fp16 mode (fp16 storage fp32 math).')
    parser.add_argument('--fp16_scale', type=float, default=1024.,
                        help='Loss scaling, positive power of 2 values can improve fp16 convergence.')
    args = parser.parse_args()
    # load test images (NOTE(review): comment said "BGR mode", but PIL opens
    # images as RGB; preprocess_image's default channelOrder handles the swap)
    img, img2 = np.asarray(Image.open(f"/export/data/ablattma/Datasets/plants/processed/hoch_misc1/frame_0.png")), \
                np.asarray(Image.open(f"/export/data/ablattma/Datasets/plants/processed/hoch_misc1/frame_100.png"))
    # load Flownet
    pipeline = FlownetPipeline()
    flownet_device = get_gpu_id_with_lowest_memory()
    flownet = pipeline.load_flownet(args, flownet_device)
    # process to show flow
    stacked = pipeline.preprocess_image(img, img2).to(flownet_device)
    prediction = pipeline.predict(flownet, stacked[None]).cpu()
    pipeline.show_results(prediction)
plt.show() | 4,882 | 37.753968 | 151 | py |
interactive-image2video-synthesis | interactive-image2video-synthesis-main/utils/metrics.py | import torch
from torch import nn
from torch.nn import functional as F
from torchvision.models import inception_v3
import numpy as np
from scipy import linalg
from skimage.metrics import peak_signal_noise_ratio as compare_psnr
from skimage.metrics import structural_similarity as ssim
from pytorch_lightning.metrics import functional as PF
from tqdm import tqdm
class FIDInceptionModel(nn.Module):
    """InceptionV3 feature extractor used for FID: returns pooled Mixed_7c
    features, flattened to (B, feature_dim)."""

    def __init__(self, normalize_range=True):
        """
        :param normalize_range: if True, inputs are expected in [-1, 1];
            otherwise uint8-style values in [0, 255]
        """
        super().__init__()
        self.v3 = inception_v3(pretrained=True, aux_logits=False)
        # self.v3.aux_logits = False
        # ImageNet normalization constants; registered as buffers so they
        # follow the module across devices
        self.register_buffer(
            "mean",
            torch.tensor([0.485, 0.456, 0.406], dtype=torch.float)
            .unsqueeze(dim=0)
            .unsqueeze(dim=-1)
            .unsqueeze(dim=-1),
        )
        self.register_buffer(
            "std",
            torch.tensor([0.229, 0.224, 0.225], dtype=torch.float)
            .unsqueeze(dim=0)
            .unsqueeze(dim=-1)
            .unsqueeze(dim=-1),
        )
        # InceptionV3 expects 299x299 inputs
        self.resize = nn.Upsample(size=(299, 299), mode="bilinear")
        self.normalize_range = normalize_range

    def forward(self, x):
        """Return pooled, flattened Mixed_7c features of the input batch."""
        x = self.resize(x)
        if self.normalize_range:
            # normalize in between 0 and 1
            x = (x + 1.) / 2.
        else:
            x = x.to(torch.float) / 255.
        # normalize to demanded values
        x = (x - self.mean) / self.std
        # this reimplements the respective layers of the inception model, see model definition
        for name, submodule in self.v3._modules.items():
            x = submodule(x)
            if name == "Mixed_7c":
                break
            elif name == "Conv2d_4a_3x3" or name == "Conv2d_2b_3x3":
                x = F.avg_pool2d(x, kernel_size=3, stride=2)
        out = F.adaptive_avg_pool2d(x, (1, 1))
        out = torch.flatten(out, 1)
        return out
def metrcis_MAE(tensor1, tensor2, mean=True):
    """Summed absolute error between two tensors, divided by the element
    count when `mean` is True; returned as a numpy scalar."""
    abs_err = (tensor1 - tensor2).abs().sum()
    if mean:
        abs_err = abs_err / tensor1.numel()
    return abs_err.data.cpu().numpy()
def metrcis_MSE(tensor1, tensor2, mean=True):
    """Summed squared error between two tensors, divided by the element
    count when `mean` is True; returned as a numpy scalar."""
    residual = tensor1 - tensor2
    sq_err = (residual * residual).sum()
    if mean:
        sq_err = sq_err / tensor1.numel()
    return sq_err.data.cpu().numpy()
def metrcis_l1(tensor1, tensor2, mean=False):
    """L1 distance between two tensors; alias of metrcis_MAE but with
    `mean` defaulting to False (i.e. the plain sum)."""
    return metrcis_MAE(tensor1, tensor2, mean)
def metrcis_l2(tensor1, tensor2, mean=False):
    """Euclidean (L2) distance between two tensors as a numpy scalar.

    With `mean=True` the distance itself is divided by the element count
    (note: the division happens after the square root).
    """
    residual = tensor1 - tensor2
    dist = torch.sqrt((residual * residual).sum())
    if mean:
        dist = dist / tensor1.numel()
    return dist.data.cpu().numpy()
def metric_ssim(real, fake, reduce=True, return_per_frame=False):
    """SSIM between two videos/images in [-1, 1] (skimage implementation).

    :param real: tensor of shape (C,H,W), (T,C,H,W) or (B,T,C,H,W)
    :param fake: tensor of the same shape as `real`
    :param reduce: if True, return the mean over all frames
    :param return_per_frame: additionally return a dict {frame_idx: ssim}
    :return: batch ssim (scalar or per-image array), optionally with the
        per-frame dict
    """
    # lift everything to (B, T, C, H, W)
    if real.dim() == 3:
        real = real[None, None]
        fake = fake[None, None]
    elif real.dim() == 4:
        real = real[None]
        fake = fake[None]
    # rescale to valid range [0, 1] and make channels last
    real = ((real + 1.) / 2.).permute(0, 1, 3, 4, 2).cpu().numpy()
    fake = ((fake + 1.) / 2.).permute(0, 1, 3, 4, 2).cpu().numpy()
    ssim_batch = np.asarray([ssim(rimg, fimg, multichannel=True, data_range=1.0,
                                  gaussian_weights=True, use_sample_covariance=False, ) for rimg, fimg in zip(real.reshape(-1, *real.shape[2:]),
                                                                                                              fake.reshape(-1, *fake.shape[2:]))])
    if return_per_frame:
        ssim_per_frame = {}
        for i in range(real.shape[1]):
            real_test = real[:, i]
            fake_test = fake[:, i]
            # NOTE(review): this passes the whole (B,H,W,C) batch slice into a
            # single ssim call, unlike the per-image calls above -- confirm intended
            ssim_per_frame[i] = np.asarray([ssim(real_test, fake_test,
                                                 multichannel=True, data_range=1., gaussian_weights=True, use_sample_covariance=False)])
    # ssim_per_frame = {i:np.asarray([ssim(real[:,i], fake[:,i],
    # multichannel=True, data_range=1., gaussian_weights=True, use_sample_covariance=False)]) for i in range(real.shape[1])}
    if reduce:
        if return_per_frame:
            # NOTE(review): identity mapping -- per-frame values are not
            # actually averaged here (metric_lpips applies .mean() instead)
            ssim_pf_reduced = {key: ssim_per_frame[key] for key in ssim_per_frame}
            return np.mean(ssim_batch), ssim_pf_reduced
        else:
            return np.mean(ssim_batch)
    if return_per_frame:
        return ssim_batch, ssim_per_frame
    else:
        return ssim_batch
def ssim_lightning(real, fake, return_per_frame=False, normalize_range=True):
    """SSIM via the pytorch-lightning functional metrics.

    :param real: tensor of shape (C,H,W), (T,C,H,W) or (B,T,C,H,W)
    :param fake: tensor of the same shape as `real`
    :param return_per_frame: additionally return a dict {frame_idx: ssim}
    :param normalize_range: rescale inputs from [-1, 1] to [0, 1] first
    :return: batch ssim, optionally as a (batch, per_frame) tuple
    """
    # lift everything to (B, T, C, H, W)
    if real.dim() == 3:
        real = real[None, None]
        fake = fake[None, None]
    elif real.dim() == 4:
        real = real[None]
        fake = fake[None]
    if normalize_range:
        real = (real + 1.) / 2.
        fake = (fake + 1.) / 2.
    # flatten batch and time into one image batch
    ssim_batch = PF.ssim(fake.reshape(-1, *fake.shape[2:]), real.reshape(-1, *real.shape[2:])).cpu().numpy()
    if return_per_frame:
        ssim_per_frame = {i: PF.ssim(fake[:, i], real[:, i]).cpu().numpy() for i in range(real.shape[1])}
        return ssim_batch, ssim_per_frame
    return ssim_batch
def psnr_lightning(real, fake, return_per_frame=False, normalize_range=True):
    """PSNR via the pytorch-lightning functional metrics.

    :param real: tensor of shape (C,H,W), (T,C,H,W) or (B,T,C,H,W)
    :param fake: tensor of the same shape as `real`
    :param return_per_frame: additionally return a dict {frame_idx: psnr}
    :param normalize_range: rescale inputs from [-1, 1] to [0, 1] first
    :return: batch psnr, optionally as a (batch, per_frame) tuple
    """
    # lift everything to (B, T, C, H, W)
    if real.dim() == 3:
        real = real[None, None]
        fake = fake[None, None]
    elif real.dim() == 4:
        real = real[None]
        fake = fake[None]
    if normalize_range:
        real = (real + 1.) / 2.
        fake = (fake + 1.) / 2.
    # flatten batch and time into one image batch
    psnr_batch = PF.psnr(fake.reshape(-1, *fake.shape[2:]), real.reshape(-1, *real.shape[2:])).cpu().numpy()
    if return_per_frame:
        # .contiguous() because slicing yields non-contiguous views
        psnr_per_frame = {i: PF.psnr(fake[:, i].contiguous(), real[:, i].contiguous()).cpu().numpy() for i in range(real.shape[1])}
        return psnr_batch, psnr_per_frame
    return psnr_batch
def metric_psnr(im_true, im_test, reduce=True, return_per_frame=False):
    """PSNR between two videos/images in [-1, 1] (skimage implementation).

    :param im_true: tensor of shape (C,H,W), (T,C,H,W) or (B,T,C,H,W)
    :param im_test: tensor of the same shape as `im_true`
    :param reduce: if True, return the mean over all frames
    :param return_per_frame: additionally return a dict {frame_idx: psnr}
    :return: batch psnr (scalar or per-image array), optionally with the
        per-frame dict
    """
    # lift everything to (B, T, C, H, W)
    if im_true.dim() == 3:
        im_true, im_test = im_true[None, None], im_test[None, None]
    elif im_true.dim() == 4:
        im_true, im_test = im_true[None], im_test[None]
    # rescale to [0, 1] and make channel last
    real = ((im_true + 1.) / 2.).permute(0, 1, 3, 4, 2).cpu().numpy()
    fake = ((im_test + 1.) / 2.).permute(0, 1, 3, 4, 2).cpu().numpy()
    psnr_batch = np.asarray([compare_psnr(r, f, data_range=1.) for r, f in zip(real.reshape(-1, *real.shape[2:]), fake.reshape(-1, *fake.shape[2:]))])
    if return_per_frame:
        # NOTE(review): each entry is computed over the whole batch slice
        # (B,H,W,C), unlike the per-image calls above -- confirm intended
        psnr_per_frame = {i: np.asarray([compare_psnr(real[:, i], fake[:, i], data_range=1.)]) for i in range(real.shape[1])}
    if reduce:
        if return_per_frame:
            # NOTE(review): identity mapping -- values are not averaged here
            psnr_pf_reduced = {key: psnr_per_frame[key] for key in psnr_per_frame}
            return np.mean(psnr_batch), psnr_pf_reduced
        else:
            return np.mean(psnr_batch)
    if return_per_frame:
        return psnr_batch, psnr_per_frame
    else:
        return psnr_batch
def metric_lpips(real, fake, lpips_func, reduce=True, return_per_frame=False, normalize=False):
    """LPIPS distance between two videos/images.

    :param real: tensor of shape (C,H,W), (T,C,H,W) or (B,T,C,H,W)
    :param fake: tensor of the same shape as `real`
    :param lpips_func: callable computing LPIPS on image batches in [-1, 1]
    :param reduce: if True, return means instead of per-sample arrays
    :param return_per_frame: additionally return a dict {frame_idx: lpips}
    :param normalize: rescale inputs to [-1, 1] (from uint8-style [0, 255]
        when values exceed 1, else from [0, 1])
    :return: lpips value(s), optionally as a (batch, per_frame) tuple
    """
    # lift everything to (B, T, C, H, W)
    if real.dim() == 3:
        real, fake = real[None, None], fake[None, None]
    elif real.dim() == 4:
        real, fake = real[None], fake[None]
    if normalize:
        if fake.max() > 1:
            # uint8-style inputs in [0, 255]
            fake = (fake.to(torch.float) / 127.5) - 1.
            real = (real.to(torch.float) / 127.5) - 1.
        else:
            # inputs in [0, 1]
            real = (real * 2.) - 1.
            fake = (fake * 2.) - 1.
    # flatten batch and time into one image batch
    lpips_batch = lpips_func(real.reshape(-1, *real.shape[2:]), fake.reshape(-1, *fake.shape[2:])).squeeze().cpu().numpy()
    if return_per_frame:
        lpips_per_frame = {i: lpips_func(real[:, i], fake[:, i]).squeeze().cpu().numpy() for i in range(real.shape[1])}
    if reduce:
        if return_per_frame:
            lpips_pf_reduced = {key: lpips_per_frame[key].mean() for key in lpips_per_frame}
            return lpips_batch.mean(), lpips_pf_reduced
        else:
            return lpips_batch.mean()
    if return_per_frame:
        return lpips_batch, lpips_per_frame
    else:
        return lpips_batch
def mean_cov(features):
    """Per-dimension mean and covariance of a (samples, features) matrix."""
    return np.mean(features, axis=0), np.cov(features, rowvar=False)
def metric_fid(real_features, fake_features, eps=1e-6):
    """Frechet Inception Distance between two sets of feature vectors.

    Taken and adapted from
    https://github.com/mseitzer/pytorch-fid/blob/master/fid_score.py

    :param real_features: (N, D) array, or a list of per-batch arrays to stack
    :param fake_features: (M, D) array, or a list of per-batch arrays to stack
    :param eps: diagonal jitter applied when the covariance product is singular
    :return: scalar FID value
    """
    # lists of per-batch features are stacked into one matrix first
    if not isinstance(real_features, np.ndarray):
        real_features = np.concatenate(real_features, axis=0)
        fake_features = np.concatenate(fake_features, axis=0)
    # Gaussian statistics of both feature distributions
    mu1, cov1 = mean_cov(real_features)
    mu2, cov2 = mean_cov(fake_features)
    mu1, mu2 = np.atleast_1d(mu1), np.atleast_1d(mu2)
    sigma1, sigma2 = np.atleast_2d(cov1), np.atleast_2d(cov2)
    assert mu1.shape == mu2.shape, "Training and test mean vectors have different lengths"
    assert sigma1.shape == sigma2.shape, "Training and test covariances have different dimensions"
    diff = mu1 - mu2
    # the matrix square root of the covariance product might be almost singular
    covmean, _ = linalg.sqrtm(sigma1.dot(sigma2), disp=False)
    if not np.isfinite(covmean).all():
        msg = f"fid calculation produces singular product; adding {eps} to diagonal of cov estimates"
        print(msg)
        offset = np.eye(sigma1.shape[0]) * eps
        covmean = linalg.sqrtm((sigma1 + offset).dot(sigma2 + offset))
    # numerical error might give a slight imaginary component
    if np.iscomplexobj(covmean):
        if not np.allclose(np.diagonal(covmean).imag, 0, atol=1e-3):
            m = np.max(np.abs(covmean.imag))
            raise ValueError(f"Imaginary component {m}")
        covmean = covmean.real
    return diff.dot(diff) + np.trace(sigma1) + np.trace(sigma2) - 2 * np.trace(covmean)
def compute_fvd(real_videos, fake_videos, device, logger):
    """Compute the Frechet Video Distance between real and fake video sets.

    Builds an I3D embedding graph in TF1 compatibility mode, embeds both video
    sets batch by batch, then evaluates the Frechet distance on the 400-d
    embeddings.

    :param real_videos: iterable of uint8 video batches; all batches share one shape
    :param fake_videos: iterable of uint8 video batches, aligned with real_videos
    :param device: GPU identifier used to select the visible tensorflow device
    :param logger: logger for progress / result messages
    :return: scalar fvd value
    """
    import silence_tensorflow.auto
    import tensorflow.compat.v1 as tf
    from utils.frechet_video_distance import preprocess, Embedder, calculate_fvd
    # required for fvd computation
    # config = tf.ConfigProto()
    # config.gpu_options.visible_device_list = f"{device}"
    # restrict TF to the one GPU whose name ends with the requested id
    devs = tf.config.experimental.get_visible_devices("GPU")
    target_dev = [d for d in devs if d.name.endswith(str(device))][0]
    tf.config.experimental.set_visible_devices(target_dev, 'GPU')
    logger.info("Compute fvd score.")
    # dev = f"/gpu:{device}"
    logger.info(f"using device {device}")
    # "/gpu:0" because only one device is visible after the restriction above
    with tf.device("/gpu:0"):
        with tf.Graph().as_default():
            # construct graph
            sess = tf.Session()
            input_shape = real_videos[0].shape
            input_real = tf.placeholder(dtype=tf.uint8, shape=input_shape)
            input_fake = tf.placeholder(dtype=tf.uint8, shape=input_shape)
            real_pre = preprocess(input_real, (224, 224))
            emb_real = Embedder(real_pre)
            embed_real = emb_real.create_id3_embedding(real_pre)
            fake_pre = preprocess(input_fake, (224, 224))
            emb_fake = Embedder(fake_pre)
            embed_fake = emb_fake.create_id3_embedding(fake_pre)
            sess.run(tf.global_variables_initializer())
            sess.run(tf.tables_initializer())
            real, fake = [], []
            for rv, fv in tqdm(zip(real_videos, fake_videos)):
                # real_batch = ((rv + 1.) * 127.5).permute(0, 1, 3, 4, 2).cpu().numpy()
                # fake_batch = ((fv + 1.) * 127.5).permute(0, 1, 3, 4, 2).cpu().numpy()
                # real_batch = ((rv + 1.) * 127.5).cpu().numpy()
                # fake_batch = ((fv + 1.) * 127.5).cpu().numpy()
                feed_dict = {input_real: rv, input_fake: fv}
                # NOTE(review): r/f receive [embed_fake, embed_real] in that
                # order, so the two lists are swapped; the Frechet distance is
                # symmetric, but confirm this is intentional
                r, f = sess.run([embed_fake, embed_real], feed_dict)
                real.append(r)
                fake.append(f)
            print('Compute FVD score')
            real = np.concatenate(real, axis=0)
            fake = np.concatenate(fake, axis=0)
            # second pass: feed the collected 400-d embeddings into the fvd op
            embed_real = tf.placeholder(dtype=tf.float32, shape=(real.shape[0], 400))
            embed_fake = tf.placeholder(dtype=tf.float32, shape=(real.shape[0], 400))
            result = calculate_fvd(embed_real, embed_fake)
            feed_dict = {embed_real: real, embed_fake: fake}
            fvd_val = sess.run(result, feed_dict)
            sess.close()
    logger.info(f"Results of fvd computation: fvd={fvd_val}")
    # for being sure
    return fvd_val
if __name__ == "__main__":
    # ad-hoc scratch code: two random image-shaped tensors with pinned corner
    # values (no metric is actually invoked here)
    z, o = torch.rand((1080, 720, 3)), torch.rand((1080, 720, 3))
    o[0, 0, 0], o[1, 0, 0] = 0, 1
    z[0, 0, 0], z[1, 0, 0] = 0, 1
| 12,279 | 33.985755 | 153 | py |
interactive-image2video-synthesis | interactive-image2video-synthesis-main/utils/general.py | import torch
import os
import subprocess
import logging
import yaml
import logging.config
import inspect
from os import walk
import numpy as np
import coloredlogs
import multiprocessing as mp
from threading import Thread
from queue import Queue
from collections import abc
import cv2
from torch import nn
# import kornia
def get_member(model, name):
    """Fetch attribute `name` from a model, unwrapping nn.DataParallel first."""
    target = model.module if isinstance(model, nn.DataParallel) else model
    return getattr(target, name)
def convert_flow_2d_to_3d(flow):
    """Convert a (2, H, W) flow field into (3, H, W): unit direction plus amplitude.

    Channels 0-1 hold the flow normalized to unit length, channel 2 its
    per-pixel magnitude; zero-motion pixels keep a zero direction.
    """
    magnitude = torch.sqrt(torch.sum(flow * flow, dim=0, keepdim=True))
    # avoid 0/0 at zero-motion pixels by dividing those by 1 instead
    denom = magnitude.clone()
    denom[denom == 0.0] = 1.0
    direction = flow / denom
    return torch.cat([direction, magnitude], dim=0)
def convert_flow_2d_to_3d_batch(flows):
    """Apply convert_flow_2d_to_3d to every flow of a (B, 2, H, W) batch."""
    return torch.stack([convert_flow_2d_to_3d(f) for f in flows], dim=0)
def get_flow_gradients(flow, device=None):
    """Sobel gradient magnitudes (first and second order, x and y) of a flow.

    torch in, torch out.

    :param flow: (C, H, W) tensor; each channel is filtered independently
    :param device: unused -- kernels are moved to flow's device automatically
    :return: tuple (d1_x, d1_y, d2_x, d2_y) of per-pixel gradient magnitudes
    """
    # treat each flow channel as a single-channel image: (C, 1, H, W)
    flow = flow[:, None]
    sobel = [[1, 2, 1], [0, 0, 0], [-1, -2, -1]]
    sobel_kernel_y = torch.tensor(sobel, dtype=torch.float32).unsqueeze(0)
    # the x kernel is the transpose of the y kernel
    sobel_kernel_x = torch.transpose(sobel_kernel_y, 1, 2)
    sobel_kernel_x, sobel_kernel_y = sobel_kernel_x.expand((1, 1, 3, 3)), sobel_kernel_y.expand((1, 1, 3, 3))
    if flow.is_cuda:
        sobel_kernel_x, sobel_kernel_y = sobel_kernel_x.to(flow.get_device()), sobel_kernel_y.to(flow.get_device())
    # second derivatives are obtained by convolving twice
    gradient_d1_x = torch.nn.functional.conv2d(flow, sobel_kernel_x, stride=1, padding=1)
    gradient_d2_x = torch.nn.functional.conv2d(gradient_d1_x, sobel_kernel_x, stride=1, padding=1)
    gradient_d1_y = torch.nn.functional.conv2d(flow, sobel_kernel_y, stride=1, padding=1)
    gradient_d2_y = torch.nn.functional.conv2d(gradient_d1_y, sobel_kernel_y, stride=1, padding=1)
    gradient_d1_x, gradient_d2_x, gradient_d1_y, gradient_d2_y = gradient_d1_x.squeeze(), \
                                                                 gradient_d2_x.squeeze(), \
                                                                 gradient_d1_y.squeeze(), \
                                                                 gradient_d2_y.squeeze()
    # magnitude across the flow channels
    gradient_d1_x = torch.sqrt(torch.sum(gradient_d1_x ** 2, dim=0))
    gradient_d1_y = torch.sqrt(torch.sum(gradient_d1_y ** 2, dim=0))
    gradient_d2_x = torch.sqrt(torch.sum(gradient_d2_x ** 2, dim=0))
    gradient_d2_y = torch.sqrt(torch.sum(gradient_d2_y ** 2, dim=0))
    return gradient_d1_x, gradient_d1_y, gradient_d2_x, gradient_d2_y
def get_flow_gradients_batch(flows):
    """Stack the four Sobel gradient maps of every flow in the batch.

    NOTE: the trailing squeeze(dim=0) drops the batch axis when the batch
    contains a single element (mirrors the original behavior).
    """
    per_flow = []
    for single_flow in flows:
        g1x, g1y, g2x, g2y = get_flow_gradients(single_flow)
        per_flow.append(torch.stack([g1x, g1y, g2x, g2y], dim=0).squeeze(dim=0))
    return torch.stack(per_flow, dim=0).squeeze(dim=0)
class LoggingParent:
    """Mixin that equips subclasses with a logger named after their
    module path relative to the project root.

    The project root is located by walking up from the subclass's source
    file until a directory containing a .gitignore is found.
    """

    def __init__(self):
        super(LoggingParent, self).__init__()
        # find project root
        mypath = inspect.getfile(self.__class__)
        mypath = "/".join(mypath.split("/")[:-1])
        found = False
        while mypath != "" and not found:
            f = []
            # walk() is broken out of immediately: only the top-level
            # filenames of mypath are collected
            for (dirpath, dirnames, filenames) in walk(mypath):
                f.extend(filenames)
                break
            if ".gitignore" in f:
                found = True
                continue
            mypath = "/".join(mypath.split("/")[:-1])
        project_root = mypath + "/"
        # Put it together: "pkg/sub/file.py" -> "pkg.sub.file", plus the class name
        file = inspect.getfile(self.__class__).replace(project_root, "").replace("/", ".").split(".py")[0]
        cls = str(self.__class__)[8:-2]
        cls = str(cls).replace("__main__.", "").split(".")[-1]
        self.logger = get_logger(f"{file}.{cls}")
def get_gpu_id_with_lowest_memory(index=0, target_gpus: list = None):
    """Pick the CUDA device with the most free memory according to nvidia-smi.

    :param index: 0 returns the device with the most free memory,
        1 the runner-up, and so on
    :param target_gpus: optional whitelist of GPU ids to choose from
    :return: torch.device("cuda:<id>")
    """
    # get info from nvidia-smi: one free-memory value (MiB) per line/GPU
    result = subprocess.check_output(
        [
            'nvidia-smi', '--query-gpu=memory.free',
            '--format=csv,nounits,noheader'
        ], encoding='utf-8')
    gpu_memory = [int(x) for x in result.strip().split('\n')]
    # GPU ids sorted by free memory, ascending; pick from the end
    if target_gpus is None:
        indices = np.argsort(gpu_memory)
    else:
        indices = [i for i in np.argsort(gpu_memory) if i in target_gpus]
    return torch.device(f"cuda:{indices[-index-1]}")
iuhihfie_logger_loaded = False
def get_logger(name):
    """Return a colored logger named `name`.

    The logging config (utils/logging.yaml, next to this module) is loaded
    once per process, guarded by the module-level flag.
    """
    # setup logging
    global iuhihfie_logger_loaded
    if not iuhihfie_logger_loaded:
        with open(f'{os.path.dirname(os.path.abspath(__file__))}/logging.yaml', 'r') as f:
            log_cfg = yaml.load(f.read(), Loader=yaml.FullLoader)
        logging.config.dictConfig(log_cfg)
        iuhihfie_logger_loaded = True
    logger = logging.getLogger(name)
    coloredlogs.install(logger=logger, level="DEBUG")
    return logger
def save_model_to_disk(path, models, epoch):
    """Save the state dict of every model in `models` under directory `path`.

    Files are named ``model_<i>-epoch<epoch>`` where i is the model's index
    in `models`.

    :param path: target directory (created if missing)
    :param models: iterable of torch modules
    :param epoch: epoch number encoded into the filename
    """
    # create the target directory once up front (the original re-checked it
    # on every loop iteration)
    os.makedirs(path, exist_ok=True)
    for i, model in enumerate(models):
        # BUGFIX: os.path.join replaces bare string concatenation, which
        # silently glued the filename onto `path` when it lacked a trailing
        # separator (e.g. "dirmodel_0-epoch3")
        torch.save(model.state_dict(), os.path.join(path, f"model_{i}-epoch{epoch}"))
def _do_parallel_data_prefetch(func, Q, data, idx):
# create dummy dataset instance
# run prefetching
res = func(data)
Q.put([idx, res])
Q.put("Done")
def parallel_data_prefetch(
        func: callable, data, n_proc, target_data_type="ndarray", cpu_intensive=True
):
    """Apply `func` to `data` split across `n_proc` workers and concatenate
    the results in the original order.

    :param func: callable applied to each chunk of `data`
    :param data: np.ndarray or iterable (dict values are used, keys dropped)
    :param n_proc: number of worker processes/threads
    :param target_data_type: "ndarray" or "list" -- how `data` is coerced/split
    :param cpu_intensive: processes (True) vs. threads (False, for IO-bound work)
    :return: np.ndarray of the concatenated worker outputs
    :raises ValueError: on an invalid target_data_type or list/ndarray mismatch
    :raises TypeError: when `data` is neither ndarray nor iterable
    """
    if target_data_type not in ["ndarray", "list"]:
        raise ValueError(
            "Data, which is passed to parallel_data_prefetch has to be either of type list or ndarray."
        )
    if isinstance(data, np.ndarray) and target_data_type == "list":
        raise ValueError("list expected but function got ndarray.")
    elif isinstance(data, abc.Iterable):
        if isinstance(data, dict):
            print(
                f'WARNING:"data" argument passed to parallel_data_prefetch is a dict: Using only its values and disregarding keys.'
            )
            data = list(data.values())
        if target_data_type == "ndarray":
            data = np.asarray(data)
        else:
            data = list(data)
    else:
        raise TypeError(
            f"The data, that shall be processed parallel has to be either an np.ndarray or an Iterable, but is actually {type(data)}."
        )
    # processes for CPU-bound work, threads for IO-bound work
    if cpu_intensive:
        Q = mp.Queue(1000)
        proc = mp.Process
    else:
        Q = Queue(1000)
        proc = Thread
    # spawn processes: each worker gets (func, queue, chunk, chunk-index)
    if target_data_type == "ndarray":
        arguments = [
            [func, Q, part, i]
            for i, part in enumerate(np.array_split(data, n_proc))
        ]
    else:
        step = (
            int(len(data) / n_proc + 1)
            if len(data) % n_proc != 0
            else int(len(data) / n_proc)
        )
        arguments = [
            [func, Q, part, i]
            for i, part in enumerate(
                [data[i: i + step] for i in range(0, len(data), step)]
            )
        ]
    processes = []
    for i in range(n_proc):
        p = proc(target=_do_parallel_data_prefetch, args=arguments[i])
        processes += [p]
    # start processes
    print(f"Start prefetching...")
    import time
    start = time.time()
    # results are stored by chunk index so output order matches input order
    gather_res = [[] for _ in range(n_proc)]
    try:
        for p in processes:
            p.start()
        # each worker pushes [idx, result] followed by a "Done" sentinel;
        # loop until all n_proc sentinels have been seen
        k = 0
        while k < n_proc:
            # get result
            res = Q.get()
            if res == "Done":
                k += 1
            else:
                gather_res[res[0]] = res[1]
    except Exception as e:
        print("Exception: ", e)
        for p in processes:
            p.terminate()
        raise e
    finally:
        for p in processes:
            p.join()
        print(f"Prefetching complete. [{time.time() - start} sec.]")
    if not isinstance(gather_res[0], np.ndarray):
        return np.concatenate([np.asarray(r) for r in gather_res], axis=0)
    # order outputs
    return np.concatenate(gather_res, axis=0)
def linear_var(
        act_it, start_it, end_it, start_val, end_val, clip_min, clip_max
):
    """Linearly interpolate a scheduled value at iteration `act_it`.

    The line runs from (start_it, start_val) to (end_it, end_val); the result
    is clipped to [clip_min, clip_max].
    """
    slope = float(end_val - start_val) / (end_it - start_it)
    interpolated = slope * (act_it - start_it) + start_val
    return np.clip(interpolated, a_min=clip_min, a_max=clip_max)
def get_patches(seq_batch, weights, config, fg_value, logger=None):
    """
    Crop each video to the bounding box of its foreground flow weights and
    resize the crop back to the configured spatial size.

    :param seq_batch: Batch of videos
    :param weights: batch of flow weights for the videos
    :param config: config, containing spatial_size
    :param fg_value: foreground value of the weight map
    :param logger: optional logger; warnings are printed when it is None
    :return: batch of cropped-and-resized videos, stacked along dim 0
    """
    import kornia
    weights_as_bool = torch.eq(weights, fg_value)
    cropped = []
    for vid, weight in zip(seq_batch, weights_as_bool):
        vid_old = vid
        # (row_indices, col_indices) of foreground pixels
        weight_ids = torch.nonzero(weight, as_tuple=True)
        try:
            # bounding box of the foreground region (note: max is exclusive
            # in the slice below)
            min_y = weight_ids[0].min()
            max_y = weight_ids[0].max()
            min_x = weight_ids[1].min()
            max_x = weight_ids[1].max()
            vid = vid[..., min_y:max_y, min_x:max_x]
            if len(vid.shape) < 4:
                # single image: lift to 4D for kornia, then drop the dim again
                data_4d = vid[None, ...]
                vid = kornia.transform.resize(data_4d, config["spatial_size"])
                cropped.append(vid.squeeze(0))
            else:
                vid = kornia.transform.resize(vid, config["spatial_size"])
                cropped.append(vid)
        except Exception as e:
            # e.g. empty foreground -> min()/max() on an empty tensor raises;
            # fall back to the uncropped video for this sample
            if logger is None:
                print(e)
            else:
                logger.warn(f'Catched the following exception in "get_patches": {e.__class__.__name__}: {e}. Skip patching this sample...')
            cropped.append(vid_old)
    return torch.stack(cropped, dim=0)
if __name__ == "__main__":
    # Smoke test: print the id of the GPU selected by get_gpu_id_with_lowest_memory
    # (defined earlier in this module).
    print(get_gpu_id_with_lowest_memory())
| 10,061 | 33.108475 | 139 | py |
interactive-image2video-synthesis | interactive-image2video-synthesis-main/data/flow_dataset.py | from os import path
import numpy as np
import pickle
from copy import deepcopy
import torch
from torch.nn import functional as F
from torch.utils.data import Dataset
from torchvision import transforms as tt
from tqdm import tqdm
import cv2
from natsort import natsorted
import os
from glob import glob
from utils.general import parallel_data_prefetch, LoggingParent
from data.helper_functions import preprocess_image
from data.base_dataset import BaseDataset
class PlantDataset(BaseDataset):
    """Video/optical-flow dataset for plant sequences, built on BaseDataset.

    Reads a pickled metadata file from ``datapath``, resolves absolute image
    and flow paths, derives per-sequence bookkeeping (sequence start/end ids,
    valid flow lags), computes per-object sampling weights and can optionally
    preload flows and images into RAM.
    """

    def __init__(self, transforms, datakeys, config, train=True, google_imgs=False, n_ref_frames=None):
        """
        :param transforms: torchvision transforms, forwarded to BaseDataset
        :param datakeys: keys of the data modalities to yield per sample
        :param config: dataset section of the experiment config (dict)
        :param train: if True, use the train split, else the test split
        :param google_imgs: if True, replace the image paths by the files in
            the "google_images" subdirectory of ``datapath``
        :param n_ref_frames: unused here; the value is read from ``config``
        """
        self.excluded_objects = config["excluded_objects"] if "excluded_objects" in config else []
        super().__init__(transforms, datakeys, config,train=train)
        self.logger.info(f"Initializing {self.__class__.__name__}.")
        # in-RAM caching is only considered for resolutions <= 256
        # (NOTE(review): the same guard is duplicated for imgs_in_ram below)
        if self.config["spatial_size"][0] <= 256:
            self.flow_in_ram = self.config["flow_in_ram"] if "flow_in_ram" in self.config else False
        if self.config["spatial_size"][0] <= 256:
            self.imgs_in_ram = self.config["imgs_in_ram"] if "imgs_in_ram" in self.config else False
        # set instace specific fixed values which shall not be parameters from yaml
        self._set_instance_specific_values()
        self.subsample_step = config["subsample_step"] if "subsample_step" in config else self.subsample_step
        self.logger.info(f'Subsample step of {self.__class__.__name__} is {self.subsample_step}.')
        filt_msg = "enabled" if self.filter_flow else "disabled"
        self.logger.info(f"Flow filtering is {filt_msg} in {self.__class__.__name__}!")
        self.logger.info(f"Valid lag of {self.__class__.__name__} is {self.valid_lags[0]}")
        # load data
        metafile_path = path.join(self.datapath, f"{self.metafilename}.p")
        with open(metafile_path, "rb") as handle:
            self.data = pickle.load(handle)
        # optional per-dataset flow normalization statistics
        if path.isfile(path.join(self.datapath,"dataset_stats.p")) and self.normalize_flows:
            with open(path.join(self.datapath,"dataset_stats.p"),"rb") as norm_file:
                self.flow_norms = pickle.load(norm_file)
        # choose filter procedure
        # frame offsets covered by the flow files of the first sequence,
        # parsed from filenames of the form ..._<startframe>_<endframe>.<ext>
        available_frame_nrs = np.asarray([int(p.split("/")[-1].split(".")[0].split("_")[-1]) - int(p.split("/")[-1].split(".")[0].split("_")[-2]) for p in self.data["flow_paths"][0]])
        # filter invalid flow_paths
        self.data["flow_paths"] = [p for p in self.data["flow_paths"] if len(p) == len(available_frame_nrs)]
        self.filter_proc = self.config["filter"] if "filter" in self.config else "all"
        # remove invalid video
        valid_ids = np.logical_not(np.char.startswith(self.data["img_path"],"VID_0_3_1024x1024"))
        # set flow paths in right order after reading in the data
        if "max_fid" not in self.data:
            self.data["flow_paths"] = [natsorted(d) for d in self.data["flow_paths"]]
        # make absolute image and flow paths
        self.data["img_path"] = [
            path.join(self.datapath, p if not p.startswith("/") else p[1:]) for p in self.data["img_path"]
        ]
        self.data["flow_paths"] = [
            [path.join(self.datapath, f if not f.startswith("/") else f[1:]) for f in fs]
            for fs in self.data["flow_paths"]
        ]
        # convert to numpy array
        self.data = {key: np.asarray(self.data[key])[valid_ids] for key in self.data}
        # if max fid is not predefined, the videos, the dataset consists of are sufficiently long, such that it doesn't make much of a difference,
        # if some frames at the end are skipped, therefore, we set the last valid fid (which is indicated by "max_fid") to the maximum fid
        # in the respective sequence
        if "max_fid" not in self.data:
            available_frame_nrs = np.asarray([int(p.split("/")[-1].split(".")[0].split("_")[-1]) - int(p.split("/")[-1].split(".")[0].split("_")[-2]) for p in self.data["flow_paths"][0]])
            # NOTE(review): np.int / np.float used below are deprecated numpy
            # aliases (removed in numpy >= 1.24)
            self.data.update({"max_fid": np.zeros((np.asarray(self.data["fid"]).shape[0],max(len(available_frame_nrs),self.valid_lags[0]+1)),dtype=np.int)})
            for vid in np.unique(self.data["vid"]):
                self.data["max_fid"][self.data["vid"] == vid] = np.amax(self.data["fid"][self.data["vid"] == vid])
        if not self.var_sequence_length and ("poke" in self.datakeys or "flow" in self.datakeys) and not self.normalize_flows:
            # reset valid_lags, such that always the right flow which corresponds to the respective sequence length, is chosen
            if not self.__class__.__name__ == "Human36mDataset":
                available_frame_nrs = np.asarray([int(p.split("/")[-1].split(".")[0].split("_")[-1]) - int(p.split("/")[-1].split(".")[0].split("_")[-2]) for p in self.data["flow_paths"][0]])
                if "n_ref_frames" not in self.config:
                    assert self.max_frames * self.subsample_step in available_frame_nrs
                    right_lag = int(np.argwhere(available_frame_nrs == self.max_frames * self.subsample_step))
                    self.logger.info(f'Last frames of sequence serves as target frame.')
                else:
                    self.logger.info(f'Number of frames in between target and start frames is {self.config["n_ref_frames"]}')
                    assert self.config["n_ref_frames"]*self.subsample_step in available_frame_nrs
                    right_lag = int(np.argwhere(available_frame_nrs==self.config["n_ref_frames"] * self.subsample_step))
                self.valid_lags = [right_lag]
            else:
                # Human36m stores flows at fixed lags; see Human36mDataset
                assert self.max_frames == 10
                assert self.subsample_step in [1,2]
                self.valid_lags = [0] if self.subsample_step == 1 else [1]
            self.logger.info(f"Dataset is run in fixed length mode, valid lags are {self.valid_lags}.")
        filt_msg = "enabled" if self.filter_flow else "disabled"
        self.logger.info(f"Flow filtering is {filt_msg} in {self.__class__.__name__}!")
        self.logger.info(f"Valid lag of {self.__class__.__name__} is {self.valid_lags[0]}")
        filt_msg = "enabled" if self.obj_weighting else "disabled"
        self.logger.info(f"Object weighting is {filt_msg} in {self.__class__.__name__}!")
        filt_msg = "enabled" if self.flow_weights else "disabled"
        self.logger.info(f"Patch weighting is {filt_msg} in {self.__class__.__name__}!")
        filt_msg = "enabled" if self.use_flow_for_weights else "disabled"
        self.logger.info(f"Flow patch extraction is {filt_msg} in {self.__class__.__name__}!")
        # optionally restrict the data to a single action category
        if self.filter_proc == "action":
            self.data = {key:self.data[key][self.data["action_id"]==2] for key in self.data}
        elif self.filter_proc == "pose":
            self.data = {key: self.data[key][self.data["action_id"] == 1] for key in self.data}
        # on this point, the raw data is parsed and can be processed further
        # exclude invalid object ids from data
        self.logger.info(f"Excluding the following, user-defined object ids: {self.excluded_objects} from dataloading.")
        kept_ids = np.nonzero(np.logical_not(np.isin(self.data["object_id"], self.excluded_objects)))[0]
        self.data = {key:self.data[key][kept_ids] for key in self.data}
        self.split = self.config["split"]
        split_data, train_indices, test_indices = self._make_split(self.data)
        self.datadict = (
            split_data["train"] if self.train else split_data["test"]
        )
        # NOTE(review): this value is never used; "msg" is re-assigned before the final log below
        msg = "train" if self.train else "test"
        vids, start_ids = np.unique(self.datadict["vid"],return_index=True)
        # get start and end ids per sequence
        self.eids_per_seq = {vid: np.amax(np.flatnonzero(self.datadict["vid"] == vid)) for vid in vids}
        seids = np.asarray([self.eids_per_seq[self.datadict["vid"][i]] for i in range(self.datadict["img_path"].shape[0])],dtype=np.int)
        self.datadict.update({"seq_end_id": seids})
        self.sids_per_seq = {vid:i for vid,i in zip(vids,start_ids)}
        # map sequence length -> lower flow-magnitude threshold of its chunk
        self.seq_len_T_chunk = {l: c for l,c in enumerate(np.linspace(0,self.flow_cutoff,self.max_frames,endpoint=False))}
        # add last chunk
        self.seq_len_T_chunk.update({self.max_frames: self.flow_cutoff})
        if self.var_sequence_length:
            # precompute, per possible sequence length, the sample ids that may
            # start a sequence of that length (flow magnitude in the right chunk
            # and enough frames left before the sequence end / max_fid)
            if "flow_range" in self.datadict.keys():
                self.ids_per_seq_len = {length: np.flatnonzero(np.logical_and(np.logical_and(self.datadict["flow_range"][:,1,self.valid_lags[0]]>self.seq_len_T_chunk[length],
                                                                                             np.less_equal(np.arange(self.datadict["img_path"].shape[0]) +
                                                                                                           (self.min_frames + length)*self.subsample_step + 1,
                                                                                                           self.datadict["seq_end_id"])),
                                                                              np.less_equal(self.datadict["fid"],self.datadict["max_fid"][:,self.valid_lags[0]])))
                                        for length in np.arange(self.max_frames)}
            else:
                self.ids_per_seq_len = {length: np.flatnonzero(np.less_equal(self.datadict["fid"],self.datadict["max_fid"][:,self.valid_lags[0]])) for length in np.arange(self.max_frames)}
            # per-length object balancing weights: each object contributes equally
            for length in self.ids_per_seq_len:
                actual_ids = self.ids_per_seq_len[length]
                oids, counts_per_obj = np.unique(self.datadict["object_id"][actual_ids],return_counts=True)
                weights = np.zeros_like(actual_ids,dtype=np.float)
                for oid,c in zip(oids,counts_per_obj):
                    weights[self.datadict["object_id"][actual_ids]==oid] = 1. / (c * oids.shape[0])
                self.object_weights_per_seq_len.update({length:weights})
        # global per-sample weights so that each object id is sampled equally often
        obj_ids, obj_counts = np.unique(self.datadict["object_id"], return_counts=True)
        weights = np.zeros_like(self.datadict["object_id"], dtype=np.float)
        for (oid, c) in zip(obj_ids, obj_counts):
            weights[self.datadict["object_id"] == oid] = 1. / c
        weights = weights / obj_ids.shape[0]
        self.datadict.update({"weights": weights})
        if self.flow_in_ram:
            self.logger.warn(f"Load flow maps in RAM... please make sure to have enough capacity there.")
            assert len(self.valid_lags) == 1
            self.loaded_flows = parallel_data_prefetch(self._read_flows, self.datadict["flow_paths"][:,self.valid_lags[0]],n_proc=72,cpu_intensive=True)
            assert self.loaded_flows.shape[0] == self.datadict["img_path"].shape[0]
        if self.imgs_in_ram:
            self.logger.warn(f"Load images in RAM... please make sure to have enough capacity there.")
            self.loaded_imgs = parallel_data_prefetch(self._read_imgs, self.datadict["img_path"],n_proc=72,cpu_intensive=True)
            assert self.loaded_imgs.shape[0] == self.datadict["img_path"].shape[0]
        if google_imgs:
            img_paths = [p for p in glob(path.join(self.datapath,"google_images", "*")) if path.isfile(p) and any(map(lambda x: p.endswith(x), ["jpg", "jpeg", "png"]))]
            self.datadict["img_path"] = np.asarray(img_paths)
            self.logger.info("Use images from Google.")
        # NOTE(review): msg is appended twice to the final log line below (once
        # inside the f-string, once concatenated)
        msg = "Flow normalization enabled!" if self.normalize_flows else "Flow normalization disabled!"
        self.logger.info(
            f'Initialized {self.__class__.__name__} in "{msg}"-mode. Dataset consists of {self.__len__()} samples. ' + msg
        )

    def _set_instance_specific_values(self):
        """Fixed, non-configurable defaults for the plant data."""
        # flow cutoff heuristic for plants
        # (NOTE(review): an older comment said 0.2 but the value here is 0.4)
        self.valid_lags = [0]
        self.flow_cutoff = 0.4
        self.extended_annotations = False
        self.subsample_step = 2
        self.min_frames = 5
        self.obj_weighting = True
        # object 8 is always excluded for this dataset
        if not 8 in self.excluded_objects:
            self.excluded_objects.append(8)
        self.metafilename = "meta"
        # self.metafilename = 'test_codeprep_metadata'

    def _read_flows(self,data):
        """Load the flow files in ``data`` and resize them to the configured size.

        Used as worker function for parallel_data_prefetch.

        :param data: iterable of flow file paths
        :return: np.ndarray of the concatenated, resized flow maps
        """
        read_flows = []
        flow_paths = data
        def proc_flow(flow):
            # resize the flow map and rescale the displacement values by the
            # same spatial factor so magnitudes stay consistent
            org_shape = float(flow.shape[-1])
            dsize = None
            if "spatial_size" in self.config:
                dsize = self.config["spatial_size"]
            elif "resize_factor" in self.config:
                dsize = (
                    int(float(flow.shape[1]) / self.config["resize_factor"]),
                    int(float(flow.shape[2]) / self.config["resize_factor"]),
                )
            flow = F.interpolate(
                torch.from_numpy(flow).unsqueeze(0), size=dsize, mode="bilinear", align_corners=True
            ).numpy()
            flow = flow / (org_shape / dsize[0])
            return flow
        for i, flow_path in enumerate(tqdm(flow_paths)):
            try:
                f = np.load(flow_path)
                f = proc_flow(f)
            except ValueError:
                # some flow files require allow_pickle=True to load
                try:
                    f = np.load(flow_path, allow_pickle=True)
                    f = proc_flow(f)
                except Exception as ex:
                    self.logger.error(ex)
                    read_flows.append("None")
                    continue
            except:
                # NOTE(review): bare except; also the string "None" appended on
                # failure will break np.concatenate below — confirm failures
                # are expected to be fatal here
                self.logger.error("Fallback error ocurred. Append None and continue")
                read_flows.append("None")
                continue
            read_flows.append(f)
        return np.concatenate(read_flows,axis=0)

    def _read_imgs(self,imgs):
        """Load and resize the images in ``imgs`` (worker for parallel_data_prefetch).

        :param imgs: iterable of image file paths
        :return: list of preprocessed, resized images
        """
        read_imgs = []
        for img_path in tqdm(imgs):
            img = cv2.imread(img_path)
            # image is read in BGR
            img = preprocess_image(img, swap_channels=True)
            # NOTE(review): the third positional argument of cv2.resize is
            # ``dst``, not ``interpolation`` — this likely intends
            # interpolation=cv2.INTER_LINEAR; confirm
            img = cv2.resize(
                img, self.config["spatial_size"], cv2.INTER_LINEAR
            )
            read_imgs.append(img)
        return read_imgs

    def _make_split(self,data):
        """Split ``data`` into train/test dicts according to ``self.split``.

        :param data: dict of equally-sized numpy arrays
        :return: (split_data, train_indices, test_indices)
        """
        vids = np.unique(self.data["vid"])
        split_data = {"train": {}, "test": {}}
        if self.split == "videos":
            # split such that some videos are held back for testing
            self.logger.info("Splitting data after videos")
            shuffled_vids = deepcopy(vids)
            np.random.shuffle(shuffled_vids)
            train_vids = shuffled_vids[: int(0.8 * shuffled_vids.shape[0])]
            train_indices = np.nonzero(np.isin(data["vid"], train_vids))[0]
            # NOTE(review): logical_not of an *index* array is only True at
            # index 0, so test_indices is almost certainly not the intended
            # complement of train_indices — this probably should negate an
            # isin/boolean mask instead; confirm the "videos" split behavior
            test_indices = np.nonzero(np.logical_not(train_indices))[0]
            split_data["train"] = {
                key: data[key][train_indices] for key in data
            }
            split_data["test"] = {
                key: data[key][test_indices] for key in data
            }
        else:
            # per-video split: first 80% of each video's frames -> train
            self.logger.info(f"splitting data across_videos")
            train_indices = np.asarray([],dtype=np.int)
            test_indices = np.asarray([], dtype=np.int)
            for vid in vids:
                indices = np.nonzero(data["vid"] == vid)[0]
                # indices = np.arange(len(tdata["img_path"]))
                # np.random.shuffle(indices)
                train_indices = np.append(train_indices,indices[: int(0.8 * indices.shape[0])])
                test_indices = np.append(test_indices,indices[int(0.8 * indices.shape[0]) :])
            split_data["train"] = {
                key: data[key][train_indices] for key in data
            }
            split_data["test"] = {
                key: data[key][test_indices] for key in data
            }
        return split_data, train_indices, test_indices
class VegetationDataset(PlantDataset):
    """Vegetation variant of PlantDataset using a precomputed train/test flag split."""

    def _set_instance_specific_values(self):
        """Fixed, non-configurable defaults for the vegetation data."""
        self.filter_flow = False
        self.valid_lags = [0]
        self.flow_cutoff = .3
        self.min_frames = 5
        self.subsample_step = 2
        self.metafilename = "meta"
        self.datadict.update({"train": []})
        self.obj_weighting = True
        # patch-based flow weighting is disabled for this dataset
        self.flow_weights = False

    def _make_split(self, data):
        """Split according to the precomputed boolean "train" flag per sample."""
        train_ids = np.flatnonzero(data["train"])
        test_ids = np.flatnonzero(np.logical_not(data["train"]))
        assert np.intersect1d(train_ids, test_ids).size == 0
        split_data = {
            "train": {key: data[key][train_ids] for key in data},
            "test": {key: data[key][test_ids] for key in data},
        }
        return split_data, train_ids, test_ids
class TaichiDataset(VegetationDataset):
    """Tai-chi video dataset; reuses VegetationDataset's flag-based split."""

    def _set_instance_specific_values(self):
        """Fixed, non-configurable defaults for the tai-chi data."""
        self.metafilename = 'meta'
        self.filter_flow = True
        self.flow_width_factor = 5
        self.valid_lags = [1]
        self.flow_cutoff = .1
        self.min_frames = 5
        self.subsample_step = 2
        self.datadict.update({"train": []})
        self.obj_weighting = False
        # patch-based flow weighting defaults to enabled but may be overridden via config
        self.flow_weights = self.config.get("flow_weights", True)
        self.target_lags = [10, 20]
class LargeVegetationDataset(VegetationDataset):
    """Larger vegetation dataset; object ids 1-3 are always excluded."""

    def _set_instance_specific_values(self):
        """Fixed, non-configurable defaults for the large vegetation data."""
        self.metafilename = "meta"
        self.filter_flow = False
        self.valid_lags = [0]
        self.flow_cutoff = .1
        self.min_frames = 5
        self.subsample_step = 2
        self.datadict.update({"train": []})
        self.excluded_objects = [1, 2, 3]
        self.obj_weighting = True
class IperDataset(PlantDataset):
    """iPER human-motion dataset; adds actor/action ids and several split modes."""

    def _set_instance_specific_values(self):
        """Fixed, non-configurable defaults for iPER."""
        self.filter_flow = True
        self.flow_width_factor = 5
        self.valid_lags = [0]
        # flow cutoff heuristic for iPER
        # (NOTE(review): an older comment said 0.45 but the value here is 0.6)
        self.flow_cutoff = 0.6
        self.min_frames = 5
        # self.datapath = "/export/scratch/compvis/datasets/iPER/processed_256_resized/"
        self.metafilename = 'meta' #"test_codeprep_metadata"
        self.datadict.update({"actor_id": [], "action_id": []})
        # set object weighting always to false
        self.obj_weighting = False
        self.flow_weights = self.config["flow_weights"] if "flow_weights" in self.config else True
        self.use_flow_for_weights = False

    def _make_split(self,data):
        """Split ``data`` according to ``self.split``.

        Supported modes: "videos"/"objects"/"actions"/"actors" (hold back 20%
        of the unique ids of the respective key), "official" (train list from
        the original iPER paper) and a per-video 80/20 fallback split.

        :return: (split_data, train_indices, test_indices)
        """
        split_data = {"train": {}, "test": {}}
        if self.split == "videos":
            key = "vid"
        elif self.split == "objects":
            key = "object_id"
        elif self.split == "actions":
            key = "action_id"
        elif self.split == "actors":
            key = "actor_id"
        elif self.split == "official":
            # this is the official train test split as in the original paper
            with open(path.join("/".join(self.datapath.split("/")[:-1]),"train.txt"),"r") as f:
                train_names = f.readlines()
            train_indices = np.asarray([],dtype=np.int)
            for n in train_names:
                # names are listed as "actor/video"; image paths use "actor_video"
                n = n.replace("/","_").rstrip()
                train_indices = np.append(train_indices,np.flatnonzero(np.char.find(data["img_path"],n) != -1))
            train_indices = np.sort(train_indices)
            test_indices = np.flatnonzero(np.logical_not(np.isin(np.arange(data["img_path"].shape[0]),train_indices)))
            split_data["train"] = {
                key: data[key][train_indices] for key in data
            }
            split_data["test"] = {
                key: data[key][test_indices] for key in data
            }
            return split_data, train_indices, test_indices
        else:
            # fallback: per-video split, first 80% of each video's frames -> train
            vids = np.unique(self.data["vid"])
            self.logger.info(f"splitting data across_videos")
            train_indices = np.asarray([], dtype=np.int)
            test_indices = np.asarray([], dtype=np.int)
            for vid in vids:
                indices = np.nonzero(data["vid"] == vid)[0]
                # indices = np.arange(len(tdata["img_path"]))
                # np.random.shuffle(indices)
                train_indices = np.append(train_indices, indices[: int(0.8 * indices.shape[0])])
                test_indices = np.append(test_indices, indices[int(0.8 * indices.shape[0]):])
            split_data["train"] = {
                key: data[key][train_indices] for key in data
            }
            split_data["test"] = {
                key: data[key][test_indices] for key in data
            }
            return split_data, train_indices, test_indices
        # split such that some objects are held back for testing
        # (only reached for "videos"/"objects"/"actions"/"actors"; the dict
        # comprehensions below re-bind ``key`` in their own scope and do not
        # clash with the split key chosen above)
        self.logger.info(f"Splitting data after {key}")
        ids = np.unique(data[key])
        shuffled_ids = deepcopy(ids)
        np.random.shuffle(shuffled_ids)
        train_ids = shuffled_ids[: int(0.8 * shuffled_ids.shape[0])]
        train_indices = np.flatnonzero(np.isin(data[key], train_ids))
        test_indices = np.flatnonzero(np.logical_not(np.isin(np.arange(self.data["img_path"].shape[0]),train_indices)))
        train_indices = np.sort(train_indices)
        test_indices = np.sort(test_indices)
        split_data["train"] = {
            key: data[key][train_indices] for key in data
        }
        split_data["test"] = {
            key: data[key][test_indices] for key in data
        }
        return split_data, train_indices, test_indices
class Human36mDataset(PlantDataset):
    """Human3.6M dataset; supports the "official" flag split and a per-video "gui" split."""

    def _set_instance_specific_values(self):
        """Fixed, non-configurable defaults for Human3.6M."""
        self.metafilename = "meta"
        self.valid_lags = [1]
        self.flow_cutoff = 0.3
        self.min_frames = 5
        self.subsample_step = 2
        self.datadict.update({"actor_id": [], "action_id": [], "train": []})
        # object weighting is never used for this dataset
        self.obj_weighting = False
        self.filter_flow = False
        self.flow_width_factor = 5
        self.flow_weights = True
        self.use_flow_for_weights = True
        self.use_lanczos = True

    def _make_split(self, data):
        """Return (split_data, train_indices, test_indices) for the configured split."""
        if self.split == "official":
            # precomputed boolean "train" flag per sample
            train_ids = np.flatnonzero(data["train"])
            test_ids = np.flatnonzero(np.logical_not(data["train"]))
            assert np.intersect1d(train_ids, test_ids).size == 0
        elif self.split == "gui":
            self.logger.info(f"splitting data across_videos")
            # first 80% of each video's frames -> train, remainder -> test
            train_ids = np.asarray([], dtype=np.int)
            test_ids = np.asarray([], dtype=np.int)
            for vid in np.unique(self.data["vid"]):
                indices = np.nonzero(data["vid"] == vid)[0]
                cut = int(0.8 * indices.shape[0])
                train_ids = np.append(train_ids, indices[:cut])
                test_ids = np.append(test_ids, indices[cut:])
        else:
            raise ValueError(f'Specified split type "{self.split}" is not valid for Human36mDataset.')
        split_data = {
            "train": {key: data[key][train_ids] for key in data},
            "test": {key: data[key][test_ids] for key in data},
        }
        return split_data, train_ids, test_ids
class GoogleImgDataset(Dataset, LoggingParent):
    """Flat dataset over single image files (jpg/jpeg/png) directly inside a directory."""

    def __init__(self, base_dir, config,):
        Dataset.__init__(self)
        LoggingParent.__init__(self)
        self.logger.info(f"Initialize GoogleImgDataset with basepath {base_dir}")
        self.config = config
        candidates = glob(path.join(base_dir, "*"))
        img_paths = [
            p for p in candidates
            if path.isfile(p) and any(p.endswith(ext) for ext in ["jpg", "jpeg", "png"])
        ]
        self.datadict = {"img_path": np.asarray(img_paths)}
        # map [0, 1] image tensors to [-1, 1]
        self.transforms = tt.Compose(
            [
                tt.ToTensor(),
                tt.Lambda(lambda x: (x * 2.0) - 1.0),
            ])
        self.logger.info(f"Initialized Dataset with {self.__len__()} images")

    def __getitem__(self, idx):
        """Return the image path at position ``idx`` (no loading or decoding)."""
        return self.datadict["img_path"][idx]

    def __len__(self):
        return self.datadict["img_path"].shape[0]
if __name__ == "__main__":
    # Manual smoke test: build the configured dataset and dump visualizations
    # (videos or image grids) to ./test_data/<DatasetName>.
    import yaml
    import torch
    from torchvision import transforms as tt
    from torch.utils.data import DataLoader, RandomSampler
    import cv2
    from os import makedirs
    from tqdm import tqdm
    from data import get_dataset
    from data.samplers import SequenceSampler, SequenceLengthSampler
    from utils.testing import make_video, make_flow_grid
    from utils.general import get_patches
    # fix all RNG seeds for reproducible sampling
    seed = 42
    torch.manual_seed(42)
    torch.cuda.manual_seed(42)
    np.random.seed(42)
    # random.seed(opt.seed)
    torch.backends.cudnn.deterministic = True
    torch.manual_seed(42)
    rng = np.random.RandomState(42)
    # load config
    fpath = path.dirname(path.realpath(__file__))
    configpath = path.abspath(path.join(fpath, "../config/test_config.yaml"))
    with open(configpath, "r") as f:
        config = yaml.load(f, Loader=yaml.FullLoader)
    transforms = tt.Compose(
        [tt.ToTensor(), tt.Lambda(lambda x: (x * 2.0) - 1.0)]
    )
    datakeys = ["images", "img_aT", "img_sT", "app_img_cmp", "app_img_random","flow", "poke"]
    make_overlay = config["general"]["overlay"]
    # generate dataset
    dset, transforms = get_dataset(config["data"],transforms)
    test_dataset = dset(transforms, datakeys, config["data"],train=True)
    save_dir = f"test_data/{test_dataset.__class__.__name__}"
    makedirs(save_dir, exist_ok=True)
    print(test_dataset.datapath)
    if test_dataset.yield_videos:
        # video mode: write per-batch mp4 grids plus a warping-test image
        def init_fn(worker_id):
            # reseed numpy per dataloader worker
            return np.random.seed(np.random.get_state()[1][0] + worker_id)
        if test_dataset.var_sequence_length:
            sampler = SequenceLengthSampler(test_dataset,shuffle=True,drop_last=False, batch_size=config["training"]["batch_size"],zero_poke=config["data"]["include_zeropoke"])
            loader = DataLoader(test_dataset, batch_sampler=sampler, num_workers=config["data"]["num_workers"], worker_init_fn=init_fn)
        else:
            sampler = RandomSampler(test_dataset)
            loader = DataLoader(test_dataset,batch_size=config["training"]["batch_size"], sampler=sampler,num_workers=config["data"]["num_workers"],
                                worker_init_fn=init_fn, drop_last= True)
        n_logged = config["testing"]["n_logged"]
        for i, batch in enumerate(tqdm(loader)):
            # only visualize the first ~200 batches
            if i >200:
                break
            imgs = batch["images"][:n_logged]
            src_img = imgs[:,0]
            tgt_img = imgs[:,-1]
            flow = batch["flow"][:n_logged]
            # with flow_weights enabled, "poke" is a (poke, weights) pair
            poke = batch["poke"][:n_logged][0] if test_dataset.flow_weights else batch["poke"][:n_logged]
            weights = batch["poke"][:n_logged][1] if test_dataset.flow_weights else None
            postfix = "weighted" if config["data"]["object_weighting"] else "unweighted"
            if weights is not None:
                imgs = get_patches(imgs,weights,config["data"],test_dataset.weight_value_flow)
                postfix = postfix + "_patched"
            out_vid = make_video(imgs[:,0],poke,imgs,imgs,n_logged=min(n_logged,config["training"]["batch_size"]),flow=flow,logwandb=False, flow_weights=weights)
            warping_test = make_flow_grid(src_img,flow,tgt_img,tgt_img,n_logged=min(n_logged,config["training"]["batch_size"]))
            warping_test = cv2.cvtColor(warping_test,cv2.COLOR_RGB2BGR)
            cv2.imwrite(path.join(save_dir,f'warping_test-{i}.png'),warping_test)
            savename = path.join(save_dir,f"vid-grid-{i}-{postfix}.mp4")
            writer = cv2.VideoWriter(
                savename,
                cv2.VideoWriter_fourcc(*"MP4V"),
                5,
                (out_vid.shape[2], out_vid.shape[1]),
            )
            # writer = vio.FFmpegWriter(savename,inputdict=inputdict,outputdict=outputdict)
            for frame in out_vid:
                frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
                writer.write(frame)
            writer.release()
    else:
        # image mode: write one grid png per sequence batch
        sampler = SequenceSampler(test_dataset, batch_size=config["training"]["batch_size"], shuffle=False, drop_last=False)
        loader = DataLoader(test_dataset, batch_sampler=sampler, num_workers=config["data"]["num_workers"])
        #assert sampler.batch_size == 1
        postfix = "filt" if test_dataset.filter_flow else "nofilt "
        for i, batch in enumerate(tqdm(loader)):
            if i > 200:
                break
            # drop the leading batch dimension of every entry
            batch = {key: batch[key].squeeze(0) if not isinstance(batch[key],list) else [e.squeeze(0) for e in batch[key]] for key in batch}
            src_img = batch["images"][0]
            tgt_img = batch["images"][-1]
            # vis augmented images
            img_aT = batch["img_aT"][0]
            img_sT = batch["img_sT"]
            img_dis = batch["app_img_random"]
            img_cmp = batch["app_img_cmp"]
            # # vis flow
            flow_map = batch["flow"].permute(1, 2, 0).cpu().numpy()
            flow_map -= flow_map.min()
            flow_map /= flow_map.max()
            flow_map = (flow_map * 255.0).astype(np.uint8)
            # vis poke
            poke = batch["poke"][0].permute(1, 2, 0).cpu().numpy() if test_dataset.flow_weights else batch["poke"].permute(1, 2, 0).cpu().numpy()
            if test_dataset.flow_weights:
                weight_map = batch["poke"][1].cpu().numpy()
                weight_map = ((weight_map - weight_map.min()) / weight_map.max() * 255.).astype(np.uint8)
                heatmap = cv2.applyColorMap(weight_map, cv2.COLORMAP_HOT)
                heatmap = cv2.cvtColor(heatmap, cv2.COLOR_RGB2BGR)
                # visualize poke patch in flow map as white region
                flow_map = np.where((poke**2).sum(-1,keepdims=True)>0, np.full_like(flow_map, 255), flow_map)
            poke -= poke.min()
            poke /= poke.max()
            poke = (poke * 255.0).astype(np.uint8)
            # vis inverted flow
            # flow_map_inv = batch["flow_inv"].permute(1, 2, 0).cpu().numpy()
            # flow_map_inv -= flow_map_inv.min()
            # flow_map_inv /= flow_map_inv.max()
            # flow_map_inv = (flow_map_inv * 255.0).astype(np.uint8)
            # vis images: map [-1, 1] tensors back to uint8 HWC
            src_img = (
                ((src_img.permute(1, 2, 0).cpu() + 1) * 127.5)
                .numpy()
                .astype(np.uint8)
            )
            tgt_img = (
                ((tgt_img.permute(1, 2, 0).cpu() + 1) * 127.5)
                .numpy()
                .astype(np.uint8)
            )
            img_aT = ((img_aT.permute(1, 2, 0).cpu() + 1) * 127.5).numpy().astype(np.uint8)
            img_sT = ((img_sT.permute(1, 2, 0).cpu() + 1) * 127.5).numpy().astype(np.uint8)
            img_dis = ((img_dis.permute(1, 2, 0).cpu() + 1) * 127.5).numpy().astype(np.uint8)
            img_cmp = ((img_cmp.permute(1, 2, 0).cpu() + 1) * 127.5).numpy().astype(np.uint8)
            if make_overlay:
                overlay = cv2.addWeighted(src_img,0.5,tgt_img,0.5,0)
            else:
                tgt_img = [tgt_img,heatmap] if test_dataset.flow_weights else [tgt_img]
            # pad the 2-channel flow/poke maps with a zero third channel
            zeros = np.expand_dims(np.zeros_like(flow_map).sum(2), axis=2)
            flow_map = np.concatenate([flow_map, zeros], axis=2)
            poke = np.concatenate([poke, zeros], axis=2)
            # flow_map_inv = np.concatenate([flow_map_inv,zeros],axis=2)
            # NOTE(review): in the make_overlay branch tgt_img is still an
            # ndarray, so *tgt_img unpacks its rows rather than a list of
            # images — confirm this branch is exercised/intended
            if make_overlay:
                grid = np.concatenate([src_img, *tgt_img,overlay, img_sT, img_aT, img_dis, img_cmp, flow_map, poke], axis=1).astype(np.uint8)
            else:
                grid = np.concatenate([src_img, *tgt_img, img_sT, img_aT, img_dis, img_cmp, flow_map, poke], axis=1).astype(np.uint8)
            grid = cv2.cvtColor(grid,cv2.COLOR_BGR2RGB)
            cv2.imwrite(path.join(save_dir, f"test_grid_{i}-{postfix}.png"), grid)
| 32,596 | 41.947299 | 192 | py |
interactive-image2video-synthesis | interactive-image2video-synthesis-main/data/base_dataset.py | from functools import partial
from itertools import chain
import torch
from torch.nn import functional as F
from torch.utils.data import Dataset
from torchvision import transforms as T
from torchvision.transforms import functional as FT
from PIL import Image
import numpy as np
from abc import abstractmethod
import cv2
from utils.general import convert_flow_2d_to_3d, get_flow_gradients
from data.helper_functions import preprocess_image
from utils.general import LoggingParent
class FlowError(Exception):
    """Raised when no valid optical-flow file could be loaded for a sample."""

    def __init__(self, path, msg=None):
        # fall back to a descriptive default message when none is supplied
        default = f'Could not load flow file "{path}" neither with "allow_pickle=False" nor with "allow_pickle=True". Considering different sequence....'
        super().__init__(default if msg is None else msg)
class BaseDataset(Dataset, LoggingParent):
def __init__(self, transforms, datakeys: list, config: dict, train=True):
Dataset.__init__(self)
LoggingParent.__init__(self)
# list of keys for the data that shall be retained
assert len(datakeys) > 0
self.datakeys = datakeys
# torchvision.transforms
self.transforms = transforms
# config: contains all relevant configuration parameters
self.config = config
self.train = train
assert "spatial_size" in self.config
self.datapath = self.config['datapath']
# self.valid_lags = np.unique(self.config["valid_lags"]) if "valid_lags" in self.config else list(range(6))
self.yield_videos = self.config["yield_videos"] if "yield_videos" in self.config else False
# everything, which has to deal with variable sequence lengths
self.var_sequence_length = self.config["var_sequence_length"] if "var_sequence_length" in self.config and self.yield_videos else False
self.longest_seq_weight = self.config["longest_seq_weight"] if "longest_seq_weight" in self.config else None
self.scale_poke_to_res = self.config["scale_poke_to_res"] if "scale_poke_to_res" in self.config else False
if self.scale_poke_to_res:
self.logger.info(f'Scaling flows and pokes to dataset resolution, which is {self.config["spatial_size"]}')
self.logger.info(f'Dataset is yielding {"videos" if self.yield_videos else "images"}.')
self.poke_size = self.config["poke_size"] if "poke_size" in self.config else self.config["spatial_size"][0] / 128 * 10
if "poke" in self.datakeys:
self.logger.info(f"Poke size is {self.poke_size}.")
# for flow filtering: default values are such that nothing changes
self.filter_flow = False
self.flow_width_factor = None
# whether fancy appearance augmentation shall be used or not
self.fancy_aug = self.config["fancy_aug"] if "fancy_aug" in self.config else False
# flow weighting, if intended to be enabled
self.flow_weights = self.config["flow_weights"] if "flow_weights" in self.config else False
self.weight_value_flow = self.config["foreground_value"] if "foreground_value" in self.config else 1.
self.weight_value_poke = self.config["poke_value"] if "poke_value" in self.config else 1.
self.weight_value_bg = self.config["background_weight"] if "background_weight" in self.config else 1.
# whether to use only one value in for poke or the complete flow field within that patch
self.equal_poke_val = self.config["equal_poke_val"] if "equal_poke_val" in self.config else True
# Whether or not to normalize the flow values
self.normalize_flows = self.config["normalize_flows"] if "normalize_flows" in self.config else False
# Whether to weight different objects (i.e. samples with different object_ids) the way that the should be yield equally often (recommended for imbalanced datasets)
self.obj_weighting = self.config["object_weighting"] if "object_weighting" in self.config else False
self.p_col= self.config["p_col"] if "p_col" in self.config else 0
self.p_geom = self.config["p_geom"] if "p_geom" in self.config else 0
self.ab = self.config["augment_b"] if "augment_b" in self.config else 0
self.ac = self.config["augment_c"] if "augment_c" in self.config else 0
self.ah = self.config["augment_h"] if "augment_h" in self.config else 0
self.a_s = self.config["augment_s"] if "augment_s" in self.config else 0
self.ad = self.config["aug_deg"] if "aug_deg" in self.config else 0
self.at = self.config["aug_trans"] if "aug_trans" in self.config else (0,0)
self.use_lanczos = self.config["use_lanczos"] if "use_lanczos" in self.config else False
self.pre_T = T.ToPILImage()
self.z1_normalize = "01_normalize" in self.config and self.config["01_normalize"]
if self.z1_normalize:
self.post_T = T.Compose([T.ToTensor(),])
else:
self.post_T = T.Compose([T.ToTensor(),T.Lambda(lambda x: (x * 2.0) - 1.0)])
self.post_edges = T.Compose([T.ToTensor()])
# key:value mappings for every datakey in self.datakeys
self._output_dict = {
"images": [partial(self._get_imgs)],
"poke": [self._get_poke],
"flow": [self._get_flow],
"img_aT": [partial(self._get_imgs,use_fb_aug = self.fancy_aug), ["color"]],
"img_sT": [partial(self._get_imgs,sample=True),["geometry"]],
"app_img_random": [self._get_transfer_img],
"app_img_dis": [partial(self._get_imgs, sample=True), ["color", "geometry"]],
"app_img_cmp": [self._get_transfer_img],
"flow_3D": [self._get_3d_flow],
"poke_3D": [self._get_3d_poke],
"edge_image": [self._get_edge_image],
"edge_flow": [self._get_edge_flow],
"flow_3D_series": [self._get_flow_series],
"image_series": [self._get_image_series]
}
if self.fancy_aug:
assert "app_img_dis" not in self.datakeys
# the data that's held by the dataset
self.datadict = {
"img_path": [],
"flow_paths": [],
"img_size": [],
"flow_size": [],
"vid": [],
"fid": [],
"object_id": [],
# "original_id": [],
"flow_range": []
}
self.max_frames = self.config["max_frames"] if "max_frames" in self.config else 1
self.augment = self.config["augment_wo_dis"] if ("augment_wo_dis" in self.config and self.train) else False
self.color_transfs = None
self.geom_transfs = None
self.subsample_step = 1
self.min_frames = None
# sequence start and end ids are related to the entire dataset and so is self.img_paths
self.eids_per_seq = {}
self.sids_per_seq = {}
self.seq_len_T_chunk = {}
self.max_trials_flow_load = 50
#self.img_paths = {}
self.mask=None
self.flow_norms = None
self.flow_in_ram = False
self.imgs_in_ram = False
self.outside_length = None
self.loaded_flows = []
self.loaded_imgs = []
self.valid_lags = None
self.ids_per_seq_len = {}
self.object_weights_per_seq_len = {}
if "weight_zeropoke" in self.config and "include_zeropoke" in self.config:
self.zeropoke_weight = max(1.,float(self.max_frames) / 5) if self.config["weight_zeropoke"] and self.config["include_zeropoke"] else 1.
else:
self.zeropoke_weight = 1.
# this is the value, which will be the upper bound for all normalized optical flows, when training on variable sequence lengths
# per default, set to 1 here (max) can be adapted, if necessary, in the subclass of base dataset
self.flow_cutoff = 1.
self.valid_h = [self.poke_size, self.config["spatial_size"][0] - self.poke_size]
self.valid_w = [self.poke_size, self.config["spatial_size"][1] - self.poke_size]
self.use_flow_for_weights = False
    def __getitem__(self, idx):
        """
        :param idx: The idx is here a tuple, consisting of the actual id and the sampled lag for the flow in the respective iteration
        :return: dict mapping every key in self.datakeys to its loaded/processed data for this sample
        """
        # collect outputs
        data = {}
        # per-key transform draws; NOTE(review): these are drawn independently of
        # self.color_transfs / self.geom_transfs below, so the "_aT"/"_sT" outputs do
        # not share augmentation parameters with the globally augmented images — confirm intended.
        transforms = {"color": self._get_color_transforms(), "geometry" : self._get_geometric_transforms()}
        self.color_transfs = self._get_color_transforms() if self.augment else None
        self.geom_transfs = self._get_geometric_transforms() if self.augment else None
        # sample id (in case, sample is enabled)
        if self.var_sequence_length:
            idx = self._get_valid_ids(*idx)
        else:
            idx = self._get_valid_ids(length=None,index=idx)
        # sidx: random frame from the same video (appearance sample);
        # tr_vid: random other video id (appearance transfer source)
        sidx = int(np.random.choice(np.flatnonzero(self.datadict["vid"] == self.datadict["vid"][idx[0]]), 1))
        tr_vid = int(np.random.choice(self.datadict["vid"][self.datadict["vid"] != self.datadict["vid"][idx[0]]], 1))
        # flow files can be corrupt; retry with a fresh random start id on FlowError
        for i in range(self.max_trials_flow_load):
            self.mask = {}
            try:
                self._get_mask(idx)
                data = {key: self._output_dict[key][0](idx, sample_idx = sidx,
                                                       transforms = chain.from_iterable([transforms[tkey] for tkey in self._output_dict[key][1]]) if len(self._output_dict[key])>1 else None,
                                                       transfer_vid= tr_vid) for key in self.datakeys}
                break
            except FlowError as fe:
                self.logger.error(fe)
                # sample new id and try again
                img_id = int(np.random.choice(np.arange(self.datadict["img_path"].shape[0]),1))
                # don't change lag
                idx = (img_id,idx[1])
        if len(data) == 0:
            raise IOError(f"Errors in flow files loading...tried it {self.max_trials_flow_load} times consecutively without success.")
        return data
    def _get_valid_ids(self,length,index = None):
        """
        Map a raw (length, index) request to a concrete (start_id, length) tuple.

        :param length: The sequence length (or flow step, depending on whether var_sequence_length is True or False)
        :param index: The id corresponding to the requested sample; when length == -1 it
            instead carries the sequence length sampled for the whole batch
        :return: tuple (start_id, length)
        """
        # we need to do the following things:
        # take care, that choose one start id from all samples, which have the appropriate flow_magnitude and result in sequences which are within the same video
        if self.var_sequence_length:
            #ids = np.flatnonzero(np.logical_and(self.datadict["flow_range"][:,1]>self.seq_len_T_chunk[length],np.less_equal(np.arange(self.datadict["img_path"].shape[0]) + self.min_seq_length[0] + length*self.subsample_step,self.datadict["seq_end_id"])))
            if length == -1:
                # use maximum sequence length for such cases
                # length = int(np.random.choice(np.arange(self.max_frames),1))
                # in case length == -1: index corresponds to actual sampled length for the regarded batch
                self.outside_length = index
                start_id = int(np.random.choice(self.ids_per_seq_len[self.outside_length], 1))
            else:
                ids = self.ids_per_seq_len[length]
                if self.obj_weighting:
                    start_id = int(np.random.choice(ids, 1, p=self.object_weights_per_seq_len[length]))
                else:
                    start_id = int(np.random.choice(ids, 1))
        else:
            if index == -1:
                length = -1
            if self.obj_weighting:
                index = int(np.random.choice(np.arange(self.datadict["object_id"].shape[0]),p=self.datadict["weights"],size=1))
            else:
                # NOTE(review): this branch is identical to the obj_weighting branch above
                # (both sample with p=self.datadict["weights"]); presumably a uniform draw
                # was intended here — confirm before changing.
                index = int(np.random.choice(np.arange(self.datadict["object_id"].shape[0]), p=self.datadict["weights"], size=1))
            # clamp the start so that max_frames * subsample_step frames fit into the video
            max_id_fid = self.sids_per_seq[self.datadict["vid"][index]] + self.datadict["max_fid"][index,self.valid_lags[0]] - 1
            start_id = min(min(index,self.datadict["seq_end_id"][index]-(self.max_frames* self.subsample_step) - 1),max_id_fid)
        return (start_id,length)
def _get_3d_flow(self, ids, **kwargs):
flow = self._get_flow(ids)
flow = convert_flow_2d_to_3d(flow)
return flow
def _get_3d_poke(self, ids, **kwargs):
flow = self._get_poke(ids)
flow = convert_flow_2d_to_3d(flow)
return flow
def _get_edge_image(self, ids, sample_idx, transforms=None, sample=False, use_fb_aug=False, **kwargs):
imgs = []
if sample:
yield_ids = [sample_idx]
else:
yield_ids = self._get_yield_ids(ids)
for i,idx in enumerate(yield_ids):
img_path = self.datadict["img_path"][idx]
img = cv2.imread(img_path)
# image is read in BGR
img = preprocess_image(img, swap_channels=True)
img = cv2.resize(
img, self.config["spatial_size"], cv2.INTER_LINEAR
)
# transformations
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
gradient = cv2.Sobel(img/255, cv2.CV_64F, 1, 0, ksize=3)
gradient = self.post_edges(gradient)[0]
imgs.append(gradient)
gradient = cv2.Sobel(img/255, cv2.CV_64F, 0, 1, ksize=3)
gradient = self.post_edges(gradient)[0]
imgs.append(gradient)
return torch.stack(imgs, dim=0).squeeze(dim=0)
    def _get_edge_flow(self, ids, **kwargs):
        """
        Load the optical flow for `ids` and return its spatial gradients
        (x- and y-derivative of both flow channels) stacked along dim 0.
        """
        flow_path = self.datadict["flow_paths"][ids[0], self.valid_lags[0]]
        # debug, this path seems to be erroneous
        # flow_path = "/export/data/ablattma/Datasets/plants/processed_crops/VID_0_3_1024x1024/prediction_3_28.flow.npy"
        try:
            flow = np.load(flow_path)
        except ValueError:
            # some flow files were written with pickling enabled; retry accordingly
            try:
                flow = np.load(flow_path,allow_pickle=True)
            except Exception as ex:
                print(ex)
                raise FlowError(flow_path)
        except:
            # any other load failure is surfaced as FlowError and retried by __getitem__
            raise FlowError(flow_path)
        dsize = None
        if "spatial_size" in self.config:
            dsize = self.config["spatial_size"]
        elif "resize_factor" in self.config:
            dsize = (
                int(float(flow.shape[1]) / self.config["resize_factor"]),
                int(float(flow.shape[2]) / self.config["resize_factor"]),
            )
        # nearest-neighbour resize keeps flow vectors unblended
        flow = F.interpolate(
            torch.from_numpy(flow).unsqueeze(0), size=dsize, mode="nearest"
        ).squeeze(0)
        if self.config["predict_3D"]:
            flow = convert_flow_2d_to_3d(flow)
        gradient_d1_x, gradient_d1_y, gradient_d2_x, gradient_d2_y = get_flow_gradients(flow)
        all_gradients = [gradient_d1_x,
                         gradient_d1_y,
                         gradient_d2_x,
                         gradient_d2_y]
        return torch.stack(all_gradients, dim=0).squeeze(dim=0)
def _get_transfer_img(self, ids, transfer_vid,**kwargs):
imgs=[]
yield_ids = [int(np.random.choice(np.flatnonzero(self.datadict["vid"] == transfer_vid), 1))]
for idx in yield_ids:
img_path = self.datadict["img_path"][idx]
img = cv2.imread(img_path)
# image is read in BGR
img = preprocess_image(img, swap_channels=True)
if "spatial_size" in self.config:
img = cv2.resize(
img, self.config["spatial_size"], cv2.INTER_LINEAR
)
elif "resize_factor" in self.config:
dsize = (
int(float(img.shape[1]) / self.config["resize_factor"]),
int(float(img.shape[0]) / self.config["resize_factor"]),
)
img = cv2.resize(img, dsize, interpolation=cv2.INTER_LINEAR)
# transformations
img = self.pre_T(img)
img = self.post_T(img)
imgs.append(img)
return torch.stack(imgs, dim=0).squeeze(dim=0)
    def _compute_mask(self,target_id):
        """
        GrabCut-based foreground segmentation for the frame at `target_id`.
        Returns the raw grabCut label mask (values 0-3).
        """
        img = self._get_imgs([], sample_idx=target_id, sample=True)
        # undo post_T's normalization to recover a uint8 HxWxC image for OpenCV
        if self.z1_normalize:
            img = (img.permute(1, 2, 0).numpy() * 255.).astype(np.uint8)
        else:
            img = ((img.permute(1, 2, 0).numpy() + 1.) * 127.5).astype(np.uint8)
        mask = np.zeros(img.shape[:2], np.uint8)
        # rect defines starting background area
        rect = (int(img.shape[1] / self.flow_width_factor), int(self.valid_h[0]), int((self.flow_width_factor - 2) / self.flow_width_factor * img.shape[1]), int(self.valid_h[1] - self.valid_h[0]))
        # initialize background and foreground models
        fgm = np.zeros((1, 65), dtype=np.float64)
        bgm = np.zeros((1, 65), dtype=np.float64)
        # apply grab cut algorithm
        # NOTE(review): cv2.grabCut returns (mask, bgdModel, fgdModel), so fgm/bgm
        # receive the models in swapped order here; harmless since both are unused afterwards.
        mask2, fgm, bgm = cv2.grabCut(img, mask, rect, fgm, bgm, 5, cv2.GC_INIT_WITH_RECT)
        return mask2
def _compute_mask_with_flow(self,target_id):
flow = self._get_flow([target_id])
amplitude = torch.norm(flow, 2, dim=0)
amplitude -= amplitude.min()
amplitude /= amplitude.max()
# use only such regions where the amplitude is larger than mean + 1 * std
mask = torch.where(torch.gt(amplitude,amplitude.mean()+amplitude.std()),torch.ones_like(amplitude),torch.zeros_like(amplitude)).numpy().astype(np.bool)
return mask
def _get_mask(self,ids):
if self.filter_flow or self.fancy_aug or (self.flow_weights and self.yield_videos):
if self.use_flow_for_weights:
mask_src = self._compute_mask_with_flow(ids[0])
self.mask.update({"img_start": mask_src})
else:
mask_src = self._compute_mask(ids[0])
self.mask.update({"img_start" : np.where((mask_src == 2) | (mask_src == 0), 0, 1).astype(np.bool)})
if self.flow_weights:
yield_ids = self._get_yield_ids(ids)
tgt_id = yield_ids[-1]
if self.use_flow_for_weights:
mask_tgt = self._compute_mask_with_flow(tgt_id)
self.mask.update({"img_tgt": mask_tgt})
else:
mask_tgt = self._compute_mask(tgt_id)
self.mask.update({"img_tgt": np.where((mask_tgt == 2) | (mask_tgt == 0), 0, 1).astype(np.bool)})
if self.yield_videos:
mid_id = int((len(list(yield_ids))+yield_ids[0]) / 2)
if self.use_flow_for_weights:
mask_mid = self._compute_mask_with_flow(mid_id)
self.mask.update({"img_mid": mask_mid})
else:
mask_mid = self._compute_mask(mid_id)
self.mask.update({"img_mid": np.where((mask_mid == 2) | (mask_mid == 0), 0, 1).astype(np.bool)})
def _get_yield_ids(self,ids):
start_id = ids[0]
if self.yield_videos:
if ids[-1] == -1:
if self.var_sequence_length:
n_frames = self.min_frames + self.outside_length
yield_ids = np.stack([start_id]* n_frames,axis=0).tolist()
else:
yield_ids = np.stack([start_id]* (self.max_frames+1),axis=0).tolist()
else:
yield_ids = range(start_id, start_id + (self.min_frames + ids[-1]) * self.subsample_step + 1 ,self.subsample_step) \
if self.var_sequence_length else range(start_id, start_id + self.max_frames * self.subsample_step + 1, self.subsample_step)
else:
yield_ids = (start_id, start_id + (self.valid_lags[0] + 1) * 5)
return yield_ids
def _get_image_series(self, ids, step_width=10, **kwargs):
all_imgs = []
for i in range(1, step_width+1):
new_ids = (ids[0] + i * (1 + self.valid_lags[0]) * 5, ids[1])
flow = self._get_imgs(new_ids, None)
all_imgs.append(flow)
return torch.from_numpy(np.stack(all_imgs, axis=0))
# grabs a series of images
def _get_imgs(self, ids, sample_idx, transforms=None, sample=False, use_fb_aug=False, **kwargs):
imgs = []
if sample:
yield_ids = [sample_idx]
else:
# avoid generating the entire sequence for the color transformed image
if transforms is not None and self._get_color_transforms in transforms and not sample:
yield_ids = [ids[0]]
else:
yield_ids = self._get_yield_ids(ids)
for i,idx in enumerate(yield_ids):
faug = use_fb_aug and (i == 0 or i == len(yield_ids) - 1)
if self.imgs_in_ram:
img = self.loaded_imgs[idx]
else:
img_path = self.datadict["img_path"][idx]
img = cv2.imread(img_path)
img = preprocess_image(img, swap_channels=True)
# image is read in BGR
if self.use_lanczos and self.config["spatial_size"] == 64:
img = np.array(Image.fromarray(img).resize(self.config["spatial_size"], resample=Image.LANCZOS))
else:
img = cv2.resize(
img, self.config["spatial_size"], cv2.INTER_LINEAR
)
# transformations
img = self.pre_T(img)
if transforms is not None:
for t in transforms:
img = t(img)
if faug:
bts = self._get_color_transforms()
img_back = img
for bt in bts:
img_back = bt(img_back)
img_back = self.post_T(img_back)
else:
if self.color_transfs is not None:
for t in self.color_transfs:
img = t(img)
if self.geom_transfs is not None:
for t in self.geom_transfs:
img = t(img)
img = self.post_T(img)
if faug:
img = torch.where(torch.from_numpy(self.mask["img_start"]).unsqueeze(0),img,img_back)
imgs.append(img)
return torch.stack(imgs, dim=0).squeeze(dim=0)
# extracts pokes as flow patches
def _get_poke(self, ids, **kwargs):
seq_len_idx = ids[-1]
if seq_len_idx == -1:
# make fake ids to avoid returning zero flow for poke sampling
fake_ids = (ids[0],10)
flow = self._get_flow(fake_ids)
else:
flow = self._get_flow(ids)
# compute amplitude
amplitude = torch.norm(flow[:, self.valid_h[0]:self.valid_h[1], self.valid_w[0]:self.valid_w[1]], 2, dim=0)
amplitude -= amplitude.min()
amplitude /= amplitude.max()
if seq_len_idx == -1:
# use only very small poke values, this should indicate background values
amplitude_filt = amplitude
if self.filter_flow:
# only consider the part of the mask which corresponds to the region considered in flow
#amplitude_filt = torch.from_numpy(np.where(self.mask["img_start"][self.valid_h[0]:self.valid_h[1],self.valid_w[0]:self.valid_w[1]], amplitude, np.zeros_like(amplitude)))
indices_pre = np.nonzero(np.logical_not(self.mask["img_start"][self.valid_h[0]:self.valid_h[1],self.valid_w[0]:self.valid_w[1]]))
indices = torch.from_numpy(np.stack(indices_pre,axis=-1))
if indices.shape[0] == 0:
indices = torch.lt(amplitude, np.percentile(amplitude.numpy(), 5)).nonzero(as_tuple=False)
else:
indices = torch.lt(amplitude, np.percentile(amplitude.numpy(), 5)).nonzero(as_tuple=False)
#amplitude_filt = amplitude
std = amplitude_filt.std()
mean = torch.mean(amplitude_filt)
indices_mgn = torch.gt(amplitude_filt, mean + (std)).nonzero(as_tuple=False)
if indices_mgn.shape[0] == 0:
# if flow is not entirely equally distributed, there should be at least 1 value which is above the mean
# self.logger.warn("Fallback in Dataloading bacause no values remain after filtering.")
indices_mgn = torch.gt(amplitude_filt, mean).nonzero(as_tuple=False)
indices_mgn = indices_mgn + np.asarray([[self.valid_h[0], self.valid_w[0]]], dtype=np.int)
indices_mgn = (indices_mgn[:, 0], indices_mgn[:, 1])
else:
if self.filter_flow:
# only consider the part of the mask which corresponds to the region considered in flow
amplitude_filt = torch.from_numpy(np.where(self.mask["img_start"][self.valid_h[0]:self.valid_h[1],self.valid_w[0]:self.valid_w[1]], amplitude, np.zeros_like(amplitude)))
else:
amplitude_filt = amplitude
std = amplitude_filt.std()
mean = torch.mean(amplitude_filt)
if self.var_sequence_length:
amplitude_filt = torch.where(torch.from_numpy(np.logical_and((amplitude_filt > self.seq_len_T_chunk[ids[-1]]).numpy(),(amplitude_filt<self.seq_len_T_chunk[ids[-1]+1]).numpy())),
amplitude_filt, torch.zeros_like(amplitude_filt))
# compute valid indices by thresholding
indices = torch.gt(amplitude_filt, mean + (std * 2.0)).nonzero(as_tuple=False)
if indices.shape[0] == 0:
indices = torch.gt(amplitude, mean + std).nonzero(as_tuple=False)
if indices.shape[0] == 0:
# if flow is not entirely equally distributed, there should be at least 1 value which is above the mean
#self.logger.warn("Fallback in Dataloading bacause no values remain after filtering.")
indices = torch.gt(amplitude, mean).nonzero(as_tuple=False)
indices = indices + np.asarray([[self.valid_h[0], self.valid_w[0]]], dtype=np.int)
# check if indices is not empty, if so, sample another frame (error is catched in __getitem__())
if indices.shape[0] == 0:
raise FlowError(path=[],msg=f"Empty indices array at index {ids[0]}....")
# shift ids to match size of real flow patch
indices = (indices[:, 0], indices[:, 1])
# generate number of pokes
n_pokes = int(
np.random.randint(
1, min(self.config["n_pokes"], int(indices[0].shape[0])) + 1
)
)
if seq_len_idx == -1:
ids_mgn = np.random.randint(indices_mgn[0].shape[0], size=n_pokes)
row_ids_mgn = indices_mgn[0][ids_mgn]
col_ids_mgn = indices_mgn[1][ids_mgn]
# and generate the actual pokes
ids = np.random.randint(indices[0].shape[0], size=n_pokes)
row_ids = indices[0][ids]
col_ids = indices[1][ids]
pokes = []
half_poke_size = int(self.poke_size / 2)
zeros = torch.zeros_like(flow)
poke_targets = []
for n,ids in enumerate(zip(row_ids, col_ids)):
poke = zeros
if seq_len_idx == -1:
poke_target =flow[:,row_ids_mgn[n],col_ids_mgn[n]].unsqueeze(-1).unsqueeze(-1) if self.equal_poke_val else \
flow[:,row_ids_mgn[n] - half_poke_size:row_ids_mgn[n] + half_poke_size +1,
col_ids_mgn[n] - half_poke_size:col_ids_mgn[n] + half_poke_size +1]
else:
poke_target = flow[:,ids[0],ids[1]].unsqueeze(-1).unsqueeze(-1) if self.equal_poke_val else flow[:,
ids[0] - half_poke_size : ids[0] + half_poke_size + 1,
ids[1] - half_poke_size : ids[1] + half_poke_size + 1,]
poke[
:,
ids[0] - half_poke_size: ids[0] + half_poke_size + 1,
ids[1] - half_poke_size: ids[1] + half_poke_size + 1,
] = poke_target
pokes.append(poke)
loc_and_poke = (ids,poke_target)
poke_targets.append(loc_and_poke)
# unsqueeze in case of num_pokes = 1
if self.flow_weights:
if self.yield_videos:
if seq_len_idx == -1:
complete_mask = np.ones(self.config["spatial_size"], dtype=np.bool)
else:
complete_mask = np.logical_or(np.logical_or(self.mask["img_tgt"],self.mask["img_start"]), self.mask["img_mid"])
mask_ids = np.nonzero(complete_mask)
try:
min_h = mask_ids[0].min()
max_h = mask_ids[0].max()
min_w = mask_ids[1].min()
max_w = mask_ids[1].max()
weights = np.full(self.mask["img_start"].shape,self.weight_value_bg)
weights[min_h:max_h,min_w:max_w] = self.weight_value_flow
except Exception as e:
self.logger.warn(f'Catch exception in "dataset._get_poke()": {e.__class__.__name__}: "{e}". Using full image instead of patch....')
weights = np.full(self.mask["img_start"].shape,self.weight_value_bg)
weights[self.valid_h[0]:self.valid_h[1],self.valid_w[0]:self.valid_w[1]] = self.weight_value_flow
#weights = np.where(complete_mask,np.full_like(complete_mask,self.weight_value_flow,dtype=np.float),np.full_like(complete_mask,self.weight_value_bg,dtype=np.float),)
else:
weights = np.where(self.mask["img_tgt"],np.full_like(self.mask["img_tgt"],self.weight_value_flow,dtype=np.float),np.full_like(self.mask["img_tgt"],self.weight_value_bg,dtype=np.float),)
# poke regions get higher weights
# for poke in pokes:
# weights = np.where(((poke**2).sum(0)>0),np.full_like(weights,self.weight_value_poke),weights)
weights = torch.from_numpy(weights)
pokes = torch.stack(pokes, dim=0).squeeze(0)
if "yield_poke_target" in kwargs:
return pokes, weights, poke_targets
return pokes, weights
else:
pokes = torch.stack(pokes, dim=0).squeeze(0)
if "yield_poke_target" in kwargs:
return pokes, poke_targets
return pokes
def _get_flow_series(self, ids, step_width=10, **kwargs):
all_flows = []
for i in range(1, step_width+1):
new_ids = (ids[0] + i * (1 + self.valid_lags[0]) * 5, self.valid_lags[0], ids[1])
flow = self._get_3d_flow(new_ids)
all_flows.append(flow)
return torch.from_numpy(np.stack(all_flows, axis=0))
# extracts entire flow
    def _get_flow(self, ids, **kwargs):
        """
        Load (from disk or RAM) the optical flow for `ids`, normalize or rescale
        its magnitudes, resize it to the configured spatial size and apply the
        currently sampled geometric augmentations.

        :return: float tensor of shape (2, H, W); all-zero if ids[-1] == -1
        :raises FlowError: when the flow file cannot be loaded (caught and retried in __getitem__)
        """
        if self.flow_in_ram:
            # preloaded flows are assumed to be already normalized/resized
            flow = torch.from_numpy(self.loaded_flows[ids[0]])
        else:
            flow_path = self.datadict["flow_paths"][ids[0], self.valid_lags[0]]
            # debug, this path seems to be erroneous
            # flow_path = "/export/data/ablattma/Datasets/plants/processed_crops/VID_0_3_1024x1024/prediction_3_28.flow.npy"
            try:
                flow = np.load(flow_path)
            except ValueError:
                # some flow files were written with pickling enabled; retry accordingly
                try:
                    flow = np.load(flow_path,allow_pickle=True)
                except Exception as ex:
                    print(ex)
                    raise FlowError(flow_path)
            except:
                raise FlowError(flow_path)
            if self.normalize_flows:
                flow = flow / self.flow_norms["max_norm"][self.valid_lags[0]]
            elif not self.normalize_flows and self.scale_poke_to_res:
                # scaling of poke magnitudes to current resolution
                flow = flow / (flow.shape[1]/self.config["spatial_size"][0])
            dsize = self.config["spatial_size"]
            flow = F.interpolate(
                torch.from_numpy(flow).unsqueeze(0), size=dsize, mode="bilinear",align_corners=True
            ).squeeze(0)
        if ids[-1] == -1:
            # zero-poke sample: return an all-zero flow field
            flow = torch.zeros_like(flow)
        if self.geom_transfs is not None:
            # apply the sampled geometric augmentations channel-wise via 32-bit float PIL images
            c1 = Image.fromarray(flow[0].numpy(),mode="F")
            c2 = Image.fromarray(flow[1].numpy(),mode="F")
            for tr in self.geom_transfs:
                c1 = tr(c1)
                c2 = tr(c2)
            # NOTE(review): PIL's Image.size is (width, height), so reshape(size[0], size[1])
            # is only correct for square images — confirm for non-square spatial sizes.
            flow = torch.from_numpy(np.stack([np.array(c1.getdata()).reshape(c1.size[0],c1.size[1]),
                                              np.array(c2.getdata()).reshape(c2.size[0],c2.size[1])],axis=0)).to(torch.float)
        return flow
    def _get_color_transforms(self):
        """
        Sample one coherent set of color-jitter transforms (brightness, contrast,
        hue, saturation) to be applied identically to every frame of a sample.

        :return: list of partials over torchvision.functional adjust_* ops
        """
        # to make sure, the transformations are always coherent within the same sample
        make_trans = bool(np.random.choice(np.arange(2), size=1, p=[1 - self.p_col ,self.p_col]))
        brightness_val = float(np.random.uniform(-self.ab,self.ab,1)) if self.ab > 0. and make_trans else 0.
        contrast_val = float(np.random.uniform(-self.ac, self.ac, 1)) if self.ac > 0. and make_trans else 0.
        # NOTE(review): the hue range (-ah, 2*ah) is asymmetric, unlike the other
        # parameters — confirm this is intended.
        hue_val = float(np.random.uniform(-self.ah, 2 * self.ah, 1)) if self.ah > 0. and make_trans else 0.
        saturation_val = 1. + (float(np.random.uniform(-self.a_s,self.a_s)) if self.a_s > 0. and make_trans else 0)
        b_T = partial(FT.adjust_brightness,brightness_factor=1. + brightness_val)
        c_T = partial(FT.adjust_contrast,contrast_factor=1. + contrast_val)
        h_T = partial(FT.adjust_hue, hue_factor=hue_val)
        s_T = partial(FT.adjust_saturation,saturation_factor =saturation_val)
        return [b_T,c_T,h_T,s_T]
    def _get_geometric_transforms(self):
        """
        Sample one coherent set of geometric transforms: reflect-pad by half the
        spatial size, apply a random rotation/translation, then center-crop back
        to the original size (so the affine op never introduces empty borders).

        :return: list [pad, affine, center_crop] of torchvision.functional partials
        """
        # to make sure, the transformations are always coherent within the same sample
        make_trans = bool(np.random.choice(np.arange(2),size=1,p=[1-self.p_geom,self.p_geom]))
        rval = float(np.random.uniform(-self.ad,self.ad,1)) if self.ad > 0. and make_trans else 0.
        tval_vert = int(np.random.randint(int(-self.at[0] * self.config["spatial_size"][1] / 2), int(self.at[0] * self.config["spatial_size"][1] / 2), 1)) if self.at[0] > 0 and make_trans else 0
        tval_hor = int(np.random.randint(int(-self.at[1] * self.config["spatial_size"][0] / 2), int(self.at[1] * self.config["spatial_size"][0] / 2), 1)) if self.at[1] > 0 and make_trans else 0
        a_T = partial(FT.affine,angle=rval,translate=(tval_hor,tval_vert),scale=1.0,shear=0)
        p = partial(FT.pad,padding=(int(self.config["spatial_size"][0] / 2), int(self.config["spatial_size"][1] / 2)),padding_mode="reflect")
        c = partial(FT.center_crop,output_size=self.config["spatial_size"])
        return [p,a_T,c]
def _get_flip_transform(self):
flip = bool(np.random.choice([True,False],size=1))
if flip:
return FT.vflip
else:
return None
    @abstractmethod
    def __len__(self):
        """Number of samples; also runs one-time sanity checks on the subclass setup."""
        # as len at least once before dataloading, generic checks can be put here
        # NOTE(review): despite @abstractmethod this body is concrete; subclasses
        # apparently rely on it (e.g. via super().__len__()) — confirm.
        assert self.valid_lags is not None
        assert self.min_frames is not None
        if self.filter_flow:
            assert self.flow_width_factor is not None, f"If the dataset shall be filtered, the flow width factor has to be set in the constructor of the respective child class of BaseDataset"
            assert isinstance(self.flow_width_factor,int)
        if self.flow_weights:
            assert self.flow_width_factor is not None
        if self.normalize_flows:
            assert self.flow_norms is not None
        if self.flow_in_ram:
            assert len(self.loaded_flows) == self.datadict["flow_paths"].shape[0]
        if self.imgs_in_ram:
            assert len(self.loaded_imgs) == self.datadict["img_path"].shape[0]
        if self.var_sequence_length:
            # variable-length training requires normalized flows, video mode and
            # the per-length id/weight tables to be populated
            assert self.normalize_flows
            assert self.yield_videos
            assert len(self.ids_per_seq_len) > 0
            assert len(self.object_weights_per_seq_len) == len(self.ids_per_seq_len)
        return self.datadict["flow_paths"].shape[0] if isinstance(self.datadict["flow_paths"],np.ndarray) else len(self.datadict["flow_paths"])
    @abstractmethod
    def _set_instance_specific_values(self):
        """Hook for subclasses: set dataset-specific attributes (e.g. valid_lags,
        min_frames, flow normalization constants) before the dataset is used."""
        pass
    @abstractmethod
    def get_test_app_images(self) -> dict:
        """Return a dict of fixed appearance/test images used for evaluation;
        implemented by each dataset subclass."""
        pass
| 36,611 | 46.119691 | 255 | py |
interactive-image2video-synthesis | interactive-image2video-synthesis-main/data/__init__.py | from data.base_dataset import BaseDataset
from torchvision import transforms as tt
from data.flow_dataset import PlantDataset, IperDataset,Human36mDataset, VegetationDataset, LargeVegetationDataset, TaichiDataset
# registry mapping config["dataset"] names to dataset classes; add new key/value
# pairs for datasets here — all datasets should inherit from BaseDataset
__datasets__ = {"IperDataset": IperDataset,
                "PlantDataset": PlantDataset,
                "Human36mDataset": Human36mDataset,
                "VegetationDataset": VegetationDataset,
                "LargeVegetationDataset": LargeVegetationDataset,
                "TaichiDataset": TaichiDataset,
                }
def get_transforms(config):
    """
    Return a mapping from dataset name to its default image transform.

    Every registered dataset currently uses the same pipeline: convert to a
    tensor and rescale pixel values from [0, 1] to [-1, 1]. `config` is accepted
    for interface compatibility but not used.
    """
    def _to_tensor_scaled():
        return tt.Compose(
            [
                tt.ToTensor(),
                tt.Lambda(lambda x: (x * 2.0) - 1.0),
            ]
        )

    dataset_names = [
        "PlantDataset",
        "IperDataset",
        "Human36mDataset",
        "VegetationDataset",
        "LargeVegetationDataset",
        "TaichiDataset",
    ]
    return {name: _to_tensor_scaled() for name in dataset_names}
def get_dataset(config, custom_transforms=None):
    """
    Look up the dataset class named by config["dataset"] and pair it with a
    transform (the caller instantiates the class itself).

    :param config: dict with at least the key "dataset"
    :param custom_transforms: optional transform overriding the default one
    :return: (dataset_class, transforms)
    """
    dataset_cls = __datasets__[config["dataset"]]
    if custom_transforms is None:
        transforms = get_transforms(config)[config["dataset"]]
    else:
        print("Returning dataset with custom transform")
        transforms = custom_transforms
    return dataset_cls, transforms
| 2,041 | 29.029412 | 129 | py |
interactive-image2video-synthesis | interactive-image2video-synthesis-main/data/prepare_dataset.py | import os
import argparse
import configparser
import multiprocessing as mp
import pickle
import re
import time
from functools import partial
from glob import glob
from multiprocessing import Process
from os import path, makedirs

import cv2
import numpy as np
import torch
import yaml
from dotmap import DotMap
from natsort import natsorted
from torchvision import transforms as tt
from tqdm import tqdm

from utils.general import parallel_data_prefetch
from data import get_dataset
from data.helper_functions import preprocess_image
# Human3.6M action name -> action id lookup
h36m_aname2aid = {name: i for i, name in enumerate(["Directions","Discussion","Eating","Greeting","Phoning",
                                                    "Posing","Purchases","Sitting","SittingDown","Smoking",
                                                    "Photo","Waiting","Walking","WalkDog","WalkTogether"])}
# alternative spellings used by some Human3.6M releases map to the same ids
h36m_aname2aid.update({"WalkingTogether": h36m_aname2aid["WalkTogether"]})
h36m_aname2aid.update({"WalkingDog": h36m_aname2aid["WalkDog"]})
h36m_aname2aid.update({"TakingPhoto": h36m_aname2aid["Photo"]})
def _do_parallel_data_prefetch(func, Q, data, idx):
# create dummy dataset instance
# run prefetching
res = func(data)
Q.put([idx, res])
Q.put("Done")
def get_image(vidcap, frame_number, spatial_size=None):
    """
    Seek the opened VideoCapture to `frame_number` and return that frame (BGR).

    :param vidcap: an opened cv2.VideoCapture
    :param frame_number: 0-based frame index to read
    :param spatial_size: if given, resize the frame to (spatial_size, spatial_size)
    :return: the frame as a numpy array (may be None if the read fails; callers check)
    """
    # use the named property instead of the magic constant 1
    vidcap.set(cv2.CAP_PROP_POS_FRAMES, frame_number)
    _, img = vidcap.read()
    # NOTE(review): only img.shape[0] is compared, i.e. frames are assumed square
    if spatial_size is not None and spatial_size != img.shape[0]:
        img = cv2.resize(img, (spatial_size, spatial_size), interpolation=cv2.INTER_LINEAR)
    return img
def process_video(f_name, args):
    """
    Worker that extracts frames and FlowNet optical-flow predictions for one or
    more video files into args.processed_dir. Runs on the GPU with the lowest
    memory usage. Returns the string "Finish".
    """
    # local imports: keep CUDA/model setup out of the parent process
    from utils.flownet_loader import FlownetPipeline
    from utils.general import get_gpu_id_with_lowest_memory, get_logger
    target_gpus = None if len(args.target_gpus) == 0 else args.target_gpus
    gpu_index = get_gpu_id_with_lowest_memory(target_gpus=target_gpus)
    torch.cuda.set_device(gpu_index)
    #f_name = vid_path.split(vid_path)[-1]
    logger = get_logger(f"{gpu_index}")
    extract_device = torch.device("cuda", gpu_index.index if isinstance(gpu_index,torch.device) else gpu_index)
    # load flownet
    pipeline = FlownetPipeline()
    flownet = pipeline.load_flownet(args, extract_device)
    # open video
    base_raw_dir = args.raw_dir.split("*")[0]
    if not isinstance(f_name,list):
        f_name = [f_name]
    logger.info(f"Iterating over {len(f_name)} files...")
    for fn in tqdm(f_name,):
        if fn.startswith('/'):
            fn = fn[1:]
        vid_path = path.join(base_raw_dir, fn)
        # vid_path = f"Code/input/train_data/movies/{fn}"
        vidcap = cv2.VideoCapture()
        vidcap.open(vid_path)
        counter = 0
        # wait up to ~10s for the capture to open (requires module-level `import time`)
        while not vidcap.isOpened():
            counter += 1
            time.sleep(1)
            if counter > 10:
                raise Exception("Could not open movie")
        # get some metadata
        number_frames = int(vidcap.get(cv2.CAP_PROP_FRAME_COUNT))
        height = int(vidcap.get(cv2.CAP_PROP_FRAME_HEIGHT))
        width = int(vidcap.get(cv2.CAP_PROP_FRAME_WIDTH))
        #upright = height > widt
        # create target path if not existent
        if args.data.dataset == 'Human36mDataset':
            # Human3.6M layout: <processed_dir>/<subject>/<action>-<subaction>/<cam_id>
            vid_name = fn.split('/')[-1]
            if 'ALL' in vid_name:
                continue
            action = vid_name.split(' ')[0] if ' ' in vid_name else vid_name.split('.')[0]
            # sibling videos of the same action (used to infer the subaction id)
            same_action_videos =list(filter(lambda y : y.startswith(action) and re.search(r'\d+$', y.split('.')[0]) is not None,
                                            map(lambda x: x.split('/')[-1],f_name)))
            subject = fn.split('/')[-2]
            if re.search(r'\d+$', fn.split('.')[0]) is not None:
                subaction_id = int(fn[-1])
            else:
                max_id = max(map(lambda z: int(z.split(' ')[-1].split('.')[0]), same_action_videos))
                if max_id ==2:
                    subaction_id = 1
                else:
                    subaction_id = 2
            cam_id = vid_name.split('.')[1]
            base_path = path.join(args.processed_dir,subject,f'{action}-{subaction_id}',cam_id)
        else:
            base_path = path.join(args.processed_dir, fn.split(".")[0]) #.replace(str,str(args.spatial_size)))
        # base_path = f"Code/input/train_data/images/{f_name.split('.')[0]}/"
        makedirs(base_path, exist_ok=True)
        delta = args.flow_delta
        diff = args.flow_max
        # begin extraction
        for frame_number in range(0, number_frames, args.frames_discr):
            # break if not enough frames to properly extract sequence
            if frame_number >= number_frames - diff * args.frames_discr:
                break
            first_fidx, second_fidx = frame_number, frame_number + diff * args.frames_discr
            image_target_file = path.join(base_path, f"frame_{frame_number}.png")
            # image_target_file = f"{base_path}frame_{frame_number}.png"
            # FRAME
            if not path.exists(image_target_file):
                # write frame itself
                img = get_image(vidcap, frame_number)
                if img is None:
                    continue
                # if upright:
                #     img = cv2.transpose(img)
                try:
                    if args.spatial_size is None:
                        success = cv2.imwrite(image_target_file, img)
                    else:
                        img_res = cv2.resize(img,(args.spatial_size,args.spatial_size), interpolation=cv2.INTER_LINEAR)
                        success = cv2.imwrite(image_target_file,img_res)
                except cv2.error as e:
                    print(e)
                    continue
                except Exception as ex:
                    print(ex)
                    continue
                # if success:
                #     logger.info(f'wrote img with shape {img.shape} to "{image_target_file}".')
            # FLOW
            # predict flow from first_fidx to each target between delta and diff steps ahead
            for d in range(0, diff*args.frames_discr, delta*args.frames_discr):
                if second_fidx - d < number_frames:
                    flow_target_file = path.join(
                        base_path, f"prediction_{first_fidx}_{second_fidx-d}.flow"
                    )
                    if not os.path.exists(flow_target_file + ".npy"):
                        # predict and write flow prediction
                        img, img2 = (
                            get_image(vidcap, first_fidx),
                            get_image(vidcap, second_fidx - d),
                        )
                        image_target_file2 = path.join(base_path, f"frame_{second_fidx - d}.png")
                        if not path.exists(image_target_file2):
                            try:
                                if args.spatial_size is None:
                                    success = cv2.imwrite(image_target_file2, img2)
                                else:
                                    img_res2 = cv2.resize(img2, (args.spatial_size, args.spatial_size), interpolation=cv2.INTER_LINEAR)
                                    success = cv2.imwrite(image_target_file2, img_res2)
                            except cv2.error as e:
                                print(e)
                                continue
                            except Exception as ex:
                                print(ex)
                                continue
                        sample = pipeline.preprocess_image(img, img2, "BGR",spatial_size=args.input_size).to(
                            extract_device
                        )
                        prediction = (
                            pipeline.predict(flownet, sample[None],spatial_size=args.spatial_size)
                            .cpu()
                            .detach()
                            .numpy()
                        )
                        np.save(flow_target_file, prediction)
    logger.info(
        f'Finish processing video sequence "{fn}".')
    return "Finish"
def extract(args):
    """
    Fan out frame/flow extraction over args.num_workers processes: split the
    video list into contiguous chunks and run process_video on each chunk via
    _do_parallel_data_prefetch, collecting results from a shared queue.
    """
    # if args.process_vids:
    base_dir = args.raw_dir.split("*")[0]
    if not args.raw_dir.endswith('*'):
        args.raw_dir =path.join(args.raw_dir,'*')
    # video paths relative to the raw base dir, filtered by extension
    data_names = [p.split(base_dir)[-1] for p in glob(args.raw_dir) if p.endswith(args.video_format)]
    # data_names = [d for d in data_names if d in ['/VID_0_5.mkv','/VID_7_0.mkv']]
    fn_extract = partial(process_video, args=args)
    Q = mp.Queue(1000)
    # step = (
    #     int(len(data_names) / args.num_workers + 1)
    #     if len(data_names) % args.num_workers != 0
    #     else int(len(data_names) / args.num_workers)
    # )
    splits = np.array_split(np.arange(len(data_names)), args.num_workers)
    arguments = [
        [fn_extract, Q, part, i]
        for i, part in enumerate(
            [data_names[s[0]:s[-1]+1] for s in splits]
        )
    ]
    processes = []
    for i in range(args.num_workers):
        p = Process(target=_do_parallel_data_prefetch, args=arguments[i])
        processes += [p]
    # timing requires the module-level `import time`
    start = time.time()
    gather_res = [[] for _ in range(args.num_workers)]
    try:
        # stagger worker start-up to avoid all workers probing GPUs at once
        for p in processes:
            p.start()
            time.sleep(20)
        k = 0
        # drain the queue until every worker has sent its "Done" sentinel
        while k < args.num_workers:
            # get result
            res = Q.get()
            if res == "Done":
                k += 1
            else:
                gather_res[res[0]] = res[1]
    except Exception as e:
        print("Exception: ", e)
        for p in processes:
            p.terminate()
        raise e
    finally:
        for p in processes:
            p.join()
    print(f"Prefetching complete. [{time.time() - start} sec.]")
def prepare(args):
    """Scan the processed frame/flow directories and pickle a meta file.

    Walks every video directory under ``args.processed_dir``, and for each
    extracted frame records its path, frame id, video id, object/actor/action
    ids (dataset dependent), image/flow sizes and the list of flow-map paths
    for all configured lags. The result is serialized to
    ``<processed_dir>/meta.p``.
    """
    logger = get_logger("dataset_preparation")
    datadict = {
        "img_path": [],
        "flow_paths": [],
        "fid": [],
        "vid": [],
        "img_size": [],
        "flow_size": [],
        "object_id":[],
        "max_fid": []
    }
    # human datasets additionally carry per-video action/actor annotations
    if "iPER" in args.processed_dir.split("/") or "human36m" in args.processed_dir.split("/") or \
            "human3.6M" in args.processed_dir.split("/") :
        datadict.update({"action_id": [], "actor_id": []})
    train_test_split = args.data.dataset == 'Human36mDataset' or args.data.dataset == 'TaichiDataset'
    fmax = args.flow_max
    fdelta = args.flow_delta
    fd = args.frames_discr
    if train_test_split:
        datadict.update({"train": []})
    if args.data.dataset == 'TaichiDataset':
        # maps taichi object names to consecutive integer ids
        oname2oid = {}
    # logger.info(f'Metafile is stored as "{args.meta_file_name}.p".')
    # logger.info(f"args.check_imgs is {args.check_imgs}")
    # number of flow lags expected per frame
    max_flow_length = int(fmax / fdelta)
    # if args.process_vids:
    # directory nesting differs per dataset (extra split / actor levels)
    if train_test_split:
        if args.data.dataset == 'Human36mDataset':
            videos = [d for d in glob(path.join(args.processed_dir, "*", "*", '*')) if path.isdir(d)]
        else:
            videos = [d for d in glob(path.join(args.processed_dir, "*", "*")) if path.isdir(d)]
    else:
        videos = [d for d in glob(path.join(args.processed_dir, "*")) if path.isdir(d)]
    videos = natsorted(videos)
    actual_oid = 0
    for vid, vid_name in enumerate(videos):
        images = glob(path.join(vid_name, "*.png"))
        images = natsorted(images)
        actor_id = action_id = train = None
        # derive per-video identifiers from the directory naming scheme
        if args.data.dataset == 'PlantDataset':
            object_id = int(vid_name.split("/")[-1].split("_")[1])
        elif args.data.dataset == 'IperDataset':
            object_id = 100 * int(vid_name.split("/")[-1].split("_")[0]) + int(vid_name.split("/")[-1].split("_")[1])
            actor_id = int(vid_name.split("/")[-1].split("_")[0])
            action_id = int(vid_name.split("/")[-1].split("_")[-1])
        elif args.data.dataset == 'TaichiDataset':
            train = "train" == vid_name.split("/")[-2]
            msg = "train" if train else "test"
            print(f"Video in {msg}-split")
            obj_name = vid_name.split("/")[-1].split("#")[0]
            if obj_name in oname2oid.keys():
                object_id = oname2oid[obj_name]
            else:
                object_id = actual_oid
                oname2oid.update({obj_name: actual_oid})
                actual_oid += 1
        elif args.data.dataset == 'Human36mDataset':
            actor_id = int(vid_name.split('/')[-3][1:])
            object_id = actor_id
            action_name = vid_name.split('/')[-2].split('-')[0]
            action_id = h36m_aname2aid[action_name]
            # actors 9 and 11 form the test split (common H3.6M protocol)
            train = actor_id not in [9,11]
        else:
            raise ValueError("invalid dataset....")
        # max_flow_id = [len(images) - flow_step -1 for flow_step in range(fdelta*fd,fmax*fd+1, fdelta*fd)]
        for i, img_path in enumerate(
            tqdm(
                images,
                desc=f'Extracting meta information of video "{vid_name.split(args.processed_dir)[-1]}"',
            )
        ):
            fid = int(img_path.split("_")[-1].split(".")[0])
            #search_pattern = f'[{",".join([str(fid + n) for n in range(args.flow_delta,args.flow_max + 1, args.flow_delta)])}]'
            # flow maps named prediction_<src>_<dst>.npy; keep only lags that
            # are multiples of fdelta*fd and at most fmax*fd frames apart
            flows = natsorted([s for s in glob(path.join(vid_name, f"prediction_{fid}_*.npy"))
                               if (int(s.split("_")[-1].split(".")[0]) - int(s.split("_")[-2])) % (fdelta * fd) == 0 and
                               int(s.split("_")[-1].split(".")[0]) - int(s.split("_")[-2]) <= fmax*fd])
            # skip example if second image path does not exist
            if any(map(lambda p: not path.isfile(path.join(vid_name, f'frame_{p.split("_")[-1].split(".")[0]}.png')),flows)):
                logger.info(f'Breaking meta file information processing earlier for video "{vid_name.split("/")[-1]}", since not all image frames have been extracted.')
                break
            # make relative paths
            img_path_rel = img_path.split(args.processed_dir)[1]
            flows_rel = [f.split(args.processed_dir)[1] for f in flows]
            # filter flows
            flows_rel = [f for f in flows_rel if (int(f.split("/")[-1].split(".")[0].split("_")[-1]) - int(f.split("/")[-1].split(".")[0].split("_")[-2])) <= fmax*fd]
            # pad missing lags with the corresponding entries of the previous frame
            # NOTE(review): if the very first processed frame already has too few
            # flows, `last_flow_paths` is unbound here — verify inputs guarantee this
            if len(flows_rel) < max_flow_length:
                diff = max_flow_length-len(flows_rel)
                [flows_rel.insert(len(flows_rel),last_flow_paths[len(flows_rel)]) for _ in range(diff)]
            w_img = args.spatial_size
            h_img = args.spatial_size
            if len(flows) > 0:
                w_f = args.spatial_size
                h_f = args.spatial_size
            else:
                h_f = w_f = None
            assert len(flows_rel) == max_flow_length
            datadict["img_path"].append(img_path_rel)
            datadict["flow_paths"].append(flows_rel)
            datadict["fid"].append(fid)
            datadict["vid"].append(vid)
            # image size compliant with numpy and torch
            datadict["img_size"].append((h_img, w_img))
            datadict["flow_size"].append((h_f, w_f))
            datadict["object_id"].append(object_id)
            # datadict["max_fid"].append(max_flow_id)
            if action_id is not None:
                datadict["action_id"].append(action_id)
            if actor_id is not None:
                datadict["actor_id"].append(actor_id)
            if train is not None:
                datadict["train"].append(train)
            last_flow_paths = flows_rel
    logger.info(f'Prepared dataset consists of {len(datadict["img_path"])} samples.')
    # Store data (serialize)
    save_path = path.join(
        args.processed_dir, "meta.p"
    )
    with open(save_path, "wb") as handle:
        pickle.dump(datadict, handle, protocol=pickle.HIGHEST_PROTOCOL)
def load_flow(flow_paths):
    """Collect the extremal flow magnitudes of every flow file.

    Each file in *flow_paths* is expected to hold a (2, H, W) optical-flow
    field. For every file the per-pixel L2 norm over the flow channels is
    computed and the pair (max norm, min norm) is recorded. Files that fail
    to load are reported and skipped.

    Returns:
        np.ndarray of shape (N, 2): column 0 holds the maximum norm,
        column 1 the minimum norm per successfully loaded file.
    """
    extrema = []
    for flow_file in tqdm(flow_paths):
        try:
            field = np.load(flow_file)
        except Exception as err:
            # best effort: report and move on to the next file
            print(err)
            continue
        magnitude = np.linalg.norm(field, 2, 0)
        extrema.append(np.stack([np.amax(magnitude), np.amin(magnitude)]))
    return np.stack(extrema, 0)
def norms(cfg_dict):
    """Compute per-lag flow-norm statistics and pickle them as dataset_stats.p.

    Instantiates the training split of the configured dataset with flow
    normalization disabled, then — for every flow-lag column — gathers each
    file's (max, min) flow norms in parallel via ``load_flow``. Stores the
    global max/min norm and the deciles of the per-file maxima, one entry
    per lag, under ``<datapath>/dataset_stats.p``.
    """
    cfg_dict['data']['normalize_flows'] = False
    transforms = tt.Compose(
        [tt.ToTensor(), tt.Lambda(lambda x: (x * 2.0) - 1.0)]
    )
    dataset_cls, _ = get_dataset(config=cfg_dict["data"])
    train_set = dataset_cls(transforms, ["flow", "images"], cfg_dict["data"], train=True)
    flow_paths = train_set.data["flow_paths"]
    stats_dict = {"max_norm": [], "min_norm": [], "percentiles": []}
    decile_positions = list(range(10, 100, 10))
    for lag in range(flow_paths.shape[-1]):
        train_set.logger.info(f'Computing mean of flow with lag {(lag + 1) * cfg_dict["flow_delta"]}')
        # (N, 2) array: column 0 = per-file max norm, column 1 = per-file min norm
        norm_pairs = parallel_data_prefetch(load_flow, flow_paths[:, lag], cfg_dict['data']['num_workers'])
        deciles = np.percentile(norm_pairs[:, 0], decile_positions)
        stats_dict["percentiles"].append({pos: val for pos, val in zip(decile_positions, deciles)})
        stats_dict["max_norm"].append(float(np.amax(norm_pairs[:, 0])))
        stats_dict["min_norm"].append(float(np.amin(norm_pairs[:, 1])))
    # save
    savepath = path.join(train_set.datapath, "dataset_stats.p")
    with open(savepath, "wb") as handle:
        pickle.dump(stats_dict, handle, protocol=pickle.HIGHEST_PROTOCOL)
def process_flows(flow_data):
    """Estimate the foreground flow-magnitude range of each sample.

    Each element of *flow_data* is a tuple ``(flow_path, img_path, lag,
    dataset)``. The flow is loaded, normalized by the dataset's stored max
    norm for that lag, optionally restricted to the grabCut foreground of
    the paired image, and thresholded to the "moving" pixels. Returns an
    (N, 3) array with columns (min amplitude, max amplitude, error flag).
    """
    out = np.zeros((len(flow_data), 3))
    for i, dp in enumerate(tqdm(flow_data)):
        flow = np.load(dp[0])
        lag = dp[2]
        test_dataset = dp[3]
        # flow = flow - test_dataset.flow_norms["min_norm"][test_dataset.valid_lags[0]]
        # scale flows to a comparable range using the precomputed per-lag max norm
        flow = flow / test_dataset.flow_norms["max_norm"][lag]
        img = cv2.imread(dp[1])
        # image is read in BGR
        img = preprocess_image(img, swap_channels=True)
        mask = np.zeros(img.shape[:2], np.uint8)
        # rect defines starting background area
        if test_dataset.filter_flow:
            rect = (
                int(img.shape[1] / test_dataset.flow_width_factor), test_dataset.valid_h[0], int((test_dataset.flow_width_factor - 2) / test_dataset.flow_width_factor * img.shape[1]),
                test_dataset.valid_h[1] - test_dataset.valid_h[0])
            # initialize background and foreground models
            fgm = np.zeros((1, 65), dtype=np.float64)
            bgm = np.zeros((1, 65), dtype=np.float64)
            # apply grab cut algorithm
            mask2, fgm, bgm = cv2.grabCut(img, mask, rect, fgm, bgm, 5, cv2.GC_INIT_WITH_RECT)
        # per-pixel flow magnitude inside the valid crop window
        amplitude = np.linalg.norm(flow[:, test_dataset.valid_h[0]:test_dataset.valid_h[1], test_dataset.valid_w[0]:test_dataset.valid_w[1]], 2, axis=0)
        if test_dataset.filter_flow:
            # only consider the part of the mask which corresponds to the region considered in flow
            amplitude_filt = np.where(mask2[test_dataset.valid_h[0]:test_dataset.valid_h[1], test_dataset.valid_w[0]:test_dataset.valid_w[1]], amplitude, np.zeros_like(amplitude))
        else:
            amplitude_filt = amplitude
        std = amplitude_filt.std()
        mean = np.mean(amplitude_filt)
        # threshold cascade: mean+2*std, then mean+std, then mean — take the
        # first threshold that leaves any pixels
        indices = np.argwhere(np.greater(amplitude_filt, mean + (std * 2.0)))
        if indices.shape[0] == 0:
            indices = np.argwhere(np.greater(amplitude_filt, np.mean(amplitude_filt) + amplitude_filt.std()))
        if indices.shape[0] == 0:
            print("Fallback in Dataloading bacause no values remain after filtering.")
            # there should be at least one element that is above the mean if flows are not entirely equally distributed
            indices = np.argwhere(np.greater(amplitude_filt, mean))
        if indices.shape[0] == 0:
            # completely uniform amplitude: mark as error and skip the sample
            print("strange case, cannot occure, skip")
            out[i, -1] = 1
            continue
        values = np.asarray([amplitude_filt[idx[0], idx[1]] for idx in indices])
        out[i, 0] = values.min()
        out[i, 1] = values.max()
    return out
def stats(cfg_dict):
    """Compute per-sample flow-range statistics and append them to the meta file.

    For every flow lag, runs ``process_flows`` in parallel over (flow, image)
    pairs of the training split, stacks the per-lag results into one array of
    shape (N, 3, n_lags), stores it under the ``"flow_range"`` key of the
    dataset's meta dict and pickles the result as ``<metafilename>_frange.p``.
    """
    logger=get_logger("stats_calculation")
    cfg_dict['data']['normalize_flows'] = True
    transforms = tt.Compose(
        [tt.ToTensor(), tt.Lambda(lambda x: (x * 2.0) - 1.0)]
    )
    datakeys = ["flow", "images"]
    dataset, _ = get_dataset(config=cfg_dict["data"])
    test_dataset = dataset(transforms, datakeys, cfg_dict["data"], train=True)
    all_frange_data = []
    for l in tqdm(range(test_dataset.data['flow_paths'].shape[-1])):
        logger.info(f'Calculating stats for lag of {(l+1) * cfg_dict["flow_delta"]} frames...')
        in_data = [(f, i, l, test_dataset) for f, i in zip(test_dataset.data["flow_paths"][:, l], test_dataset.data["img_path"])]
        # NOTE(review): `in_data[:100]` restricts processing to the first 100
        # samples — this looks like a debugging leftover; with it, the stacked
        # array will not cover the whole dataset. Confirm before relying on output.
        out_data = parallel_data_prefetch(process_flows,in_data[:100], n_proc=20, cpu_intensive=True, target_data_type="list")
        all_frange_data.append(out_data)
        # column 2 of process_flows' output is the per-sample error flag
        n_error = np.count_nonzero(out_data[:, 2])
        logger.info(f"While loading the data, {n_error} errors occurred.")
    all_frange_data = np.stack(all_frange_data,axis=-1)
    assert all_frange_data.shape[-1] == test_dataset.datadict['flow_paths'].shape[-1]
    with open(path.join(test_dataset.datapath,f"{test_dataset.metafilename}.p"),"rb") as f:
        datadict = pickle.load(f)
    #assert out_data.shape[0] == len(datadict["img_path"])
    key = "flow_range"
    name_key = "frange"
    datadict.update({key: all_frange_data})
    with open(path.join(test_dataset.datapath, f"{test_dataset.metafilename}_{name_key}.p"), "wb") as f:
        pickle.dump(datadict, f, protocol=pickle.HIGHEST_PROTOCOL)
if __name__ == "__main__":
    import time
    from utils.general import get_logger
    parser = argparse.ArgumentParser()
    parser.add_argument('-c', '--config',type=str,required=True,help='Config file containing all parameters.')
    config_args = parser.parse_args()
    # resolve the config path relative to the repository root (one level up)
    fpath = path.dirname(path.realpath(__file__))
    configfile = path.abspath(path.join(fpath,f'../{config_args.config}'))
    with open(configfile,'r') as f:
        args = yaml.load(f,Loader=yaml.FullLoader)
    # keep both a plain dict (cfg_dict) and attribute-style access (args)
    cfg_dict = args
    args = DotMap(args)
    # NOTE(review): this compares against 'Human3.6mDataset' while prepare()
    # checks 'Human36mDataset' — one of the two spellings is likely wrong; verify.
    if args.data.dataset == 'Human3.6mDataset':
        h36config = configparser.ConfigParser()
        h36config.read(path.join(fpath, 'config.ini'))
        args.raw_dir = path.join(h36config['General']['TARGETDIR'], 'videos','*','*')
    cfg_dict['data']['datapath'] = args.processed_dir
    if args.raw_dir == '':
        raise ValueError(f'The data holding directory is currently not defined. please define the field "raw_dir" in "{config_args.config}"')
    if args.processed_dir == '':
        raise ValueError(f'The target directory for the extracted image frames and flow maps is currently undefined. Please define the field "processed_dir" in "{config_args.config}"')
    # NOTE(review): `pool` is never used below — presumably dead code.
    pool = []
    # CUDA-safe start method for the worker processes spawned in extract()
    torch.multiprocessing.set_start_method("spawn")
    # dispatch on the requested pipeline stage
    if args.mode == "extract":
        extract(args)
    elif args.mode == "prepare":
        prepare(args)
    elif args.mode == 'stats':
        # stats(cfg_dict)
        raise NotImplementedError()
    elif args.mode == 'norms':
        # norms(cfg_dict)
        raise NotImplementedError()
    elif args.mode == 'all':
        extract(args)
        prepare(args)
        # norms(cfg_dict)
        # stats(cfg_dict)
    else:
        raise ValueError(f'The "mode"-parameter in config file "{configfile}" must be in [all, extract, prepare, norms, stats], but is actually "{args.mode}"...')
| 23,809 | 36.974482 | 185 | py |
interactive-image2video-synthesis | interactive-image2video-synthesis-main/data/samplers.py | import numpy as np
from torch.utils.data import BatchSampler,RandomSampler,SequentialSampler, WeightedRandomSampler
from data.base_dataset import BaseDataset
from data.flow_dataset import PlantDataset
class SequenceSampler(BatchSampler):
    """Batch sampler that attaches a per-batch flow lag to every index.

    Indices come from a random or sequential sampler over *dataset*; each
    yielded batch is a list of ``(index, lag)`` tuples, where the lag is
    drawn once per batch from ``dataset.valid_lags``.
    """

    def __init__(self, dataset: BaseDataset, batch_size, shuffle, drop_last):
        assert isinstance(dataset, BaseDataset), "The used dataset in Sequence Sampler must inherit from BaseDataset"
        base_sampler = RandomSampler(dataset) if shuffle else SequentialSampler(dataset)
        super().__init__(base_sampler, batch_size, drop_last)
        self.dataset = dataset

    def __iter__(self):
        current = []
        # draw the lag for the first batch
        lag = int(np.random.choice(self.dataset.valid_lags, 1))
        for index in self.sampler:
            current.append((index, lag))
            if len(current) == self.batch_size:
                yield current
                current = []
                # re-draw the lag for the next batch
                lag = int(np.random.choice(self.dataset.valid_lags, 1))
        if current and not self.drop_last:
            yield current
class FixedLengthSampler(BatchSampler):
    """Batch sampler with optional weighted sampling and "zero pokes".

    When ``zero_poke`` is enabled, roughly ``len(dataset) / zero_poke_amount``
    randomly chosen indices are replaced by the sentinel ``-1`` for the
    upcoming epoch; the dataset treats ``-1`` as a zero-poke sample.
    """

    def __init__(self, dataset: PlantDataset, batch_size, shuffle, drop_last, weighting, zero_poke, zero_poke_amount=None):
        if not shuffle:
            base_sampler = SequentialSampler(dataset)
        elif weighting:
            base_sampler = WeightedRandomSampler(weights=dataset.datadict["weights"], num_samples=len(dataset))
        else:
            base_sampler = RandomSampler(dataset)
        super().__init__(base_sampler, batch_size, drop_last)
        self.shuffle = shuffle
        self.dataset = dataset
        self.zero_poke = zero_poke
        self.zero_poke_amount = zero_poke_amount
        if self.zero_poke:
            assert self.zero_poke_amount is not None

    def __iter__(self):
        if self.zero_poke:
            # draw the subset of indices that become zero pokes this epoch
            zero_poke_ids = np.random.choice(
                np.arange(len(self.dataset)),
                size=int(len(self.dataset) / self.zero_poke_amount),
                replace=False,
            ).tolist()
            self.dataset.logger.info(f"Sampling {len(zero_poke_ids)} zeropokes for next epoch")
        else:
            zero_poke_ids = []
        current = []
        for index in self.sampler:
            current.append(-1 if index in zero_poke_ids else index)
            if len(current) == self.batch_size:
                yield current
                current = []
        if current and not self.drop_last:
            yield current
class SequenceLengthSampler(BatchSampler):
    """Batch sampler that additionally assigns a sequence length per batch.

    Each batch element is a tuple ``(n_frames, n_frames_actual)``. In shuffle
    mode the length offset is drawn per batch from a categorical distribution
    ``len_p``; in sequential mode lengths cycle deterministically. The value
    ``-1`` encodes a "zero poke" batch, for which an actual length is drawn
    uniformly as the second tuple entry.
    """

    def __init__(self, dataset:BaseDataset, batch_size, shuffle, drop_last, n_frames=None, zero_poke = False,):
        assert isinstance(dataset, BaseDataset), "The used dataset in Sequence Sampler must inherit from BaseDataset"
        assert dataset.var_sequence_length and dataset.yield_videos, "The dataset has to be run in sequence mode and has to output variable sequence lengths"
        sampler = SequentialSampler(dataset)
        super().__init__(sampler, batch_size, drop_last)
        self.dataset = dataset
        self.shuffle = shuffle
        if n_frames is not None:
            # fixed length requested: store it as an offset above min_frames
            assert n_frames >= self.dataset.min_frames and n_frames <=(self.dataset.min_frames + self.dataset.max_frames)
            self.n_frames = (n_frames-self.dataset.min_frames)
        else:
            self.n_frames = n_frames
        # -1 marks the zero-poke "length" slot when enabled
        self.start_n_frames = -1 if zero_poke else 0
        # unnormalized sampling weights over [start_n_frames, max_frames)
        if zero_poke:
            if self.dataset.train:
                self.len_p = np.asarray([self.dataset.zeropoke_weight] + [1.] * self.dataset.max_frames)
            else:
                self.len_p = np.asarray([1.] * (self.dataset.max_frames + 1))
        else:
            self.len_p = np.asarray([1.] * self.dataset.max_frames)
        if self.dataset.longest_seq_weight != None and self.dataset.train:
            # up-weight the longest sequences during training
            self.len_p[-1] = self.dataset.longest_seq_weight
            if zero_poke:
                # to keep sufficient outside pokes for the model to learn foreground and background
                self.len_p[0] = self.dataset.longest_seq_weight / 2
        self.len_p = self.len_p /self.len_p.sum()

    def __iter__(self):
        batch = []
        # sample sequence length
        if self.shuffle:
            # -1 corresponds to a zero-poke batch
            n_frames = int(np.random.choice(np.arange(self.start_n_frames,self.dataset.max_frames), 1, p=self.len_p))
        else:
            last_n = self.start_n_frames
            n_frames = last_n
        if n_frames == -1:
            # zero poke: draw the actual length uniformly
            n_frames_actual = int(np.random.choice(np.arange(self.dataset.max_frames), 1))
            appended = (n_frames, n_frames_actual)
        else:
            appended = (n_frames, None)
        for idx in self.sampler:
            # a fixed n_frames (if configured) overrides the sampled length
            appended = (appended[0] if self.n_frames is None else self.n_frames,appended[1])
            batch.append(appended)
            if len(batch) == self.batch_size:
                yield batch
                batch = []
                # sample sequence length
                if self.shuffle:
                    n_frames = int(np.random.choice(np.arange(self.start_n_frames,self.dataset.max_frames), 1,p=self.len_p))
                else:
                    # deterministic cycle through all lengths
                    n_frames = last_n+1 if last_n<self.dataset.max_frames-1 else self.start_n_frames
                    last_n = n_frames
                if n_frames == -1:
                    n_frames_actual = int(np.random.choice(np.arange(self.dataset.max_frames), 1))
                    appended = (n_frames, n_frames_actual)
                else:
                    appended = (n_frames, None)
        if len(batch) > 0 and not self.drop_last:
            yield batch
TensorFlowTTS | TensorFlowTTS-master/examples/tacotron2/train_tacotron2.py | # -*- coding: utf-8 -*-
# Copyright 2020 Minh Nguyen (@dathudeptrai)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Train Tacotron2."""
import tensorflow as tf
physical_devices = tf.config.list_physical_devices("GPU")
for i in range(len(physical_devices)):
tf.config.experimental.set_memory_growth(physical_devices[i], True)
import sys
sys.path.append(".")
import argparse
import logging
import os
import numpy as np
import yaml
from tqdm import tqdm
import tensorflow_tts
from examples.tacotron2.tacotron_dataset import CharactorMelDataset
from tensorflow_tts.configs.tacotron2 import Tacotron2Config
from tensorflow_tts.models import TFTacotron2
from tensorflow_tts.optimizers import AdamWeightDecay, WarmUp
from tensorflow_tts.trainers import Seq2SeqBasedTrainer
from tensorflow_tts.utils import calculate_2d_loss, calculate_3d_loss, return_strategy
class Tacotron2Trainer(Seq2SeqBasedTrainer):
    """Tacotron2 Trainer class based on Seq2SeqBasedTrainer."""

    def __init__(
        self,
        config,
        strategy,
        steps=0,
        epochs=0,
        is_mixed_precision=False,
    ):
        """Initialize trainer.

        Args:
            steps (int): Initial global steps.
            epochs (int): Initial global epochs.
            config (dict): Config dict loaded from yaml format configuration file.
            is_mixed_precision (bool): Use mixed precision or not.

        """
        super(Tacotron2Trainer, self).__init__(
            steps=steps,
            epochs=epochs,
            config=config,
            strategy=strategy,
            is_mixed_precision=is_mixed_precision,
        )
        # define metrics to aggregates data and use tf.summary logs them
        self.list_metrics_name = [
            "stop_token_loss",
            "mel_loss_before",
            "mel_loss_after",
            "guided_attention_loss",
        ]
        self.init_train_eval_metrics(self.list_metrics_name)
        self.reset_states_train()
        self.reset_states_eval()
        self.config = config

    def compile(self, model, optimizer):
        """Attach model/optimizer and build the per-example loss functions.

        All losses use Reduction.NONE so per-replica examples can be
        aggregated manually by the base trainer.
        """
        super().compile(model, optimizer)
        self.binary_crossentropy = tf.keras.losses.BinaryCrossentropy(
            from_logits=True, reduction=tf.keras.losses.Reduction.NONE
        )
        self.mse = tf.keras.losses.MeanSquaredError(
            reduction=tf.keras.losses.Reduction.NONE
        )
        self.mae = tf.keras.losses.MeanAbsoluteError(
            reduction=tf.keras.losses.Reduction.NONE
        )

    def _train_step(self, batch):
        """Here we re-define _train_step because apply input_signature make
        the training progress slower on my experiment. Note that input_signature
        is apply on based_trainer by default.
        """
        # lazily wrap the per-step functions in tf.function on the first call
        if self._already_apply_input_signature is False:
            self.one_step_forward = tf.function(
                self._one_step_forward, experimental_relax_shapes=True
            )
            self.one_step_evaluate = tf.function(
                self._one_step_evaluate, experimental_relax_shapes=True
            )
            self.one_step_predict = tf.function(
                self._one_step_predict, experimental_relax_shapes=True
            )
            self._already_apply_input_signature = True

        # run one_step_forward
        self.one_step_forward(batch)

        # update counts
        self.steps += 1
        self.tqdm.update(1)
        self._check_train_finish()

    def _one_step_evaluate_per_replica(self, batch):
        """One step evaluate per GPU
        Tacotron-2 used teacher-forcing when training and evaluation.
        So we need pass `training=True` for inference step.
        """
        outputs = self._model(**batch, training=True)
        _, dict_metrics_losses = self.compute_per_example_losses(batch, outputs)
        self.update_eval_metrics(dict_metrics_losses)

    def _one_step_predict_per_replica(self, batch):
        """One step predict per GPU
        Tacotron-2 used teacher-forcing when training and evaluation.
        So we need pass `training=True` for inference step.
        """
        outputs = self._model(**batch, training=True)
        return outputs

    def compute_per_example_losses(self, batch, outputs):
        """Compute per example losses and return dict_metrics_losses
        Note that all element of the loss MUST has a shape [batch_size] and
        the keys of dict_metrics_losses MUST be in self.list_metrics_name.

        Args:
            batch: dictionary batch input return from dataloader
            outputs: outputs of the model

        Returns:
            per_example_losses: per example losses for each GPU, shape [B]
            dict_metrics_losses: dictionary loss.
        """
        (
            decoder_output,
            post_mel_outputs,
            stop_token_predictions,
            alignment_historys,
        ) = outputs

        # L1 reconstruction loss before and after the postnet
        mel_loss_before = calculate_3d_loss(
            batch["mel_gts"], decoder_output, loss_fn=self.mae
        )
        mel_loss_after = calculate_3d_loss(
            batch["mel_gts"], post_mel_outputs, loss_fn=self.mae
        )

        # calculate stop_loss
        max_mel_length = (
            tf.reduce_max(batch["mel_lengths"])
            if self.config["use_fixed_shapes"] is False
            else [self.config["max_mel_length"]]
        )
        # stop targets are 0 for valid frames and 1 from mel_length onwards
        stop_gts = tf.expand_dims(
            tf.range(tf.reduce_max(max_mel_length), dtype=tf.int32), 0
        )  # [1, max_len]
        stop_gts = tf.tile(
            stop_gts, [tf.shape(batch["mel_lengths"])[0], 1]
        )  # [B, max_len]
        stop_gts = tf.cast(
            tf.math.greater_equal(stop_gts, tf.expand_dims(batch["mel_lengths"], 1)),
            tf.float32,
        )

        stop_token_loss = calculate_2d_loss(
            stop_gts, stop_token_predictions, loss_fn=self.binary_crossentropy
        )

        # calculate guided attention loss.
        # padded positions of the guidance matrix are marked with -1 and masked out
        attention_masks = tf.cast(
            tf.math.not_equal(batch["g_attentions"], -1.0), tf.float32
        )
        loss_att = tf.reduce_sum(
            tf.abs(alignment_historys * batch["g_attentions"]) * attention_masks,
            axis=[1, 2],
        )
        loss_att /= tf.reduce_sum(attention_masks, axis=[1, 2])

        per_example_losses = (
            stop_token_loss + mel_loss_before + mel_loss_after + loss_att
        )

        dict_metrics_losses = {
            "stop_token_loss": stop_token_loss,
            "mel_loss_before": mel_loss_before,
            "mel_loss_after": mel_loss_after,
            "guided_attention_loss": loss_att,
        }

        return per_example_losses, dict_metrics_losses

    def generate_and_save_intermediate_result(self, batch):
        """Generate and save intermediate result."""
        import matplotlib.pyplot as plt

        # predict with tf.function for faster.
        outputs = self.one_step_predict(batch)
        (
            decoder_output,
            mel_outputs,
            stop_token_predictions,
            alignment_historys,
        ) = outputs
        mel_gts = batch["mel_gts"]
        utt_ids = batch["utt_ids"]

        # convert to tensor.
        # here we just take a sample at first replica.
        # (PerReplica values expose .values; single-device tensors do not)
        try:
            mels_before = decoder_output.values[0].numpy()
            mels_after = mel_outputs.values[0].numpy()
            mel_gts = mel_gts.values[0].numpy()
            alignment_historys = alignment_historys.values[0].numpy()
            utt_ids = utt_ids.values[0].numpy()
        except Exception:
            mels_before = decoder_output.numpy()
            mels_after = mel_outputs.numpy()
            mel_gts = mel_gts.numpy()
            alignment_historys = alignment_historys.numpy()
            utt_ids = utt_ids.numpy()

        # check directory
        dirname = os.path.join(self.config["outdir"], f"predictions/{self.steps}steps")
        if not os.path.exists(dirname):
            os.makedirs(dirname)

        for idx, (mel_gt, mel_before, mel_after, alignment_history) in enumerate(
            zip(mel_gts, mels_before, mels_after, alignment_historys), 0
        ):
            mel_gt = tf.reshape(mel_gt, (-1, 80)).numpy()  # [length, 80]
            mel_before = tf.reshape(mel_before, (-1, 80)).numpy()  # [length, 80]
            mel_after = tf.reshape(mel_after, (-1, 80)).numpy()  # [length, 80]

            # plot figure and save it
            utt_id = utt_ids[idx]
            figname = os.path.join(dirname, f"{utt_id}.png")
            fig = plt.figure(figsize=(10, 8))
            ax1 = fig.add_subplot(311)
            ax2 = fig.add_subplot(312)
            ax3 = fig.add_subplot(313)
            im = ax1.imshow(np.rot90(mel_gt), aspect="auto", interpolation="none")
            ax1.set_title("Target Mel-Spectrogram")
            fig.colorbar(mappable=im, shrink=0.65, orientation="horizontal", ax=ax1)
            ax2.set_title(f"Predicted Mel-before-Spectrogram @ {self.steps} steps")
            im = ax2.imshow(np.rot90(mel_before), aspect="auto", interpolation="none")
            fig.colorbar(mappable=im, shrink=0.65, orientation="horizontal", ax=ax2)
            ax3.set_title(f"Predicted Mel-after-Spectrogram @ {self.steps} steps")
            im = ax3.imshow(np.rot90(mel_after), aspect="auto", interpolation="none")
            fig.colorbar(mappable=im, shrink=0.65, orientation="horizontal", ax=ax3)
            plt.tight_layout()
            plt.savefig(figname)
            plt.close()

            # plot alignment
            figname = os.path.join(dirname, f"{idx}_alignment.png")
            fig = plt.figure(figsize=(8, 6))
            ax = fig.add_subplot(111)
            ax.set_title(f"Alignment @ {self.steps} steps")
            im = ax.imshow(
                alignment_history, aspect="auto", origin="lower", interpolation="none"
            )
            fig.colorbar(im, ax=ax)
            xlabel = "Decoder timestep"
            plt.xlabel(xlabel)
            plt.ylabel("Encoder timestep")
            plt.tight_layout()
            plt.savefig(figname)
            plt.close()
def main():
    """Run the Tacotron2 training process.

    Parses CLI arguments, builds train/validation datasets, instantiates the
    Tacotron2 model plus AdamW optimizer under the distribution strategy,
    and fits until ``train_max_steps`` (saving a checkpoint on Ctrl-C).
    """
    # NOTE: description previously said "Train FastSpeech" — a copy/paste
    # leftover from the FastSpeech trainer; this script trains Tacotron2.
    parser = argparse.ArgumentParser(
        description="Train Tacotron2 (See detail in examples/tacotron2/train_tacotron2.py)"
    )
    parser.add_argument(
        "--train-dir",
        default=None,
        type=str,
        help="directory including training data. ",
    )
    parser.add_argument(
        "--dev-dir",
        default=None,
        type=str,
        help="directory including development data. ",
    )
    parser.add_argument(
        "--use-norm", default=1, type=int, help="usr norm-mels for train or raw."
    )
    parser.add_argument(
        "--outdir", type=str, required=True, help="directory to save checkpoints."
    )
    parser.add_argument(
        "--config", type=str, required=True, help="yaml format configuration file."
    )
    parser.add_argument(
        "--resume",
        default="",
        type=str,
        nargs="?",
        help='checkpoint file path to resume training. (default="")',
    )
    parser.add_argument(
        "--verbose",
        type=int,
        default=1,
        help="logging level. higher is more logging. (default=1)",
    )
    parser.add_argument(
        "--mixed_precision",
        default=0,
        type=int,
        help="using mixed precision for generator or not.",
    )
    parser.add_argument(
        "--pretrained",
        default="",
        type=str,
        nargs="?",
        help="pretrained weights .h5 file to load weights from. Auto-skips non-matching layers",
    )
    parser.add_argument(
        "--use-fal",
        default=0,
        type=int,
        help="Use forced alignment guided attention loss or regular",
    )
    args = parser.parse_args()

    # return strategy
    STRATEGY = return_strategy()

    # set mixed precision config
    if args.mixed_precision == 1:
        tf.config.optimizer.set_experimental_options({"auto_mixed_precision": True})

    # normalize the 0/1 int flags to real booleans
    args.mixed_precision = bool(args.mixed_precision)
    args.use_norm = bool(args.use_norm)
    args.use_fal = bool(args.use_fal)

    # set logger
    if args.verbose > 1:
        logging.basicConfig(
            level=logging.DEBUG,
            stream=sys.stdout,
            format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
        )
    elif args.verbose > 0:
        logging.basicConfig(
            level=logging.INFO,
            stream=sys.stdout,
            format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
        )
    else:
        logging.basicConfig(
            level=logging.WARN,
            stream=sys.stdout,
            format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
        )
        logging.warning("Skip DEBUG/INFO messages")

    # check directory existence
    if not os.path.exists(args.outdir):
        os.makedirs(args.outdir)

    # check arguments
    if args.train_dir is None:
        raise ValueError("Please specify --train-dir")
    if args.dev_dir is None:
        # fixed: message previously referred to a non-existent --valid-dir flag
        raise ValueError("Please specify --dev-dir")

    # load and save config
    with open(args.config) as f:
        config = yaml.load(f, Loader=yaml.Loader)
    config.update(vars(args))
    config["version"] = tensorflow_tts.__version__

    # get dataset
    if config["remove_short_samples"]:
        mel_length_threshold = config["mel_length_threshold"]
    else:
        mel_length_threshold = 0

    if config["format"] == "npy":
        charactor_query = "*-ids.npy"
        mel_query = "*-raw-feats.npy" if not args.use_norm else "*-norm-feats.npy"
        align_query = "*-alignment.npy" if args.use_fal else ""
        charactor_load_fn = np.load
        mel_load_fn = np.load
    else:
        raise ValueError("Only npy are supported.")

    train_dataset = CharactorMelDataset(
        dataset=config["tacotron2_params"]["dataset"],
        root_dir=args.train_dir,
        charactor_query=charactor_query,
        mel_query=mel_query,
        charactor_load_fn=charactor_load_fn,
        mel_load_fn=mel_load_fn,
        mel_length_threshold=mel_length_threshold,
        reduction_factor=config["tacotron2_params"]["reduction_factor"],
        use_fixed_shapes=config["use_fixed_shapes"],
        align_query=align_query,
    )

    # update max_mel_length and max_char_length to config
    config.update({"max_mel_length": int(train_dataset.max_mel_length)})
    config.update({"max_char_length": int(train_dataset.max_char_length)})

    with open(os.path.join(args.outdir, "config.yml"), "w") as f:
        yaml.dump(config, f, Dumper=yaml.Dumper)
    for key, value in config.items():
        logging.info(f"{key} = {value}")

    # the global batch covers all replicas and accumulation steps
    train_dataset = train_dataset.create(
        is_shuffle=config["is_shuffle"],
        allow_cache=config["allow_cache"],
        batch_size=config["batch_size"]
        * STRATEGY.num_replicas_in_sync
        * config["gradient_accumulation_steps"],
    )

    valid_dataset = CharactorMelDataset(
        dataset=config["tacotron2_params"]["dataset"],
        root_dir=args.dev_dir,
        charactor_query=charactor_query,
        mel_query=mel_query,
        charactor_load_fn=charactor_load_fn,
        mel_load_fn=mel_load_fn,
        mel_length_threshold=mel_length_threshold,
        reduction_factor=config["tacotron2_params"]["reduction_factor"],
        use_fixed_shapes=False,  # don't need apply fixed shape for evaluation.
        align_query=align_query,
    ).create(
        is_shuffle=config["is_shuffle"],
        allow_cache=config["allow_cache"],
        batch_size=config["batch_size"] * STRATEGY.num_replicas_in_sync,
    )

    # define trainer
    trainer = Tacotron2Trainer(
        config=config,
        strategy=STRATEGY,
        steps=0,
        epochs=0,
        is_mixed_precision=args.mixed_precision,
    )

    with STRATEGY.scope():
        # define model.
        tacotron_config = Tacotron2Config(**config["tacotron2_params"])
        tacotron2 = TFTacotron2(config=tacotron_config, name="tacotron2")
        tacotron2._build()
        tacotron2.summary()

        if len(args.pretrained) > 1:
            tacotron2.load_weights(args.pretrained, by_name=True, skip_mismatch=True)
            logging.info(
                f"Successfully loaded pretrained weight from {args.pretrained}."
            )

        # AdamW for tacotron2: polynomial decay wrapped in a linear warm-up
        learning_rate_fn = tf.keras.optimizers.schedules.PolynomialDecay(
            initial_learning_rate=config["optimizer_params"]["initial_learning_rate"],
            decay_steps=config["optimizer_params"]["decay_steps"],
            end_learning_rate=config["optimizer_params"]["end_learning_rate"],
        )

        learning_rate_fn = WarmUp(
            initial_learning_rate=config["optimizer_params"]["initial_learning_rate"],
            decay_schedule_fn=learning_rate_fn,
            warmup_steps=int(
                config["train_max_steps"]
                * config["optimizer_params"]["warmup_proportion"]
            ),
        )

        optimizer = AdamWeightDecay(
            learning_rate=learning_rate_fn,
            weight_decay_rate=config["optimizer_params"]["weight_decay"],
            beta_1=0.9,
            beta_2=0.98,
            epsilon=1e-6,
            exclude_from_weight_decay=["LayerNorm", "layer_norm", "bias"],
        )

        # force creation of the optimizer's iteration counter inside the scope
        _ = optimizer.iterations

    # compile trainer
    trainer.compile(model=tacotron2, optimizer=optimizer)

    # start training
    try:
        trainer.fit(
            train_dataset,
            valid_dataset,
            saved_path=os.path.join(config["outdir"], "checkpoints/"),
            resume=args.resume,
        )
    except KeyboardInterrupt:
        # save progress so an interrupted run can be resumed
        trainer.save_checkpoint()
        logging.info(f"Successfully saved checkpoint @ {trainer.steps}steps.")
# Standard script entry point: run training only when executed directly.
if __name__ == "__main__":
    main()
| 18,412 | 33.807183 | 96 | py |
TensorFlowTTS | TensorFlowTTS-master/examples/multiband_melgan_hf/train_multiband_melgan_hf.py | # -*- coding: utf-8 -*-
# Copyright 2020 Minh Nguyen (@dathudeptrai)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Train Multi-Band MelGAN + MPD."""
import tensorflow as tf
physical_devices = tf.config.list_physical_devices("GPU")
for i in range(len(physical_devices)):
tf.config.experimental.set_memory_growth(physical_devices[i], True)
import sys
sys.path.append(".")
import argparse
import logging
import os
import numpy as np
import soundfile as sf
import yaml
from tensorflow.keras.mixed_precision import experimental as mixed_precision
import tensorflow_tts
from examples.melgan.audio_mel_dataset import AudioMelDataset
from examples.hifigan.train_hifigan import TFHifiGANDiscriminator
from examples.melgan.train_melgan import MelganTrainer, collater
from tensorflow_tts.configs import (
MultiBandMelGANDiscriminatorConfig,
MultiBandMelGANGeneratorConfig,
HifiGANDiscriminatorConfig,
)
from tensorflow_tts.losses import TFMultiResolutionSTFT
from tensorflow_tts.models import (
TFPQMF,
TFMelGANGenerator,
TFMelGANMultiScaleDiscriminator,
TFHifiGANMultiPeriodDiscriminator,
)
from tensorflow_tts.utils import calculate_2d_loss, calculate_3d_loss, return_strategy
class MultiBandMelganTrainer(MelganTrainer):
    """Multi-Band MelGAN Trainer class based on MelganTrainer.

    Extends the base MelGAN adversarial training loop with sub-band and
    full-band multi-resolution STFT losses computed through a PQMF
    analysis/synthesis filter bank.
    """

    def __init__(
        self,
        config,
        strategy,
        steps=0,
        epochs=0,
        is_generator_mixed_precision=False,
        is_discriminator_mixed_precision=False,
    ):
        """Initialize trainer.

        Args:
            config (dict): Config dict loaded from yaml format configuration file.
            strategy: tf.distribute strategy the train/eval steps run under.
            steps (int): Initial global steps.
            epochs (int): Initial global epochs.
            is_generator_mixed_precision (bool): Use mixed precision for generator or not.
            is_discriminator_mixed_precision (bool): Use mixed precision for discriminator or not.
        """
        super(MultiBandMelganTrainer, self).__init__(
            config=config,
            steps=steps,
            epochs=epochs,
            strategy=strategy,
            is_generator_mixed_precision=is_generator_mixed_precision,
            is_discriminator_mixed_precision=is_discriminator_mixed_precision,
        )

        # define metrics to aggregates data and use tf.summary logs them
        self.list_metrics_name = [
            "adversarial_loss",
            "subband_spectral_convergence_loss",
            "subband_log_magnitude_loss",
            "fullband_spectral_convergence_loss",
            "fullband_log_magnitude_loss",
            "gen_loss",
            "real_loss",
            "fake_loss",
            "dis_loss",
        ]

        self.init_train_eval_metrics(self.list_metrics_name)
        self.reset_states_train()
        self.reset_states_eval()

    def compile(self, gen_model, dis_model, gen_optimizer, dis_optimizer, pqmf):
        """Attach models/optimizers plus the PQMF module and STFT loss functions.

        Args:
            gen_model: generator model.
            dis_model: discriminator model.
            gen_optimizer: optimizer for the generator.
            dis_optimizer: optimizer for the discriminator.
            pqmf: TFPQMF filter bank used to split/recombine sub-band signals.
        """
        super().compile(gen_model, dis_model, gen_optimizer, dis_optimizer)
        # define loss
        self.sub_band_stft_loss = TFMultiResolutionSTFT(
            **self.config["subband_stft_loss_params"]
        )
        self.full_band_stft_loss = TFMultiResolutionSTFT(
            **self.config["stft_loss_params"]
        )
        # define pqmf module
        self.pqmf = pqmf

    def compute_per_example_generator_losses(self, batch, outputs):
        """Compute per example generator losses and return dict_metrics_losses
        Note that all element of the loss MUST has a shape [batch_size] and
        the keys of dict_metrics_losses MUST be in self.list_metrics_name.

        Args:
            batch: dictionary batch input return from dataloader
            outputs: outputs of the model (predicted sub-band signals)

        Returns:
            per_example_losses: per example losses for each GPU, shape [B]
            dict_metrics_losses: dictionary loss.
        """
        dict_metrics_losses = {}
        per_example_losses = 0.0

        audios = batch["audios"]
        y_mb_hat = outputs
        # Recombine predicted sub-bands into a full-band waveform, and split
        # the ground-truth waveform into sub-bands for the sub-band STFT loss.
        y_hat = self.pqmf.synthesis(y_mb_hat)

        y_mb = self.pqmf.analysis(tf.expand_dims(audios, -1))
        y_mb = tf.transpose(y_mb, (0, 2, 1))  # [B, subbands, T//subbands]
        y_mb = tf.reshape(y_mb, (-1, tf.shape(y_mb)[-1]))  # [B * subbands, T']

        y_mb_hat = tf.transpose(y_mb_hat, (0, 2, 1))  # [B, subbands, T//subbands]
        y_mb_hat = tf.reshape(
            y_mb_hat, (-1, tf.shape(y_mb_hat)[-1])
        )  # [B * subbands, T']

        # calculate sub/full band spectral_convergence and log mag loss.
        sub_sc_loss, sub_mag_loss = calculate_2d_loss(
            y_mb, y_mb_hat, self.sub_band_stft_loss
        )
        # Average the per-(example, subband) losses over the subband axis so
        # each loss ends up with shape [B] again.
        sub_sc_loss = tf.reduce_mean(
            tf.reshape(sub_sc_loss, [-1, self.pqmf.subbands]), -1
        )
        sub_mag_loss = tf.reduce_mean(
            tf.reshape(sub_mag_loss, [-1, self.pqmf.subbands]), -1
        )
        full_sc_loss, full_mag_loss = calculate_2d_loss(
            audios, tf.squeeze(y_hat, -1), self.full_band_stft_loss
        )

        # define generator loss: equal weighting of sub-band and full-band terms.
        gen_loss = 0.5 * (sub_sc_loss + sub_mag_loss) + 0.5 * (
            full_sc_loss + full_mag_loss
        )

        # Adversarial term only applies once the discriminator has started
        # training (two-stage schedule).
        if self.steps >= self.config["discriminator_train_start_steps"]:
            p_hat = self._discriminator(y_hat)
            # NOTE(review): `p` (real-audio discriminator output) is computed
            # but unused in the generator loss below — confirm intent.
            p = self._discriminator(tf.expand_dims(audios, 2))
            adv_loss = 0.0
            for i in range(len(p_hat)):
                adv_loss += calculate_3d_loss(
                    tf.ones_like(p_hat[i][-1]), p_hat[i][-1], loss_fn=self.mse_loss
                )
            adv_loss /= i + 1  # mean over discriminator scales
            gen_loss += self.config["lambda_adv"] * adv_loss

            dict_metrics_losses.update(
                {"adversarial_loss": adv_loss},
            )

        dict_metrics_losses.update({"gen_loss": gen_loss})
        dict_metrics_losses.update({"subband_spectral_convergence_loss": sub_sc_loss})
        dict_metrics_losses.update({"subband_log_magnitude_loss": sub_mag_loss})
        dict_metrics_losses.update({"fullband_spectral_convergence_loss": full_sc_loss})
        dict_metrics_losses.update({"fullband_log_magnitude_loss": full_mag_loss})

        per_example_losses = gen_loss
        return per_example_losses, dict_metrics_losses

    def compute_per_example_discriminator_losses(self, batch, gen_outputs):
        """Compute per example discriminator losses and return dict_metrics_losses
        Note that all element of the loss MUST has a shape [batch_size] and
        the keys of dict_metrics_losses MUST be in self.list_metrics_name.

        Args:
            batch: dictionary batch input return from dataloader
            gen_outputs: generator outputs (predicted sub-band signals)

        Returns:
            per_example_losses: per example losses for each GPU, shape [B]
            dict_metrics_losses: dictionary loss.
        """
        y_mb_hat = gen_outputs
        # Discriminator operates on the recombined full-band waveform; the
        # actual real/fake losses are computed by the base MelganTrainer.
        y_hat = self.pqmf.synthesis(y_mb_hat)

        (
            per_example_losses,
            dict_metrics_losses,
        ) = super().compute_per_example_discriminator_losses(batch, y_hat)

        return per_example_losses, dict_metrics_losses

    def generate_and_save_intermediate_result(self, batch):
        """Generate and save intermediate result.

        Writes per-utterance waveform plots (.png) and reference/generated
        wav files under ``<outdir>/predictions/<steps>steps``.
        """
        import matplotlib.pyplot as plt

        # generate
        y_mb_batch_ = self.one_step_predict(batch)  # [B, T // subbands, subbands]
        y_batch = batch["audios"]
        utt_ids = batch["utt_ids"]

        # convert to tensor.
        # here we just take a sample at first replica
        # (multi-replica runs return PerReplica values; fall back to plain tensors).
        try:
            y_mb_batch_ = y_mb_batch_.values[0].numpy()
            y_batch = y_batch.values[0].numpy()
            utt_ids = utt_ids.values[0].numpy()
        except Exception:
            y_mb_batch_ = y_mb_batch_.numpy()
            y_batch = y_batch.numpy()
            utt_ids = utt_ids.numpy()

        y_batch_ = self.pqmf.synthesis(y_mb_batch_).numpy()  # [B, T, 1]

        # check directory
        dirname = os.path.join(self.config["outdir"], f"predictions/{self.steps}steps")
        if not os.path.exists(dirname):
            os.makedirs(dirname)

        for idx, (y, y_) in enumerate(zip(y_batch, y_batch_), 0):
            # convert to ndarray
            y, y_ = tf.reshape(y, [-1]).numpy(), tf.reshape(y_, [-1]).numpy()

            # plot figure and save it
            utt_id = utt_ids[idx]
            figname = os.path.join(dirname, f"{utt_id}.png")
            plt.subplot(2, 1, 1)
            plt.plot(y)
            plt.title("groundtruth speech")
            plt.subplot(2, 1, 2)
            plt.plot(y_)
            plt.title(f"generated speech @ {self.steps} steps")
            plt.tight_layout()
            plt.savefig(figname)
            plt.close()

            # save as wavefile (clip to valid PCM range first)
            y = np.clip(y, -1, 1)
            y_ = np.clip(y_, -1, 1)
            sf.write(
                figname.replace(".png", "_ref.wav"),
                y,
                self.config["sampling_rate"],
                "PCM_16",
            )
            sf.write(
                figname.replace(".png", "_gen.wav"),
                y_,
                self.config["sampling_rate"],
                "PCM_16",
            )
def main():
    """Run the Multi-Band MelGAN + MPD training process.

    Parses CLI arguments, loads the yaml config, builds train/valid
    datasets, constructs generator/discriminator/PQMF under the
    distribution strategy, then runs the trainer loop. A KeyboardInterrupt
    saves a checkpoint before exiting.

    Raises:
        ValueError: if --train-dir / --dev-dir are missing, or the data
            format is not "npy".
    """
    parser = argparse.ArgumentParser(
        description="Train MultiBand MelGAN (See detail in examples/multiband_melgan/train_multiband_melgan.py)"
    )
    parser.add_argument(
        "--train-dir",
        default=None,
        type=str,
        help="directory including training data. ",
    )
    parser.add_argument(
        "--dev-dir",
        default=None,
        type=str,
        help="directory including development data. ",
    )
    parser.add_argument(
        "--use-norm", default=1, type=int, help="use norm mels for training or raw."
    )
    parser.add_argument(
        "--outdir", type=str, required=True, help="directory to save checkpoints."
    )
    parser.add_argument(
        "--config", type=str, required=True, help="yaml format configuration file."
    )
    parser.add_argument(
        "--resume",
        default="",
        type=str,
        nargs="?",
        help='checkpoint file path to resume training. (default="")',
    )
    parser.add_argument(
        "--verbose",
        type=int,
        default=1,
        help="logging level. higher is more logging. (default=1)",
    )
    parser.add_argument(
        "--generator_mixed_precision",
        default=0,
        type=int,
        help="using mixed precision for generator or not.",
    )
    parser.add_argument(
        "--discriminator_mixed_precision",
        default=0,
        type=int,
        help="using mixed precision for discriminator or not.",
    )
    parser.add_argument(
        "--postnets",
        default=0,
        type=int,
        help="using postnets instead of gt mels or not.",
    )
    parser.add_argument(
        "--pretrained",
        default="",
        type=str,
        nargs="?",
        help="path of .h5 mb-melgan generator and discriminator to load weights from. must be comma delineated, like ptgen.h5,ptdisc.h5",
    )
    args = parser.parse_args()

    # return strategy
    STRATEGY = return_strategy()

    # set mixed precision config
    if args.generator_mixed_precision == 1 or args.discriminator_mixed_precision == 1:
        tf.config.optimizer.set_experimental_options({"auto_mixed_precision": True})

    # normalize int flags to real booleans for the rest of the function
    args.generator_mixed_precision = bool(args.generator_mixed_precision)
    args.discriminator_mixed_precision = bool(args.discriminator_mixed_precision)
    args.use_norm = bool(args.use_norm)
    args.postnets = bool(args.postnets)

    # set logger
    if args.verbose > 1:
        logging.basicConfig(
            level=logging.DEBUG,
            stream=sys.stdout,
            format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
        )
    elif args.verbose > 0:
        logging.basicConfig(
            level=logging.INFO,
            stream=sys.stdout,
            format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
        )
    else:
        logging.basicConfig(
            level=logging.WARN,
            stream=sys.stdout,
            format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
        )
        logging.warning("Skip DEBUG/INFO messages")

    # check directory existence
    if not os.path.exists(args.outdir):
        os.makedirs(args.outdir)

    # check arguments
    if args.train_dir is None:
        raise ValueError("Please specify --train-dir")
    if args.dev_dir is None:
        # Fixed: the old message referenced a non-existent --valid-dir flag.
        raise ValueError("Please specify --dev-dir")

    # load and save config (trusted local file; yaml.Loader is fine here)
    with open(args.config) as f:
        config = yaml.load(f, Loader=yaml.Loader)
    config.update(vars(args))
    config["version"] = tensorflow_tts.__version__
    with open(os.path.join(args.outdir, "config.yml"), "w") as f:
        yaml.dump(config, f, Dumper=yaml.Dumper)
    for key, value in config.items():
        logging.info(f"{key} = {value}")

    # get dataset: drop utterances shorter than one training window (plus the
    # generator's auxiliary context) when requested.
    if config["remove_short_samples"]:
        mel_length_threshold = config["batch_max_steps"] // config[
            "hop_size"
        ] + 2 * config["multiband_melgan_generator_params"].get("aux_context_window", 0)
    else:
        mel_length_threshold = None

    if config["format"] == "npy":
        audio_query = "*-wave.npy"
        mel_query = "*-norm-feats.npy" if args.use_norm else "*-raw-feats.npy"
        audio_load_fn = np.load
        mel_load_fn = np.load
    else:
        raise ValueError("Only npy are supported.")

    if args.postnets:
        mel_query = "*-postnet.npy"
        logging.info("Using postnets")
    else:
        logging.info("Using GT Mels")

    # define train/valid dataset
    train_dataset = AudioMelDataset(
        root_dir=args.train_dir,
        audio_query=audio_query,
        mel_query=mel_query,
        audio_load_fn=audio_load_fn,
        mel_load_fn=mel_load_fn,
        mel_length_threshold=mel_length_threshold,
    ).create(
        is_shuffle=config["is_shuffle"],
        map_fn=lambda items: collater(
            items,
            batch_max_steps=tf.constant(config["batch_max_steps"], dtype=tf.int32),
            hop_size=tf.constant(config["hop_size"], dtype=tf.int32),
        ),
        allow_cache=config["allow_cache"],
        # global batch: per-replica batch * replicas * accumulation steps
        batch_size=config["batch_size"]
        * STRATEGY.num_replicas_in_sync
        * config["gradient_accumulation_steps"],
    )

    valid_dataset = AudioMelDataset(
        root_dir=args.dev_dir,
        audio_query=audio_query,
        mel_query=mel_query,
        audio_load_fn=audio_load_fn,
        mel_load_fn=mel_load_fn,
        mel_length_threshold=mel_length_threshold,
    ).create(
        is_shuffle=config["is_shuffle"],
        map_fn=lambda items: collater(
            items,
            batch_max_steps=tf.constant(
                config["batch_max_steps_valid"], dtype=tf.int32
            ),
            hop_size=tf.constant(config["hop_size"], dtype=tf.int32),
        ),
        allow_cache=config["allow_cache"],
        batch_size=config["batch_size"] * STRATEGY.num_replicas_in_sync,
    )

    # define trainer
    trainer = MultiBandMelganTrainer(
        steps=0,
        epochs=0,
        config=config,
        strategy=STRATEGY,
        is_generator_mixed_precision=args.generator_mixed_precision,
        is_discriminator_mixed_precision=args.discriminator_mixed_precision,
    )

    with STRATEGY.scope():
        # define generator and discriminator
        generator = TFMelGANGenerator(
            MultiBandMelGANGeneratorConfig(
                **config["multiband_melgan_generator_params"]
            ),
            name="multi_band_melgan_generator",
        )

        multiscale_discriminator = TFMelGANMultiScaleDiscriminator(
            MultiBandMelGANDiscriminatorConfig(
                **config["multiband_melgan_discriminator_params"]
            ),
            name="multi_band_melgan_discriminator",
        )

        multiperiod_discriminator = TFHifiGANMultiPeriodDiscriminator(
            HifiGANDiscriminatorConfig(**config["hifigan_discriminator_params"]),
            name="hifigan_multiperiod_discriminator",
        )

        pqmf = TFPQMF(
            MultiBandMelGANGeneratorConfig(
                **config["multiband_melgan_generator_params"]
            ),
            dtype=tf.float32,
            name="pqmf",
        )

        # combined multi-period + multi-scale discriminator
        discriminator = TFHifiGANDiscriminator(
            multiperiod_discriminator,
            multiscale_discriminator,
            name="hifigan_discriminator",
        )

        # dummy input to build model.
        fake_mels = tf.random.uniform(shape=[1, 100, 80], dtype=tf.float32)
        y_mb_hat = generator(fake_mels)
        y_hat = pqmf.synthesis(y_mb_hat)
        discriminator(y_hat)

        if args.pretrained:
            # expects "generator.h5,discriminator.h5"
            pt_splits = args.pretrained.split(",")
            generator.load_weights(pt_splits[0])
            discriminator.load_weights(pt_splits[1])
            logging.info(
                f"Successfully loaded pretrained weight from {args.pretrained}."
            )

        generator.summary()
        discriminator.summary()

        # define optimizer (learning-rate schedules resolved by name from config)
        generator_lr_fn = getattr(
            tf.keras.optimizers.schedules, config["generator_optimizer_params"]["lr_fn"]
        )(**config["generator_optimizer_params"]["lr_params"])
        discriminator_lr_fn = getattr(
            tf.keras.optimizers.schedules,
            config["discriminator_optimizer_params"]["lr_fn"],
        )(**config["discriminator_optimizer_params"]["lr_params"])

        gen_optimizer = tf.keras.optimizers.Adam(
            learning_rate=generator_lr_fn,
            amsgrad=config["generator_optimizer_params"]["amsgrad"],
        )
        dis_optimizer = tf.keras.optimizers.Adam(
            learning_rate=discriminator_lr_fn,
            amsgrad=config["discriminator_optimizer_params"]["amsgrad"],
        )

        # touch .iterations so the optimizer variables are created in-scope
        _ = gen_optimizer.iterations
        _ = dis_optimizer.iterations

    trainer.compile(
        gen_model=generator,
        dis_model=discriminator,
        gen_optimizer=gen_optimizer,
        dis_optimizer=dis_optimizer,
        pqmf=pqmf,
    )

    # start training
    try:
        trainer.fit(
            train_dataset,
            valid_dataset,
            saved_path=os.path.join(config["outdir"], "checkpoints/"),
            resume=args.resume,
        )
    except KeyboardInterrupt:
        trainer.save_checkpoint()
        logging.info(f"Successfully saved checkpoint @ {trainer.steps}steps.")
# Script entry point.
if __name__ == "__main__":
    main()
| 19,162 | 33.40395 | 137 | py |
TensorFlowTTS | TensorFlowTTS-master/examples/fastspeech/train_fastspeech.py | # -*- coding: utf-8 -*-
# Copyright 2020 Minh Nguyen (@dathudeptrai)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Train FastSpeech."""
import tensorflow as tf
physical_devices = tf.config.list_physical_devices("GPU")
for i in range(len(physical_devices)):
tf.config.experimental.set_memory_growth(physical_devices[i], True)
import argparse
import logging
import os
import sys
sys.path.append(".")
import numpy as np
import yaml
import tensorflow_tts
import tensorflow_tts.configs.fastspeech as FASTSPEECH_CONFIG
from examples.fastspeech.fastspeech_dataset import CharactorDurationMelDataset
from tensorflow_tts.models import TFFastSpeech
from tensorflow_tts.optimizers import AdamWeightDecay, WarmUp
from tensorflow_tts.trainers import Seq2SeqBasedTrainer
from tensorflow_tts.utils import calculate_2d_loss, calculate_3d_loss, return_strategy
class FastSpeechTrainer(Seq2SeqBasedTrainer):
    """FastSpeech Trainer class based on Seq2SeqBasedTrainer.

    Supplies the FastSpeech-specific per-example losses (duration +
    before/after mel reconstruction) and an eval-time spectrogram dumper.
    """

    def __init__(
        self, config, strategy, steps=0, epochs=0, is_mixed_precision=False,
    ):
        """Initialize trainer.

        Args:
            config (dict): Config dict loaded from yaml format configuration file.
            strategy: tf.distribute strategy the train/eval steps run under.
            steps (int): Initial global steps.
            epochs (int): Initial global epochs.
            is_mixed_precision (bool): Use mixed precision or not.
        """
        super(FastSpeechTrainer, self).__init__(
            steps=steps,
            epochs=epochs,
            config=config,
            strategy=strategy,
            is_mixed_precision=is_mixed_precision,
        )

        # define metrics to aggregates data and use tf.summary logs them
        self.list_metrics_name = ["duration_loss", "mel_loss_before", "mel_loss_after"]
        self.init_train_eval_metrics(self.list_metrics_name)
        self.reset_states_train()
        self.reset_states_eval()

        self.config = config

    def compile(self, model, optimizer):
        """Attach model/optimizer and build the loss functions."""
        super().compile(model, optimizer)
        # Reduction.NONE keeps losses un-reduced so the base trainer can
        # aggregate them per example (see compute_per_example_losses).
        self.mse = tf.keras.losses.MeanSquaredError(
            reduction=tf.keras.losses.Reduction.NONE
        )
        self.mae = tf.keras.losses.MeanAbsoluteError(
            reduction=tf.keras.losses.Reduction.NONE
        )

    def compute_per_example_losses(self, batch, outputs):
        """Compute per example losses and return dict_metrics_losses
        Note that all element of the loss MUST has a shape [batch_size] and
        the keys of dict_metrics_losses MUST be in self.list_metrics_name.

        Args:
            batch: dictionary batch input return from dataloader
            outputs: outputs of the model

        Returns:
            per_example_losses: per example losses for each GPU, shape [B]
            dict_metrics_losses: dictionary loss.
        """
        mel_before, mel_after, duration_outputs = outputs

        # Duration target is compared in log domain; +1 avoids log(0) for
        # zero-length durations.
        log_duration = tf.math.log(
            tf.cast(tf.math.add(batch["duration_gts"], 1), tf.float32)
        )
        duration_loss = self.mse(log_duration, duration_outputs)
        mel_loss_before = calculate_3d_loss(batch["mel_gts"], mel_before, self.mae)
        mel_loss_after = calculate_3d_loss(batch["mel_gts"], mel_after, self.mae)

        per_example_losses = duration_loss + mel_loss_before + mel_loss_after

        dict_metrics_losses = {
            "duration_loss": duration_loss,
            "mel_loss_before": mel_loss_before,
            "mel_loss_after": mel_loss_after,
        }

        return per_example_losses, dict_metrics_losses

    def generate_and_save_intermediate_result(self, batch):
        """Generate and save intermediate result.

        Writes, per utterance, a 3-panel figure (target mel / mel-before /
        mel-after) under ``<outdir>/predictions/<steps>steps``.
        """
        import matplotlib.pyplot as plt

        # predict with tf.function.
        outputs = self.one_step_predict(batch)
        mels_before, mels_after, *_ = outputs
        mel_gts = batch["mel_gts"]
        utt_ids = batch["utt_ids"]

        # convert to tensor.
        # here we just take a sample at first replica
        # (multi-replica runs return PerReplica values; fall back to plain tensors).
        try:
            mels_before = mels_before.values[0].numpy()
            mels_after = mels_after.values[0].numpy()
            mel_gts = mel_gts.values[0].numpy()
            utt_ids = utt_ids.values[0].numpy()
        except Exception:
            mels_before = mels_before.numpy()
            mels_after = mels_after.numpy()
            mel_gts = mel_gts.numpy()
            utt_ids = utt_ids.numpy()

        # check directory
        dirname = os.path.join(self.config["outdir"], f"predictions/{self.steps}steps")
        if not os.path.exists(dirname):
            os.makedirs(dirname)

        for idx, (mel_gt, mel_before, mel_after) in enumerate(
            zip(mel_gts, mels_before, mels_after), 0
        ):
            mel_gt = tf.reshape(mel_gt, (-1, 80)).numpy()  # [length, 80]
            mel_before = tf.reshape(mel_before, (-1, 80)).numpy()  # [length, 80]
            mel_after = tf.reshape(mel_after, (-1, 80)).numpy()  # [length, 80]

            # plot figure and save it
            utt_id = utt_ids[idx].decode("utf-8")
            figname = os.path.join(dirname, f"{utt_id}.png")
            fig = plt.figure(figsize=(10, 8))
            ax1 = fig.add_subplot(311)
            ax2 = fig.add_subplot(312)
            ax3 = fig.add_subplot(313)
            im = ax1.imshow(np.rot90(mel_gt), aspect="auto", interpolation="none")
            ax1.set_title("Target Mel-Spectrogram")
            fig.colorbar(mappable=im, shrink=0.65, orientation="horizontal", ax=ax1)
            ax2.set_title("Predicted Mel-before-Spectrogram")
            im = ax2.imshow(np.rot90(mel_before), aspect="auto", interpolation="none")
            fig.colorbar(mappable=im, shrink=0.65, orientation="horizontal", ax=ax2)
            ax3.set_title("Predicted Mel-after-Spectrogram")
            im = ax3.imshow(np.rot90(mel_after), aspect="auto", interpolation="none")
            fig.colorbar(mappable=im, shrink=0.65, orientation="horizontal", ax=ax3)
            plt.tight_layout()
            plt.savefig(figname)
            plt.close()
def main():
    """Run the FastSpeech training process.

    Parses CLI arguments, loads the yaml config, builds train/valid
    datasets, constructs the FastSpeech model and AdamW optimizer under
    the distribution strategy, then runs the trainer loop. A
    KeyboardInterrupt saves a checkpoint before exiting.

    Raises:
        ValueError: if --train-dir / --dev-dir are missing, or the data
            format is not "npy".
    """
    parser = argparse.ArgumentParser(
        description="Train FastSpeech (See detail in tensorflow_tts/bin/train-fastspeech.py)"
    )
    parser.add_argument(
        "--train-dir",
        default=None,
        type=str,
        help="directory including training data. ",
    )
    parser.add_argument(
        "--dev-dir",
        default=None,
        type=str,
        help="directory including development data. ",
    )
    parser.add_argument(
        "--use-norm", default=1, type=int, help="usr norm-mels for train or raw."
    )
    parser.add_argument(
        "--outdir", type=str, required=True, help="directory to save checkpoints."
    )
    parser.add_argument(
        "--config", type=str, required=True, help="yaml format configuration file."
    )
    parser.add_argument(
        "--resume",
        default="",
        type=str,
        nargs="?",
        help='checkpoint file path to resume training. (default="")',
    )
    parser.add_argument(
        "--verbose",
        type=int,
        default=1,
        help="logging level. higher is more logging. (default=1)",
    )
    parser.add_argument(
        "--mixed_precision",
        default=0,
        type=int,
        help="using mixed precision for generator or not.",
    )
    parser.add_argument(
        "--pretrained",
        default="",
        type=str,
        nargs="?",
        help="pretrained checkpoint file to load weights from. Auto-skips non-matching layers",
    )
    args = parser.parse_args()

    # return strategy
    STRATEGY = return_strategy()

    # set mixed precision config
    if args.mixed_precision == 1:
        tf.config.optimizer.set_experimental_options({"auto_mixed_precision": True})

    # normalize int flags to real booleans for the rest of the function
    args.mixed_precision = bool(args.mixed_precision)
    args.use_norm = bool(args.use_norm)

    # set logger
    if args.verbose > 1:
        logging.basicConfig(
            level=logging.DEBUG,
            stream=sys.stdout,
            format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
        )
    elif args.verbose > 0:
        logging.basicConfig(
            level=logging.INFO,
            stream=sys.stdout,
            format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
        )
    else:
        logging.basicConfig(
            level=logging.WARN,
            stream=sys.stdout,
            format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
        )
        logging.warning("Skip DEBUG/INFO messages")

    # check directory existence
    if not os.path.exists(args.outdir):
        os.makedirs(args.outdir)

    # check arguments
    if args.train_dir is None:
        raise ValueError("Please specify --train-dir")
    if args.dev_dir is None:
        # Fixed: the old message referenced a non-existent --valid-dir flag.
        raise ValueError("Please specify --dev-dir")

    # load and save config (trusted local file; yaml.Loader is fine here)
    with open(args.config) as f:
        config = yaml.load(f, Loader=yaml.Loader)
    config.update(vars(args))
    config["version"] = tensorflow_tts.__version__
    with open(os.path.join(args.outdir, "config.yml"), "w") as f:
        yaml.dump(config, f, Dumper=yaml.Dumper)
    for key, value in config.items():
        logging.info(f"{key} = {value}")

    # get dataset
    if config["remove_short_samples"]:
        mel_length_threshold = config["mel_length_threshold"]
    else:
        mel_length_threshold = None

    if config["format"] == "npy":
        charactor_query = "*-ids.npy"
        mel_query = "*-norm-feats.npy" if args.use_norm else "*-raw-feats.npy"
        duration_query = "*-durations.npy"
        charactor_load_fn = np.load
        mel_load_fn = np.load
        duration_load_fn = np.load
    else:
        raise ValueError("Only npy are supported.")

    # define train/valid dataset
    train_dataset = CharactorDurationMelDataset(
        root_dir=args.train_dir,
        charactor_query=charactor_query,
        mel_query=mel_query,
        duration_query=duration_query,
        charactor_load_fn=charactor_load_fn,
        mel_load_fn=mel_load_fn,
        duration_load_fn=duration_load_fn,
        mel_length_threshold=mel_length_threshold,
    ).create(
        is_shuffle=config["is_shuffle"],
        allow_cache=config["allow_cache"],
        # global batch: per-replica batch * replicas * accumulation steps
        batch_size=config["batch_size"]
        * STRATEGY.num_replicas_in_sync
        * config["gradient_accumulation_steps"],
    )

    valid_dataset = CharactorDurationMelDataset(
        root_dir=args.dev_dir,
        charactor_query=charactor_query,
        mel_query=mel_query,
        duration_query=duration_query,
        charactor_load_fn=charactor_load_fn,
        mel_load_fn=mel_load_fn,
        duration_load_fn=duration_load_fn,
    ).create(
        is_shuffle=config["is_shuffle"],
        allow_cache=config["allow_cache"],
        batch_size=config["batch_size"] * STRATEGY.num_replicas_in_sync,
    )

    # define trainer
    trainer = FastSpeechTrainer(
        config=config,
        strategy=STRATEGY,
        steps=0,
        epochs=0,
        is_mixed_precision=args.mixed_precision,
    )

    with STRATEGY.scope():
        # define model
        fastspeech = TFFastSpeech(
            config=FASTSPEECH_CONFIG.FastSpeechConfig(**config["fastspeech_params"])
        )
        fastspeech._build()
        fastspeech.summary()

        if args.pretrained:
            # by_name/skip_mismatch lets partially-compatible checkpoints load
            fastspeech.load_weights(args.pretrained, by_name=True, skip_mismatch=True)
            logging.info(
                f"Successfully loaded pretrained weight from {args.pretrained}."
            )

        # AdamW for fastspeech: polynomial decay wrapped in linear warmup
        learning_rate_fn = tf.keras.optimizers.schedules.PolynomialDecay(
            initial_learning_rate=config["optimizer_params"]["initial_learning_rate"],
            decay_steps=config["optimizer_params"]["decay_steps"],
            end_learning_rate=config["optimizer_params"]["end_learning_rate"],
        )
        learning_rate_fn = WarmUp(
            initial_learning_rate=config["optimizer_params"]["initial_learning_rate"],
            decay_schedule_fn=learning_rate_fn,
            warmup_steps=int(
                config["train_max_steps"]
                * config["optimizer_params"]["warmup_proportion"]
            ),
        )
        optimizer = AdamWeightDecay(
            learning_rate=learning_rate_fn,
            weight_decay_rate=config["optimizer_params"]["weight_decay"],
            beta_1=0.9,
            beta_2=0.98,
            epsilon=1e-6,
            exclude_from_weight_decay=["LayerNorm", "layer_norm", "bias"],
        )

        # touch .iterations so the optimizer variables are created in-scope
        _ = optimizer.iterations

    # compile trainer
    trainer.compile(model=fastspeech, optimizer=optimizer)

    # start training
    try:
        trainer.fit(
            train_dataset,
            valid_dataset,
            saved_path=os.path.join(config["outdir"], "checkpoints/"),
            resume=args.resume,
        )
    except KeyboardInterrupt:
        trainer.save_checkpoint()
        logging.info(f"Successfully saved checkpoint @ {trainer.steps}steps.")
# Script entry point.
if __name__ == "__main__":
    main()
| 13,591 | 33.762148 | 95 | py |
TensorFlowTTS | TensorFlowTTS-master/examples/fastspeech2_libritts/train_fastspeech2.py | # -*- coding: utf-8 -*-
# Copyright 2020 TensorFlowTTS Team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Train FastSpeech2."""
import tensorflow as tf
physical_devices = tf.config.list_physical_devices("GPU")
for i in range(len(physical_devices)):
tf.config.experimental.set_memory_growth(physical_devices[i], True)
import sys
sys.path.append(".")
import argparse
import logging
import os
import numpy as np
import yaml
import json
import tensorflow_tts
from examples.fastspeech2_libritts.fastspeech2_dataset import (
CharactorDurationF0EnergyMelDataset,
)
from tensorflow_tts.configs import FastSpeech2Config
from tensorflow_tts.models import TFFastSpeech2
from tensorflow_tts.optimizers import AdamWeightDecay, WarmUp
from tensorflow_tts.trainers import Seq2SeqBasedTrainer
from tensorflow_tts.utils import (
calculate_2d_loss,
calculate_3d_loss,
return_strategy,
TFGriffinLim,
)
class FastSpeech2Trainer(Seq2SeqBasedTrainer):
    """FastSpeech2 Trainer class based on FastSpeechTrainer.

    Adds f0/energy variance losses on top of the FastSpeech duration + mel
    losses, and can optionally dump Griffin-Lim wav samples during eval.
    """

    def __init__(
        self,
        config,
        strategy,
        steps=0,
        epochs=0,
        is_mixed_precision=False,
        stats_path: str = "",
        dataset_config: str = "",
    ):
        """Initialize trainer.

        Args:
            config (dict): Config dict loaded from yaml format configuration file.
            strategy: tf.distribute strategy the train/eval steps run under.
            steps (int): Initial global steps.
            epochs (int): Initial global epochs.
            is_mixed_precision (bool): Use mixed precision or not.
            stats_path (str): Path to stats for the Griffin-Lim helper
                (only read when config["use_griffin"] is true).
            dataset_config (str): Path to preprocessing yaml for Griffin-Lim
                (only read when config["use_griffin"] is true).
        """
        super(FastSpeech2Trainer, self).__init__(
            steps=steps,
            epochs=epochs,
            config=config,
            strategy=strategy,
            is_mixed_precision=is_mixed_precision,
        )

        # define metrics to aggregates data and use tf.summary logs them
        self.list_metrics_name = [
            "duration_loss",
            "f0_loss",
            "energy_loss",
            "mel_loss_before",
            "mel_loss_after",
        ]

        self.init_train_eval_metrics(self.list_metrics_name)
        self.reset_states_train()
        self.reset_states_eval()

        # Optional Griffin-Lim vocoder for writing audible eval samples.
        self.use_griffin = config.get("use_griffin", False)
        self.griffin_lim_tf = None
        if self.use_griffin:
            logging.info(
                f"Load griff stats from {stats_path} and config from {dataset_config}"
            )
            self.griff_conf = yaml.load(open(dataset_config), Loader=yaml.Loader)
            self.prepare_grim(stats_path, self.griff_conf)

    def prepare_grim(self, stats_path, config):
        """Build the TFGriffinLim helper from mel statistics + dataset config.

        Raises:
            KeyError: if stats_path is empty.
        """
        if not stats_path:
            raise KeyError("stats path need to exist")
        self.griffin_lim_tf = TFGriffinLim(stats_path, config)

    def compile(self, model, optimizer):
        """Attach model/optimizer and build the loss functions."""
        super().compile(model, optimizer)
        # Reduction.NONE keeps losses un-reduced so the base trainer can
        # aggregate them per example (see compute_per_example_losses).
        self.mse = tf.keras.losses.MeanSquaredError(
            reduction=tf.keras.losses.Reduction.NONE
        )
        self.mae = tf.keras.losses.MeanAbsoluteError(
            reduction=tf.keras.losses.Reduction.NONE
        )

    def compute_per_example_losses(self, batch, outputs):
        """Compute per example losses and return dict_metrics_losses
        Note that all element of the loss MUST has a shape [batch_size] and
        the keys of dict_metrics_losses MUST be in self.list_metrics_name.

        Args:
            batch: dictionary batch input return from dataloader
            outputs: outputs of the model

        Returns:
            per_example_losses: per example losses for each GPU, shape [B]
            dict_metrics_losses: dictionary loss.
        """
        mel_before, mel_after, duration_outputs, f0_outputs, energy_outputs = outputs

        # Duration target is compared in log domain; +1 avoids log(0) for
        # zero-length durations.
        log_duration = tf.math.log(
            tf.cast(tf.math.add(batch["duration_gts"], 1), tf.float32)
        )
        duration_loss = calculate_2d_loss(log_duration, duration_outputs, self.mse)
        f0_loss = calculate_2d_loss(batch["f0_gts"], f0_outputs, self.mse)
        energy_loss = calculate_2d_loss(batch["energy_gts"], energy_outputs, self.mse)
        mel_loss_before = calculate_3d_loss(batch["mel_gts"], mel_before, self.mae)
        mel_loss_after = calculate_3d_loss(batch["mel_gts"], mel_after, self.mae)

        per_example_losses = (
            duration_loss + f0_loss + energy_loss + mel_loss_before + mel_loss_after
        )

        dict_metrics_losses = {
            "duration_loss": duration_loss,
            "f0_loss": f0_loss,
            "energy_loss": energy_loss,
            "mel_loss_before": mel_loss_before,
            "mel_loss_after": mel_loss_after,
        }

        return per_example_losses, dict_metrics_losses

    def generate_and_save_intermediate_result(self, batch):
        """Generate and save intermediate result.

        Writes, per utterance, a 3-panel spectrogram figure under
        ``<outdir>/predictions/<steps>steps`` and, when use_griffin is on,
        Griffin-Lim wavs under ``<outdir>/predictions/<steps>_wav``.
        """
        import matplotlib.pyplot as plt

        # predict with tf.function.
        outputs = self.one_step_predict(batch)
        mels_before, mels_after, *_ = outputs
        mel_gts = batch["mel_gts"]
        utt_ids = batch["utt_ids"]

        # convert to tensor.
        # here we just take a sample at first replica
        # (multi-replica runs return PerReplica values; fall back to plain tensors).
        try:
            mels_before = mels_before.values[0].numpy()
            mels_after = mels_after.values[0].numpy()
            mel_gts = mel_gts.values[0].numpy()
            utt_ids = utt_ids.values[0].numpy()
        except Exception:
            mels_before = mels_before.numpy()
            mels_after = mels_after.numpy()
            mel_gts = mel_gts.numpy()
            utt_ids = utt_ids.numpy()

        # check directory
        if self.use_griffin:
            griff_dir_name = os.path.join(
                self.config["outdir"], f"predictions/{self.steps}_wav"
            )
            if not os.path.exists(griff_dir_name):
                os.makedirs(griff_dir_name)

        dirname = os.path.join(self.config["outdir"], f"predictions/{self.steps}steps")
        if not os.path.exists(dirname):
            os.makedirs(dirname)

        for idx, (mel_gt, mel_before, mel_after) in enumerate(
            zip(mel_gts, mels_before, mels_after), 0
        ):
            if self.use_griffin:
                utt_id = utt_ids[idx]
                grif_before = self.griffin_lim_tf(
                    tf.reshape(mel_before, [-1, 80])[tf.newaxis, :], n_iter=32
                )
                grif_after = self.griffin_lim_tf(
                    tf.reshape(mel_after, [-1, 80])[tf.newaxis, :], n_iter=32
                )
                grif_gt = self.griffin_lim_tf(
                    tf.reshape(mel_gt, [-1, 80])[tf.newaxis, :], n_iter=32
                )
                self.griffin_lim_tf.save_wav(
                    grif_before, griff_dir_name, f"{utt_id}_before"
                )
                self.griffin_lim_tf.save_wav(
                    grif_after, griff_dir_name, f"{utt_id}_after"
                )
                self.griffin_lim_tf.save_wav(grif_gt, griff_dir_name, f"{utt_id}_gt")

            utt_id = utt_ids[idx]
            mel_gt = tf.reshape(mel_gt, (-1, 80)).numpy()  # [length, 80]
            mel_before = tf.reshape(mel_before, (-1, 80)).numpy()  # [length, 80]
            mel_after = tf.reshape(mel_after, (-1, 80)).numpy()  # [length, 80]

            # plot figure and save it
            figname = os.path.join(dirname, f"{utt_id}.png")
            fig = plt.figure(figsize=(10, 8))
            ax1 = fig.add_subplot(311)
            ax2 = fig.add_subplot(312)
            ax3 = fig.add_subplot(313)
            im = ax1.imshow(np.rot90(mel_gt), aspect="auto", interpolation="none")
            ax1.set_title("Target Mel-Spectrogram")
            fig.colorbar(mappable=im, shrink=0.65, orientation="horizontal", ax=ax1)
            ax2.set_title("Predicted Mel-before-Spectrogram")
            im = ax2.imshow(np.rot90(mel_before), aspect="auto", interpolation="none")
            fig.colorbar(mappable=im, shrink=0.65, orientation="horizontal", ax=ax2)
            ax3.set_title("Predicted Mel-after-Spectrogram")
            im = ax3.imshow(np.rot90(mel_after), aspect="auto", interpolation="none")
            fig.colorbar(mappable=im, shrink=0.65, orientation="horizontal", ax=ax3)
            plt.tight_layout()
            plt.savefig(figname)
            plt.close()
def main():
    """Run the FastSpeech2 training process (CLI entry point).

    Parses command-line arguments, configures logging and mixed precision,
    builds the train/valid datasets, constructs the FastSpeech2 model and
    AdamW optimizer under the distribution strategy, and runs the trainer.
    """
    parser = argparse.ArgumentParser(
        # fixed: this script trains FastSpeech2, not the original FastSpeech
        description="Train FastSpeech2 (See detail in tensorflow_tts/bin/train-fastspeech.py)"
    )
    parser.add_argument(
        "--train-dir",
        default="dump/train",
        type=str,
        help="directory including training data. ",
    )
    parser.add_argument(
        "--dev-dir",
        default="dump/valid",
        type=str,
        help="directory including development data. ",
    )
    parser.add_argument(
        "--use-norm", default=1, type=int, help="usr norm-mels for train or raw."
    )
    parser.add_argument(
        "--f0-stat", default="./dump/stats_f0.npy", type=str, help="f0-stat path.",
    )
    parser.add_argument(
        "--energy-stat",
        default="./dump/stats_energy.npy",
        type=str,
        help="energy-stat path.",
    )
    parser.add_argument(
        "--outdir", type=str, required=True, help="directory to save checkpoints."
    )
    parser.add_argument(
        "--config", type=str, required=True, help="yaml format configuration file."
    )
    parser.add_argument(
        "--resume",
        default="",
        type=str,
        nargs="?",
        help='checkpoint file path to resume training. (default="")',
    )
    parser.add_argument(
        "--verbose",
        type=int,
        default=1,
        help="logging level. higher is more logging. (default=1)",
    )
    parser.add_argument(
        "--mixed_precision",
        default=1,
        type=int,
        help="using mixed precision for generator or not.",
    )
    parser.add_argument(
        "--dataset_config", default="preprocess/libritts_preprocess.yaml", type=str,
    )
    parser.add_argument(
        "--dataset_stats", default="dump/stats.npy", type=str,
    )
    parser.add_argument(
        "--dataset_mapping", default="dump/libritts_mapper.npy", type=str,
    )
    parser.add_argument(
        "--pretrained",
        default="",
        type=str,
        nargs="?",
        help="pretrained weights .h5 file to load weights from. Auto-skips non-matching layers",
    )
    args = parser.parse_args()

    # select the distribution strategy (single GPU / mirrored / TPU)
    STRATEGY = return_strategy()

    # set mixed precision config before any ops are created
    if args.mixed_precision == 1:
        tf.config.optimizer.set_experimental_options({"auto_mixed_precision": True})

    args.mixed_precision = bool(args.mixed_precision)
    args.use_norm = bool(args.use_norm)

    # set logger verbosity from --verbose
    if args.verbose > 1:
        logging.basicConfig(
            level=logging.DEBUG,
            stream=sys.stdout,
            format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
        )
    elif args.verbose > 0:
        logging.basicConfig(
            level=logging.INFO,
            stream=sys.stdout,
            format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
        )
    else:
        logging.basicConfig(
            level=logging.WARN,
            stream=sys.stdout,
            format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
        )
        logging.warning("Skip DEBUG/INFO messages")

    # check directory existence
    if not os.path.exists(args.outdir):
        os.makedirs(args.outdir)

    # check arguments
    if args.train_dir is None:
        raise ValueError("Please specify --train-dir")
    if args.dev_dir is None:
        # fixed: the actual flag is --dev-dir, not --valid-dir
        raise ValueError("Please specify --dev-dir")

    # load and save config so the run is reproducible from outdir alone
    with open(args.config) as f:
        config = yaml.load(f, Loader=yaml.Loader)
    config.update(vars(args))
    config["version"] = tensorflow_tts.__version__
    with open(os.path.join(args.outdir, "config.yml"), "w") as f:
        yaml.dump(config, f, Dumper=yaml.Dumper)
    for key, value in config.items():
        logging.info(f"{key} = {value}")

    # get dataset
    if config["remove_short_samples"]:
        mel_length_threshold = config["mel_length_threshold"]
    else:
        mel_length_threshold = None
    if config["format"] == "npy":
        charactor_query = "*-ids.npy"
        mel_query = "*-raw-feats.npy" if not args.use_norm else "*-norm-feats.npy"
        duration_query = "*-durations.npy"
        f0_query = "*-raw-f0.npy"
        energy_query = "*-raw-energy.npy"
    else:
        raise ValueError("Only npy are supported.")

    # load speakers map from dataset map
    with open(args.dataset_mapping) as f:
        dataset_mapping = json.load(f)
        speakers_map = dataset_mapping["speakers_map"]

    # Check n_speakers matches number of speakers in speakers_map
    n_speakers = config["fastspeech2_params"]["n_speakers"]
    assert n_speakers == len(
        speakers_map
    ), f"Number of speakers in dataset does not match n_speakers in config"

    # define train/valid dataset; train batch is scaled by replicas and
    # gradient-accumulation steps so the per-device batch stays constant
    train_dataset = CharactorDurationF0EnergyMelDataset(
        root_dir=args.train_dir,
        charactor_query=charactor_query,
        mel_query=mel_query,
        duration_query=duration_query,
        f0_query=f0_query,
        energy_query=energy_query,
        f0_stat=args.f0_stat,
        energy_stat=args.energy_stat,
        mel_length_threshold=mel_length_threshold,
        speakers_map=speakers_map,
    ).create(
        is_shuffle=config["is_shuffle"],
        allow_cache=config["allow_cache"],
        batch_size=config["batch_size"]
        * STRATEGY.num_replicas_in_sync
        * config["gradient_accumulation_steps"],
    )
    valid_dataset = CharactorDurationF0EnergyMelDataset(
        root_dir=args.dev_dir,
        charactor_query=charactor_query,
        mel_query=mel_query,
        duration_query=duration_query,
        f0_query=f0_query,
        energy_query=energy_query,
        f0_stat=args.f0_stat,
        energy_stat=args.energy_stat,
        mel_length_threshold=mel_length_threshold,
        speakers_map=speakers_map,
    ).create(
        is_shuffle=config["is_shuffle"],
        allow_cache=config["allow_cache"],
        batch_size=config["batch_size"] * STRATEGY.num_replicas_in_sync,
    )

    # define trainer
    trainer = FastSpeech2Trainer(
        config=config,
        strategy=STRATEGY,
        steps=0,
        epochs=0,
        is_mixed_precision=args.mixed_precision,
        stats_path=args.dataset_stats,
        dataset_config=args.dataset_config,
    )

    with STRATEGY.scope():
        # define model
        fastspeech = TFFastSpeech2(
            config=FastSpeech2Config(**config["fastspeech2_params"])
        )
        fastspeech._build()
        fastspeech.summary()

        if len(args.pretrained) > 1:
            fastspeech.load_weights(args.pretrained, by_name=True, skip_mismatch=True)
            logging.info(
                f"Successfully loaded pretrained weight from {args.pretrained}."
            )

        # AdamW for fastspeech: polynomial decay wrapped in a linear warmup
        learning_rate_fn = tf.keras.optimizers.schedules.PolynomialDecay(
            initial_learning_rate=config["optimizer_params"]["initial_learning_rate"],
            decay_steps=config["optimizer_params"]["decay_steps"],
            end_learning_rate=config["optimizer_params"]["end_learning_rate"],
        )
        learning_rate_fn = WarmUp(
            initial_learning_rate=config["optimizer_params"]["initial_learning_rate"],
            decay_schedule_fn=learning_rate_fn,
            warmup_steps=int(
                config["train_max_steps"]
                * config["optimizer_params"]["warmup_proportion"]
            ),
        )
        optimizer = AdamWeightDecay(
            learning_rate=learning_rate_fn,
            weight_decay_rate=config["optimizer_params"]["weight_decay"],
            beta_1=0.9,
            beta_2=0.98,
            epsilon=1e-6,
            exclude_from_weight_decay=["LayerNorm", "layer_norm", "bias"],
        )
        # touch .iterations so the optimizer slot variables are created
        # inside the strategy scope
        _ = optimizer.iterations

    # compile trainer
    trainer.compile(model=fastspeech, optimizer=optimizer)

    # start training; save a checkpoint on Ctrl-C so progress is not lost
    try:
        trainer.fit(
            train_dataset,
            valid_dataset,
            saved_path=os.path.join(config["outdir"], "checkpoints/"),
            resume=args.resume,
        )
    except KeyboardInterrupt:
        trainer.save_checkpoint()
        logging.info(f"Successfully saved checkpoint @ {trainer.steps}steps.")


if __name__ == "__main__":
    main()
| 17,059 | 33.816327 | 96 | py |
TensorFlowTTS | TensorFlowTTS-master/examples/melgan/train_melgan.py | # -*- coding: utf-8 -*-
# Copyright 2020 Minh Nguyen (@dathudeptrai)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Train MelGAN."""
import tensorflow as tf
physical_devices = tf.config.list_physical_devices("GPU")
for i in range(len(physical_devices)):
tf.config.experimental.set_memory_growth(physical_devices[i], True)
import sys
sys.path.append(".")
import argparse
import logging
import os
import numpy as np
import soundfile as sf
import yaml
from tqdm import tqdm
import tensorflow_tts
import tensorflow_tts.configs.melgan as MELGAN_CONFIG
from examples.melgan.audio_mel_dataset import AudioMelDataset
from tensorflow_tts.losses import TFMelSpectrogram
from tensorflow_tts.models import TFMelGANGenerator, TFMelGANMultiScaleDiscriminator
from tensorflow_tts.trainers import GanBasedTrainer
from tensorflow_tts.utils import calculate_2d_loss, calculate_3d_loss, return_strategy
class MelganTrainer(GanBasedTrainer):
    """Melgan Trainer class based on GanBasedTrainer.

    Implements the adversarial and feature-matching losses against a
    multi-scale discriminator, plus periodic waveform/figure dumps for
    inspection during training.
    """
    def __init__(
        self,
        config,
        strategy,
        steps=0,
        epochs=0,
        is_generator_mixed_precision=False,
        is_discriminator_mixed_precision=False,
    ):
        """Initialize trainer.
        Args:
            steps (int): Initial global steps.
            epochs (int): Initial global epochs.
            config (dict): Config dict loaded from yaml format configuration file.
            strategy: tf.distribute strategy the train/eval steps run under.
            is_generator_mixed_precision (bool): Use mixed precision for generator or not.
            is_discriminator_mixed_precision (bool): Use mixed precision for discriminator or not.
        """
        super(MelganTrainer, self).__init__(
            steps,
            epochs,
            config,
            strategy,
            is_generator_mixed_precision,
            is_discriminator_mixed_precision,
        )
        # define metrics to aggregate data; tf.summary logs them
        self.list_metrics_name = [
            "adversarial_loss",
            "fm_loss",
            "gen_loss",
            "real_loss",
            "fake_loss",
            "dis_loss",
            "mels_spectrogram_loss",
        ]
        self.init_train_eval_metrics(self.list_metrics_name)
        self.reset_states_train()
        self.reset_states_eval()
        self.config = config
    def compile(self, gen_model, dis_model, gen_optimizer, dis_optimizer):
        """Attach models/optimizers and build the loss objects."""
        super().compile(gen_model, dis_model, gen_optimizer, dis_optimizer)
        # define loss
        # Reduction.NONE keeps per-example values so the distributed loop can
        # average them across replicas itself.
        self.mse_loss = tf.keras.losses.MeanSquaredError(
            reduction=tf.keras.losses.Reduction.NONE
        )
        self.mae_loss = tf.keras.losses.MeanAbsoluteError(
            reduction=tf.keras.losses.Reduction.NONE
        )
        self.mels_loss = TFMelSpectrogram()
    def compute_per_example_generator_losses(self, batch, outputs):
        """Compute per example generator losses and return dict_metrics_losses
        Note that all element of the loss MUST has a shape [batch_size] and
        the keys of dict_metrics_losses MUST be in self.list_metrics_name.
        Args:
            batch: dictionary batch input return from dataloader
            outputs: outputs of the model
        Returns:
            per_example_losses: per example losses for each GPU, shape [B]
            dict_metrics_losses: dictionary loss.
        """
        audios = batch["audios"]
        y_hat = outputs
        # discriminator scores on generated and real audio; the real audio is
        # expanded to [B, T, 1] to match the generator's output shape
        p_hat = self._discriminator(y_hat)
        p = self._discriminator(tf.expand_dims(audios, 2))
        # adversarial loss: push the last-layer score of each scale toward 1
        adv_loss = 0.0
        for i in range(len(p_hat)):
            adv_loss += calculate_3d_loss(
                tf.ones_like(p_hat[i][-1]), p_hat[i][-1], loss_fn=self.mse_loss
            )
        # NOTE: `i` leaks from the loop above, so this averages over the
        # number of discriminator scales (assumes at least one scale).
        adv_loss /= i + 1
        # define feature-matching loss over all intermediate layers
        fm_loss = 0.0
        for i in range(len(p_hat)):
            for j in range(len(p_hat[i]) - 1):
                fm_loss += calculate_3d_loss(
                    p[i][j], p_hat[i][j], loss_fn=self.mae_loss
                )
        # average over scales * intermediate layers (loop-variable leak again)
        fm_loss /= (i + 1) * (j + 1)
        adv_loss += self.config["lambda_feat_match"] * fm_loss
        per_example_losses = adv_loss
        dict_metrics_losses = {
            "adversarial_loss": adv_loss,
            "fm_loss": fm_loss,
            "gen_loss": adv_loss,
            # mel loss is logged for monitoring only; it is not part of
            # per_example_losses here
            "mels_spectrogram_loss": calculate_2d_loss(
                audios, tf.squeeze(y_hat, -1), loss_fn=self.mels_loss
            ),
        }
        return per_example_losses, dict_metrics_losses
    def compute_per_example_discriminator_losses(self, batch, gen_outputs):
        """Compute per-example discriminator losses (LSGAN real/fake terms)."""
        audios = batch["audios"]
        y_hat = gen_outputs
        y = tf.expand_dims(audios, 2)
        p = self._discriminator(y)
        p_hat = self._discriminator(y_hat)
        # LSGAN: real scores toward 1, fake scores toward 0, per scale
        real_loss = 0.0
        fake_loss = 0.0
        for i in range(len(p)):
            real_loss += calculate_3d_loss(
                tf.ones_like(p[i][-1]), p[i][-1], loss_fn=self.mse_loss
            )
            fake_loss += calculate_3d_loss(
                tf.zeros_like(p_hat[i][-1]), p_hat[i][-1], loss_fn=self.mse_loss
            )
        # average over discriminator scales (relies on the leaked loop index)
        real_loss /= i + 1
        fake_loss /= i + 1
        dis_loss = real_loss + fake_loss
        # calculate per_example_losses and dict_metrics_losses
        per_example_losses = dis_loss
        dict_metrics_losses = {
            "real_loss": real_loss,
            "fake_loss": fake_loss,
            "dis_loss": dis_loss,
        }
        return per_example_losses, dict_metrics_losses
    def generate_and_save_intermediate_result(self, batch):
        """Generate and save intermediate result."""
        import matplotlib.pyplot as plt
        # generate
        y_batch_ = self.one_step_predict(batch)
        y_batch = batch["audios"]
        utt_ids = batch["utt_ids"]
        # convert to tensor.
        # here we just take a sample at first replica.
        # (under a distribution strategy the outputs are PerReplica values;
        # the except branch handles the single-device case)
        try:
            y_batch_ = y_batch_.values[0].numpy()
            y_batch = y_batch.values[0].numpy()
            utt_ids = utt_ids.values[0].numpy()
        except Exception:
            y_batch_ = y_batch_.numpy()
            y_batch = y_batch.numpy()
            utt_ids = utt_ids.numpy()
        # check directory
        dirname = os.path.join(self.config["outdir"], f"predictions/{self.steps}steps")
        if not os.path.exists(dirname):
            os.makedirs(dirname)
        for idx, (y, y_) in enumerate(zip(y_batch, y_batch_), 0):
            # convert to ndarray
            y, y_ = tf.reshape(y, [-1]).numpy(), tf.reshape(y_, [-1]).numpy()
            # plot the figure and save it
            utt_id = utt_ids[idx]
            figname = os.path.join(dirname, f"{utt_id}.png")
            plt.subplot(2, 1, 1)
            plt.plot(y)
            plt.title("groundtruth speech")
            plt.subplot(2, 1, 2)
            plt.plot(y_)
            plt.title(f"generated speech @ {self.steps} steps")
            plt.tight_layout()
            plt.savefig(figname)
            plt.close()
            # save as wavefile (clip to the valid PCM range first)
            y = np.clip(y, -1, 1)
            y_ = np.clip(y_, -1, 1)
            sf.write(
                figname.replace(".png", "_ref.wav"),
                y,
                self.config["sampling_rate"],
                "PCM_16",
            )
            sf.write(
                figname.replace(".png", "_gen.wav"),
                y_,
                self.config["sampling_rate"],
                "PCM_16",
            )
def collater(
    items,
    batch_max_steps=tf.constant(8192, dtype=tf.int32),
    hop_size=tf.constant(256, dtype=tf.int32),
):
    """Mapping function that crops/pads one audio-mel pair to a fixed length.

    Args:
        batch_max_steps (int): The maximum length of input signal in batch.
        hop_size (int): Hop size of auxiliary features.
    """
    wav = items["audios"]
    spec = items["mels"]
    if batch_max_steps is None:
        # fall back to the longest hop-aligned length of this utterance
        batch_max_steps = (tf.shape(wav)[0] // hop_size) * hop_size
    batch_max_frames = batch_max_steps // hop_size
    # guarantee the waveform covers every mel frame
    if len(wav) < len(spec) * hop_size:
        wav = tf.pad(wav, [[0, len(spec) * hop_size - len(wav)]])
    if len(spec) > batch_max_frames:
        # sample a random aligned window of batch_max_steps samples
        frame_offset = tf.random.uniform(
            shape=[], minval=0, maxval=len(spec) - batch_max_frames, dtype=tf.int32
        )
        sample_offset = frame_offset * hop_size
        wav = wav[sample_offset : sample_offset + batch_max_steps]
        spec = spec[frame_offset : frame_offset + batch_max_frames, :]
    else:
        # utterance is short: right-pad both streams to the target length
        wav = tf.pad(wav, [[0, batch_max_steps - len(wav)]])
        spec = tf.pad(spec, [[0, batch_max_frames - len(spec)], [0, 0]])
    return {
        "utt_ids": items["utt_ids"],
        "audios": wav,
        "mels": spec,
        "mel_lengths": len(spec),
        "audio_lengths": len(wav),
    }
def main():
    """Run the MelGAN training process (CLI entry point).

    Parses command-line arguments, configures logging and mixed precision,
    builds the audio/mel datasets, constructs generator + multi-scale
    discriminator under the distribution strategy, and runs the trainer.
    """
    parser = argparse.ArgumentParser(
        description="Train MelGAN (See detail in tensorflow_tts/bin/train-melgan.py)"
    )
    parser.add_argument(
        "--train-dir",
        default=None,
        type=str,
        help="directory including training data. ",
    )
    parser.add_argument(
        "--dev-dir",
        default=None,
        type=str,
        help="directory including development data. ",
    )
    parser.add_argument(
        "--use-norm", default=1, type=int, help="use norm mels for training or raw."
    )
    parser.add_argument(
        "--outdir", type=str, required=True, help="directory to save checkpoints."
    )
    parser.add_argument(
        "--config", type=str, required=True, help="yaml format configuration file."
    )
    parser.add_argument(
        "--resume",
        default="",
        type=str,
        nargs="?",
        help='checkpoint file path to resume training. (default="")',
    )
    parser.add_argument(
        "--verbose",
        type=int,
        default=1,
        help="logging level. higher is more logging. (default=1)",
    )
    parser.add_argument(
        "--generator_mixed_precision",
        default=0,
        type=int,
        help="using mixed precision for generator or not.",
    )
    parser.add_argument(
        "--discriminator_mixed_precision",
        default=0,
        type=int,
        help="using mixed precision for discriminator or not.",
    )
    parser.add_argument(
        "--pretrained",
        default="",
        type=str,
        nargs="?",
        help="path of .h5 melgan generator to load weights from",
    )
    args = parser.parse_args()

    # select the distribution strategy (single GPU / mirrored / TPU)
    STRATEGY = return_strategy()

    # set mixed precision config before any ops are created
    if args.generator_mixed_precision == 1 or args.discriminator_mixed_precision == 1:
        tf.config.optimizer.set_experimental_options({"auto_mixed_precision": True})

    args.generator_mixed_precision = bool(args.generator_mixed_precision)
    args.discriminator_mixed_precision = bool(args.discriminator_mixed_precision)
    args.use_norm = bool(args.use_norm)

    # set logger verbosity from --verbose
    if args.verbose > 1:
        logging.basicConfig(
            level=logging.DEBUG,
            stream=sys.stdout,
            format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
        )
    elif args.verbose > 0:
        logging.basicConfig(
            level=logging.INFO,
            stream=sys.stdout,
            format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
        )
    else:
        logging.basicConfig(
            level=logging.WARN,
            stream=sys.stdout,
            format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
        )
        logging.warning("Skip DEBUG/INFO messages")

    # check directory existence
    if not os.path.exists(args.outdir):
        os.makedirs(args.outdir)

    # check arguments
    if args.train_dir is None:
        raise ValueError("Please specify --train-dir")
    if args.dev_dir is None:
        # fixed: the actual flag is --dev-dir, not --valid-dir
        raise ValueError("Please specify --dev-dir")

    # load and save config so the run is reproducible from outdir alone
    with open(args.config) as f:
        config = yaml.load(f, Loader=yaml.Loader)
    config.update(vars(args))
    config["version"] = tensorflow_tts.__version__
    with open(os.path.join(args.outdir, "config.yml"), "w") as f:
        yaml.dump(config, f, Dumper=yaml.Dumper)
    for key, value in config.items():
        logging.info(f"{key} = {value}")

    # get dataset; drop utterances shorter than one training window
    if config["remove_short_samples"]:
        mel_length_threshold = config["batch_max_steps"] // config[
            "hop_size"
        ] + 2 * config["melgan_generator_params"].get("aux_context_window", 0)
    else:
        mel_length_threshold = None
    if config["format"] == "npy":
        audio_query = "*-wave.npy"
        mel_query = "*-raw-feats.npy" if not args.use_norm else "*-norm-feats.npy"
        audio_load_fn = np.load
        mel_load_fn = np.load
    else:
        raise ValueError("Only npy are supported.")

    # define train/valid dataset; train batch is scaled by replicas and
    # gradient-accumulation steps so the per-device batch stays constant
    train_dataset = AudioMelDataset(
        root_dir=args.train_dir,
        audio_query=audio_query,
        mel_query=mel_query,
        audio_load_fn=audio_load_fn,
        mel_load_fn=mel_load_fn,
        mel_length_threshold=mel_length_threshold,
    ).create(
        is_shuffle=config["is_shuffle"],
        map_fn=lambda items: collater(
            items,
            batch_max_steps=tf.constant(config["batch_max_steps"], dtype=tf.int32),
            hop_size=tf.constant(config["hop_size"], dtype=tf.int32),
        ),
        allow_cache=config["allow_cache"],
        batch_size=config["batch_size"]
        * STRATEGY.num_replicas_in_sync
        * config["gradient_accumulation_steps"],
    )
    valid_dataset = AudioMelDataset(
        root_dir=args.dev_dir,
        audio_query=audio_query,
        mel_query=mel_query,
        audio_load_fn=audio_load_fn,
        mel_load_fn=mel_load_fn,
        mel_length_threshold=mel_length_threshold,
    ).create(
        is_shuffle=config["is_shuffle"],
        map_fn=lambda items: collater(
            items,
            batch_max_steps=tf.constant(
                config["batch_max_steps_valid"], dtype=tf.int32
            ),
            hop_size=tf.constant(config["hop_size"], dtype=tf.int32),
        ),
        allow_cache=config["allow_cache"],
        batch_size=config["batch_size"] * STRATEGY.num_replicas_in_sync,
    )

    # define trainer
    trainer = MelganTrainer(
        steps=0,
        epochs=0,
        config=config,
        strategy=STRATEGY,
        is_generator_mixed_precision=args.generator_mixed_precision,
        is_discriminator_mixed_precision=args.discriminator_mixed_precision,
    )

    # define generator and discriminator
    with STRATEGY.scope():
        generator = TFMelGANGenerator(
            MELGAN_CONFIG.MelGANGeneratorConfig(**config["melgan_generator_params"]),
            name="melgan_generator",
        )
        discriminator = TFMelGANMultiScaleDiscriminator(
            MELGAN_CONFIG.MelGANDiscriminatorConfig(
                **config["melgan_discriminator_params"]
            ),
            name="melgan_discriminator",
        )

        # dummy input to build model (weights must exist before load/summary)
        fake_mels = tf.random.uniform(shape=[1, 100, 80], dtype=tf.float32)
        y_hat = generator(fake_mels)
        discriminator(y_hat)

        if len(args.pretrained) > 1:
            generator.load_weights(args.pretrained)
            logging.info(
                f"Successfully loaded pretrained weight from {args.pretrained}."
            )

        generator.summary()
        discriminator.summary()

        gen_optimizer = tf.keras.optimizers.Adam(**config["generator_optimizer_params"])
        dis_optimizer = tf.keras.optimizers.Adam(
            **config["discriminator_optimizer_params"]
        )

    trainer.compile(
        gen_model=generator,
        dis_model=discriminator,
        gen_optimizer=gen_optimizer,
        dis_optimizer=dis_optimizer,
    )

    # start training; save a checkpoint on Ctrl-C so progress is not lost
    try:
        trainer.fit(
            train_dataset,
            valid_dataset,
            saved_path=os.path.join(config["outdir"], "checkpoints/"),
            resume=args.resume,
        )
    except KeyboardInterrupt:
        trainer.save_checkpoint()
        logging.info(f"Successfully saved checkpoint @ {trainer.steps}steps.")


if __name__ == "__main__":
    main()
| 16,989 | 31.48566 | 98 | py |
TensorFlowTTS | TensorFlowTTS-master/examples/melgan_stft/train_melgan_stft.py | # -*- coding: utf-8 -*-
# Copyright 2020 Minh Nguyen (@dathudeptrai)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Train MelGAN Multi Resolution STFT Loss."""
import tensorflow as tf
physical_devices = tf.config.list_physical_devices("GPU")
for i in range(len(physical_devices)):
tf.config.experimental.set_memory_growth(physical_devices[i], True)
import sys
sys.path.append(".")
import argparse
import logging
import os
import numpy as np
import yaml
import tensorflow_tts
import tensorflow_tts.configs.melgan as MELGAN_CONFIG
from examples.melgan.audio_mel_dataset import AudioMelDataset
from examples.melgan.train_melgan import MelganTrainer, collater
from tensorflow_tts.losses import TFMultiResolutionSTFT
from tensorflow_tts.models import TFMelGANGenerator, TFMelGANMultiScaleDiscriminator
from tensorflow_tts.utils import calculate_2d_loss, calculate_3d_loss, return_strategy
class MultiSTFTMelganTrainer(MelganTrainer):
    """Multi STFT Melgan Trainer class based on MelganTrainer.

    Replaces the plain MelGAN generator loss with a multi-resolution STFT
    loss, and only adds the adversarial/feature-matching terms once the
    discriminator warm-up step count has been reached.
    """
    def __init__(
        self,
        config,
        strategy,
        steps=0,
        epochs=0,
        is_generator_mixed_precision=False,
        is_discriminator_mixed_precision=False,
    ):
        """Initialize trainer.
        Args:
            steps (int): Initial global steps.
            epochs (int): Initial global epochs.
            config (dict): Config dict loaded from yaml format configuration file.
            strategy: tf.distribute strategy the train/eval steps run under.
            is_generator_mixed_precision (bool): Use mixed precision for generator or not.
            is_discriminator_mixed_precision (bool): Use mixed precision for discriminator or not.
        """
        super(MultiSTFTMelganTrainer, self).__init__(
            config=config,
            steps=steps,
            epochs=epochs,
            strategy=strategy,
            is_generator_mixed_precision=is_generator_mixed_precision,
            is_discriminator_mixed_precision=is_discriminator_mixed_precision,
        )
        # metric names replace the parent's list (mel loss swapped for the
        # two STFT components)
        self.list_metrics_name = [
            "adversarial_loss",
            "fm_loss",
            "gen_loss",
            "real_loss",
            "fake_loss",
            "dis_loss",
            "spectral_convergence_loss",
            "log_magnitude_loss",
        ]
        self.init_train_eval_metrics(self.list_metrics_name)
        self.reset_states_train()
        self.reset_states_eval()
    def compile(self, gen_model, dis_model, gen_optimizer, dis_optimizer):
        """Attach models/optimizers and build the multi-resolution STFT loss."""
        super().compile(gen_model, dis_model, gen_optimizer, dis_optimizer)
        # define loss
        self.stft_loss = TFMultiResolutionSTFT(**self.config["stft_loss_params"])
    def compute_per_example_generator_losses(self, batch, outputs):
        """Compute per example generator losses and return dict_metrics_losses
        Note that all element of the loss MUST has a shape [batch_size] and
        the keys of dict_metrics_losses MUST be in self.list_metrics_name.
        Args:
            batch: dictionary batch input return from dataloader
            outputs: outputs of the model
        Returns:
            per_example_losses: per example losses for each GPU, shape [B]
            dict_metrics_losses: dictionary loss.
        """
        dict_metrics_losses = {}
        per_example_losses = 0.0
        audios = batch["audios"]
        y_hat = outputs
        # calculate multi-resolution stft loss (spectral convergence +
        # log-magnitude components)
        sc_loss, mag_loss = calculate_2d_loss(
            audios, tf.squeeze(y_hat, -1), self.stft_loss
        )
        # trick to prevent the loss from exploding: zero out outlier values
        sc_loss = tf.where(sc_loss >= 15.0, 0.0, sc_loss)
        mag_loss = tf.where(mag_loss >= 15.0, 0.0, mag_loss)
        # compute generator loss
        gen_loss = 0.5 * (sc_loss + mag_loss)
        # NOTE(review): this Python-level comparison on self.steps is baked in
        # at trace time if this runs inside a tf.function — confirm the
        # trainer retraces after discriminator_train_start_steps.
        if self.steps >= self.config["discriminator_train_start_steps"]:
            p_hat = self._discriminator(y_hat)
            p = self._discriminator(tf.expand_dims(audios, 2))
            # adversarial loss: push last-layer scores of each scale toward 1
            adv_loss = 0.0
            for i in range(len(p_hat)):
                adv_loss += calculate_3d_loss(
                    tf.ones_like(p_hat[i][-1]), p_hat[i][-1], loss_fn=self.mse_loss
                )
            # average over scales (`i` leaks from the loop above)
            adv_loss /= i + 1
            # define feature-matching loss over all intermediate layers
            fm_loss = 0.0
            for i in range(len(p_hat)):
                for j in range(len(p_hat[i]) - 1):
                    fm_loss += calculate_3d_loss(
                        p[i][j], p_hat[i][j], loss_fn=self.mae_loss
                    )
            fm_loss /= (i + 1) * (j + 1)
            adv_loss += self.config["lambda_feat_match"] * fm_loss
            gen_loss += self.config["lambda_adv"] * adv_loss
            dict_metrics_losses.update({"adversarial_loss": adv_loss})
            dict_metrics_losses.update({"fm_loss": fm_loss})
        dict_metrics_losses.update({"gen_loss": gen_loss})
        dict_metrics_losses.update({"spectral_convergence_loss": sc_loss})
        dict_metrics_losses.update({"log_magnitude_loss": mag_loss})
        per_example_losses = gen_loss
        return per_example_losses, dict_metrics_losses
def main():
    """Run the MelGAN multi-resolution-STFT training process (CLI entry point).

    Parses command-line arguments, configures logging and mixed precision,
    builds the audio/mel datasets, constructs generator + multi-scale
    discriminator with schedule-driven Adam optimizers, and runs the trainer.
    """
    parser = argparse.ArgumentParser(
        description="Train MelGAN (See detail in tensorflow_tts/bin/train-melgan.py)"
    )
    parser.add_argument(
        "--train-dir",
        default=None,
        type=str,
        help="directory including training data. ",
    )
    parser.add_argument(
        "--dev-dir",
        default=None,
        type=str,
        help="directory including development data. ",
    )
    parser.add_argument(
        "--use-norm", default=1, type=int, help="use norm mels for training or raw."
    )
    parser.add_argument(
        "--outdir", type=str, required=True, help="directory to save checkpoints."
    )
    parser.add_argument(
        "--config", type=str, required=True, help="yaml format configuration file."
    )
    parser.add_argument(
        "--resume",
        default="",
        type=str,
        nargs="?",
        help='checkpoint file path to resume training. (default="")',
    )
    parser.add_argument(
        "--verbose",
        type=int,
        default=1,
        help="logging level. higher is more logging. (default=1)",
    )
    parser.add_argument(
        "--generator_mixed_precision",
        default=0,
        type=int,
        help="using mixed precision for generator or not.",
    )
    parser.add_argument(
        "--discriminator_mixed_precision",
        default=0,
        type=int,
        help="using mixed precision for discriminator or not.",
    )
    parser.add_argument(
        "--pretrained",
        default="",
        type=str,
        nargs="?",
        help="path of .h5 melgan generator to load weights from",
    )
    args = parser.parse_args()

    # select the distribution strategy (single GPU / mirrored / TPU)
    STRATEGY = return_strategy()

    # set mixed precision config before any ops are created
    if args.generator_mixed_precision == 1 or args.discriminator_mixed_precision == 1:
        tf.config.optimizer.set_experimental_options({"auto_mixed_precision": True})

    args.generator_mixed_precision = bool(args.generator_mixed_precision)
    args.discriminator_mixed_precision = bool(args.discriminator_mixed_precision)
    args.use_norm = bool(args.use_norm)

    # set logger verbosity from --verbose
    if args.verbose > 1:
        logging.basicConfig(
            level=logging.DEBUG,
            stream=sys.stdout,
            format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
        )
    elif args.verbose > 0:
        logging.basicConfig(
            level=logging.INFO,
            stream=sys.stdout,
            format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
        )
    else:
        logging.basicConfig(
            level=logging.WARN,
            stream=sys.stdout,
            format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
        )
        logging.warning("Skip DEBUG/INFO messages")

    # check directory existence
    if not os.path.exists(args.outdir):
        os.makedirs(args.outdir)

    # check arguments
    if args.train_dir is None:
        raise ValueError("Please specify --train-dir")
    if args.dev_dir is None:
        # fixed: the actual flag is --dev-dir, not --valid-dir
        raise ValueError("Please specify --dev-dir")

    # load and save config so the run is reproducible from outdir alone
    with open(args.config) as f:
        config = yaml.load(f, Loader=yaml.Loader)
    config.update(vars(args))
    config["version"] = tensorflow_tts.__version__
    with open(os.path.join(args.outdir, "config.yml"), "w") as f:
        yaml.dump(config, f, Dumper=yaml.Dumper)
    for key, value in config.items():
        logging.info(f"{key} = {value}")

    # get dataset; drop utterances shorter than one training window
    if config["remove_short_samples"]:
        mel_length_threshold = config["batch_max_steps"] // config[
            "hop_size"
        ] + 2 * config["melgan_generator_params"].get("aux_context_window", 0)
    else:
        mel_length_threshold = None
    if config["format"] == "npy":
        audio_query = "*-wave.npy"
        mel_query = "*-raw-feats.npy" if not args.use_norm else "*-norm-feats.npy"
        audio_load_fn = np.load
        mel_load_fn = np.load
    else:
        raise ValueError("Only npy are supported.")

    # define train/valid dataset; train batch is scaled by replicas and
    # gradient-accumulation steps so the per-device batch stays constant
    train_dataset = AudioMelDataset(
        root_dir=args.train_dir,
        audio_query=audio_query,
        mel_query=mel_query,
        audio_load_fn=audio_load_fn,
        mel_load_fn=mel_load_fn,
        mel_length_threshold=mel_length_threshold,
    ).create(
        is_shuffle=config["is_shuffle"],
        map_fn=lambda items: collater(
            items,
            batch_max_steps=tf.constant(config["batch_max_steps"], dtype=tf.int32),
            hop_size=tf.constant(config["hop_size"], dtype=tf.int32),
        ),
        allow_cache=config["allow_cache"],
        batch_size=config["batch_size"]
        * STRATEGY.num_replicas_in_sync
        * config["gradient_accumulation_steps"],
    )
    valid_dataset = AudioMelDataset(
        root_dir=args.dev_dir,
        audio_query=audio_query,
        mel_query=mel_query,
        audio_load_fn=audio_load_fn,
        mel_load_fn=mel_load_fn,
        mel_length_threshold=mel_length_threshold,
    ).create(
        is_shuffle=config["is_shuffle"],
        map_fn=lambda items: collater(
            items,
            batch_max_steps=tf.constant(
                config["batch_max_steps_valid"], dtype=tf.int32
            ),
            hop_size=tf.constant(config["hop_size"], dtype=tf.int32),
        ),
        allow_cache=config["allow_cache"],
        batch_size=config["batch_size"] * STRATEGY.num_replicas_in_sync,
    )

    # define trainer
    trainer = MultiSTFTMelganTrainer(
        steps=0,
        epochs=0,
        config=config,
        strategy=STRATEGY,
        is_generator_mixed_precision=args.generator_mixed_precision,
        is_discriminator_mixed_precision=args.discriminator_mixed_precision,
    )

    with STRATEGY.scope():
        # define generator and discriminator
        generator = TFMelGANGenerator(
            MELGAN_CONFIG.MelGANGeneratorConfig(**config["melgan_generator_params"]),
            name="melgan_generator",
        )
        discriminator = TFMelGANMultiScaleDiscriminator(
            MELGAN_CONFIG.MelGANDiscriminatorConfig(
                **config["melgan_discriminator_params"]
            ),
            name="melgan_discriminator",
        )

        # dummy input to build model (weights must exist before load/summary)
        fake_mels = tf.random.uniform(shape=[1, 100, 80], dtype=tf.float32)
        y_hat = generator(fake_mels)
        discriminator(y_hat)

        if len(args.pretrained) > 1:
            generator.load_weights(args.pretrained)
            logging.info(
                f"Successfully loaded pretrained weight from {args.pretrained}."
            )

        generator.summary()
        discriminator.summary()

        # define optimizer: learning-rate schedules are looked up by name
        # from tf.keras.optimizers.schedules
        generator_lr_fn = getattr(
            tf.keras.optimizers.schedules, config["generator_optimizer_params"]["lr_fn"]
        )(**config["generator_optimizer_params"]["lr_params"])
        discriminator_lr_fn = getattr(
            tf.keras.optimizers.schedules,
            config["discriminator_optimizer_params"]["lr_fn"],
        )(**config["discriminator_optimizer_params"]["lr_params"])

        gen_optimizer = tf.keras.optimizers.Adam(
            learning_rate=generator_lr_fn, amsgrad=False
        )
        dis_optimizer = tf.keras.optimizers.Adam(
            learning_rate=discriminator_lr_fn, amsgrad=False
        )

    trainer.compile(
        gen_model=generator,
        dis_model=discriminator,
        gen_optimizer=gen_optimizer,
        dis_optimizer=dis_optimizer,
    )

    # start training; save a checkpoint on Ctrl-C so progress is not lost
    try:
        trainer.fit(
            train_dataset,
            valid_dataset,
            saved_path=os.path.join(config["outdir"], "checkpoints/"),
            resume=args.resume,
        )
    except KeyboardInterrupt:
        trainer.save_checkpoint()
        logging.info(f"Successfully saved checkpoint @ {trainer.steps}steps.")


if __name__ == "__main__":
    main()
| 13,562 | 32.655087 | 98 | py |
TensorFlowTTS | TensorFlowTTS-master/examples/multiband_melgan/train_multiband_melgan.py | # -*- coding: utf-8 -*-
# Copyright 2020 Minh Nguyen (@dathudeptrai)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Train Multi-Band MelGAN."""
import tensorflow as tf
physical_devices = tf.config.list_physical_devices("GPU")
for i in range(len(physical_devices)):
tf.config.experimental.set_memory_growth(physical_devices[i], True)
import sys
sys.path.append(".")
import argparse
import logging
import os
import numpy as np
import soundfile as sf
import yaml
from tensorflow.keras.mixed_precision import experimental as mixed_precision
import tensorflow_tts
from examples.melgan.audio_mel_dataset import AudioMelDataset
from examples.melgan.train_melgan import MelganTrainer, collater
from tensorflow_tts.configs import (
MultiBandMelGANDiscriminatorConfig,
MultiBandMelGANGeneratorConfig,
)
from tensorflow_tts.losses import TFMultiResolutionSTFT
from tensorflow_tts.models import (
TFPQMF,
TFMelGANGenerator,
TFMelGANMultiScaleDiscriminator,
)
from tensorflow_tts.utils import calculate_2d_loss, calculate_3d_loss, return_strategy
class MultiBandMelganTrainer(MelganTrainer):
    """Multi-Band MelGAN Trainer class based on MelganTrainer.

    Adds sub-band (PQMF) multi-resolution STFT losses on top of the
    full-band losses handled by the parent MelganTrainer.
    """
    def __init__(
        self,
        config,
        strategy,
        steps=0,
        epochs=0,
        is_generator_mixed_precision=False,
        is_discriminator_mixed_precision=False,
    ):
        """Initialize trainer.
        Args:
            config (dict): Config dict loaded from yaml format configuration file.
            strategy: tf.distribute strategy forwarded to the parent trainer.
            steps (int): Initial global steps.
            epochs (int): Initial global epochs.
            is_generator_mixed_precision (bool): Use mixed precision for generator or not.
            is_discriminator_mixed_precision (bool): Use mixed precision for discriminator or not.
        """
        super(MultiBandMelganTrainer, self).__init__(
            config=config,
            steps=steps,
            epochs=epochs,
            strategy=strategy,
            is_generator_mixed_precision=is_generator_mixed_precision,
            is_discriminator_mixed_precision=is_discriminator_mixed_precision,
        )
        # define metrics to aggregate data; tf.summary logs them
        self.list_metrics_name = [
            "adversarial_loss",
            "subband_spectral_convergence_loss",
            "subband_log_magnitude_loss",
            "fullband_spectral_convergence_loss",
            "fullband_log_magnitude_loss",
            "gen_loss",
            "real_loss",
            "fake_loss",
            "dis_loss",
        ]
        self.init_train_eval_metrics(self.list_metrics_name)
        self.reset_states_train()
        self.reset_states_eval()
    def compile(self, gen_model, dis_model, gen_optimizer, dis_optimizer, pqmf):
        """Attach models and optimizers, then build STFT losses and keep the PQMF bank.

        Args:
            gen_model: generator model.
            dis_model: discriminator model.
            gen_optimizer: optimizer for the generator.
            dis_optimizer: optimizer for the discriminator.
            pqmf: PQMF analysis/synthesis filter bank used to split/merge sub-bands.
        """
        super().compile(gen_model, dis_model, gen_optimizer, dis_optimizer)
        # define loss (separate multi-resolution STFT losses for sub-band and full-band)
        self.sub_band_stft_loss = TFMultiResolutionSTFT(
            **self.config["subband_stft_loss_params"]
        )
        self.full_band_stft_loss = TFMultiResolutionSTFT(
            **self.config["stft_loss_params"]
        )
        # define pqmf module
        self.pqmf = pqmf
    def compute_per_example_generator_losses(self, batch, outputs):
        """Compute per example generator losses and return dict_metrics_losses
        Note that all element of the loss MUST has a shape [batch_size] and
        the keys of dict_metrics_losses MUST be in self.list_metrics_name.
        Args:
            batch: dictionary batch input return from dataloader
            outputs: outputs of the model (multi-band waveform, [B, T // subbands, subbands])
        Returns:
            per_example_losses: per example losses for each GPU, shape [B]
            dict_metrics_losses: dictionary loss.
        """
        dict_metrics_losses = {}
        per_example_losses = 0.0
        audios = batch["audios"]
        y_mb_hat = outputs
        # reconstruct the full-band waveform from generated sub-bands
        y_hat = self.pqmf.synthesis(y_mb_hat)
        # split ground-truth audio into the same sub-bands for the sub-band loss
        y_mb = self.pqmf.analysis(tf.expand_dims(audios, -1))
        y_mb = tf.transpose(y_mb, (0, 2, 1))  # [B, subbands, T//subbands]
        # flatten bands into the batch axis so the STFT loss treats each band
        # as an independent example
        y_mb = tf.reshape(y_mb, (-1, tf.shape(y_mb)[-1]))  # [B * subbands, T']
        y_mb_hat = tf.transpose(y_mb_hat, (0, 2, 1))  # [B, subbands, T//subbands]
        y_mb_hat = tf.reshape(
            y_mb_hat, (-1, tf.shape(y_mb_hat)[-1])
        )  # [B * subbands, T']
        # calculate sub/full band spectral_convergence and log mag loss.
        sub_sc_loss, sub_mag_loss = calculate_2d_loss(
            y_mb, y_mb_hat, self.sub_band_stft_loss
        )
        # average the per-band losses back to one value per original example
        sub_sc_loss = tf.reduce_mean(
            tf.reshape(sub_sc_loss, [-1, self.pqmf.subbands]), -1
        )
        sub_mag_loss = tf.reduce_mean(
            tf.reshape(sub_mag_loss, [-1, self.pqmf.subbands]), -1
        )
        full_sc_loss, full_mag_loss = calculate_2d_loss(
            audios, tf.squeeze(y_hat, -1), self.full_band_stft_loss
        )
        # define generator loss: equal weighting of sub-band and full-band terms
        gen_loss = 0.5 * (sub_sc_loss + sub_mag_loss) + 0.5 * (
            full_sc_loss + full_mag_loss
        )
        # adversarial term only kicks in once the discriminator starts training
        if self.steps >= self.config["discriminator_train_start_steps"]:
            p_hat = self._discriminator(y_hat)
            p = self._discriminator(tf.expand_dims(audios, 2))
            adv_loss = 0.0
            for i in range(len(p_hat)):
                # least-squares GAN loss against "real" labels on the final
                # feature map of each discriminator output
                adv_loss += calculate_3d_loss(
                    tf.ones_like(p_hat[i][-1]), p_hat[i][-1], loss_fn=self.mse_loss
                )
            adv_loss /= i + 1  # average over discriminator scales
            gen_loss += self.config["lambda_adv"] * adv_loss
            dict_metrics_losses.update({"adversarial_loss": adv_loss},)
        dict_metrics_losses.update({"gen_loss": gen_loss})
        dict_metrics_losses.update({"subband_spectral_convergence_loss": sub_sc_loss})
        dict_metrics_losses.update({"subband_log_magnitude_loss": sub_mag_loss})
        dict_metrics_losses.update({"fullband_spectral_convergence_loss": full_sc_loss})
        dict_metrics_losses.update({"fullband_log_magnitude_loss": full_mag_loss})
        per_example_losses = gen_loss
        return per_example_losses, dict_metrics_losses
    def compute_per_example_discriminator_losses(self, batch, gen_outputs):
        """Compute per example discriminator losses and return dict_metrics_losses
        Note that all element of the loss MUST has a shape [batch_size] and
        the keys of dict_metrics_losses MUST be in self.list_metrics_name.
        Args:
            batch: dictionary batch input return from dataloader
            gen_outputs: multi-band generator outputs.
        Returns:
            per_example_losses: per example losses for each GPU, shape [B]
            dict_metrics_losses: dictionary loss.
        """
        y_mb_hat = gen_outputs
        # synthesize full-band audio first; the parent class computes the
        # discriminator loss on full-band waveforms
        y_hat = self.pqmf.synthesis(y_mb_hat)
        (
            per_example_losses,
            dict_metrics_losses,
        ) = super().compute_per_example_discriminator_losses(batch, y_hat)
        return per_example_losses, dict_metrics_losses
    def generate_and_save_intermediate_result(self, batch):
        """Generate and save intermediate result (waveform plots and wav files)."""
        import matplotlib.pyplot as plt
        y_mb_batch_ = self.one_step_predict(batch)  # [B, T // subbands, subbands]
        y_batch = batch["audios"]
        utt_ids = batch["utt_ids"]
        # convert to tensor.
        # here we just take a sample at first replica.
        try:
            y_mb_batch_ = y_mb_batch_.values[0].numpy()
            y_batch = y_batch.values[0].numpy()
            utt_ids = utt_ids.values[0].numpy()
        except Exception:
            # not running under a multi-replica strategy; tensors are plain
            y_mb_batch_ = y_mb_batch_.numpy()
            y_batch = y_batch.numpy()
            utt_ids = utt_ids.numpy()
        y_batch_ = self.pqmf.synthesis(y_mb_batch_).numpy()  # [B, T, 1]
        # check directory
        dirname = os.path.join(self.config["outdir"], f"predictions/{self.steps}steps")
        if not os.path.exists(dirname):
            os.makedirs(dirname)
        for idx, (y, y_) in enumerate(zip(y_batch, y_batch_), 0):
            # convert to ndarray
            y, y_ = tf.reshape(y, [-1]).numpy(), tf.reshape(y_, [-1]).numpy()
            # plot figure and save it
            utt_id = utt_ids[idx]
            figname = os.path.join(dirname, f"{utt_id}.png")
            plt.subplot(2, 1, 1)
            plt.plot(y)
            plt.title("groundtruth speech")
            plt.subplot(2, 1, 2)
            plt.plot(y_)
            plt.title(f"generated speech @ {self.steps} steps")
            plt.tight_layout()
            plt.savefig(figname)
            plt.close()
            # save as wavefile (clip to the valid PCM range first)
            y = np.clip(y, -1, 1)
            y_ = np.clip(y_, -1, 1)
            sf.write(
                figname.replace(".png", "_ref.wav"),
                y,
                self.config["sampling_rate"],
                "PCM_16",
            )
            sf.write(
                figname.replace(".png", "_gen.wav"),
                y_,
                self.config["sampling_rate"],
                "PCM_16",
            )
def main():
    """Run the Multi-Band MelGAN training process.

    Parses CLI arguments, builds train/valid datasets, constructs the
    generator/discriminator/PQMF under the distribution strategy scope,
    and hands everything to MultiBandMelganTrainer.

    Raises:
        ValueError: if --train-dir/--dev-dir are missing or the dataset
            format is not "npy".
    """
    parser = argparse.ArgumentParser(
        description="Train MultiBand MelGAN (See detail in examples/multiband_melgan/train_multiband_melgan.py)"
    )
    parser.add_argument(
        "--train-dir",
        default=None,
        type=str,
        help="directory including training data. ",
    )
    parser.add_argument(
        "--dev-dir",
        default=None,
        type=str,
        help="directory including development data. ",
    )
    parser.add_argument(
        "--use-norm", default=1, type=int, help="use norm mels for training or raw."
    )
    parser.add_argument(
        "--outdir", type=str, required=True, help="directory to save checkpoints."
    )
    parser.add_argument(
        "--config", type=str, required=True, help="yaml format configuration file."
    )
    parser.add_argument(
        "--resume",
        default="",
        type=str,
        nargs="?",
        help='checkpoint file path to resume training. (default="")',
    )
    parser.add_argument(
        "--verbose",
        type=int,
        default=1,
        help="logging level. higher is more logging. (default=1)",
    )
    parser.add_argument(
        "--generator_mixed_precision",
        default=0,
        type=int,
        help="using mixed precision for generator or not.",
    )
    parser.add_argument(
        "--discriminator_mixed_precision",
        default=0,
        type=int,
        help="using mixed precision for discriminator or not.",
    )
    parser.add_argument(
        "--pretrained",
        default="",
        type=str,
        nargs="?",
        help="path of .h5 mb-melgan generator to load weights from",
    )
    args = parser.parse_args()
    # return strategy
    STRATEGY = return_strategy()
    # set mixed precision config
    if args.generator_mixed_precision == 1 or args.discriminator_mixed_precision == 1:
        tf.config.optimizer.set_experimental_options({"auto_mixed_precision": True})
    args.generator_mixed_precision = bool(args.generator_mixed_precision)
    args.discriminator_mixed_precision = bool(args.discriminator_mixed_precision)
    args.use_norm = bool(args.use_norm)
    # set logger verbosity from --verbose
    if args.verbose > 1:
        logging.basicConfig(
            level=logging.DEBUG,
            stream=sys.stdout,
            format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
        )
    elif args.verbose > 0:
        logging.basicConfig(
            level=logging.INFO,
            stream=sys.stdout,
            format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
        )
    else:
        logging.basicConfig(
            level=logging.WARN,
            stream=sys.stdout,
            format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
        )
        logging.warning("Skip DEBUG/INFO messages")
    # check directory existence
    if not os.path.exists(args.outdir):
        os.makedirs(args.outdir)
    # check arguments
    if args.train_dir is None:
        raise ValueError("Please specify --train-dir")
    if args.dev_dir is None:
        # message previously referenced a non-existent --valid-dir flag
        raise ValueError("Please specify --dev-dir")
    # load and save config
    with open(args.config) as f:
        config = yaml.load(f, Loader=yaml.Loader)
    config.update(vars(args))
    config["version"] = tensorflow_tts.__version__
    with open(os.path.join(args.outdir, "config.yml"), "w") as f:
        yaml.dump(config, f, Dumper=yaml.Dumper)
    for key, value in config.items():
        logging.info(f"{key} = {value}")
    # get dataset; optionally drop clips shorter than one training window
    if config["remove_short_samples"]:
        mel_length_threshold = config["batch_max_steps"] // config[
            "hop_size"
        ] + 2 * config["multiband_melgan_generator_params"].get("aux_context_window", 0)
    else:
        mel_length_threshold = None
    if config["format"] == "npy":
        audio_query = "*-wave.npy"
        mel_query = "*-raw-feats.npy" if not args.use_norm else "*-norm-feats.npy"
        audio_load_fn = np.load
        mel_load_fn = np.load
    else:
        raise ValueError("Only npy are supported.")
    # define train/valid dataset
    train_dataset = AudioMelDataset(
        root_dir=args.train_dir,
        audio_query=audio_query,
        mel_query=mel_query,
        audio_load_fn=audio_load_fn,
        mel_load_fn=mel_load_fn,
        mel_length_threshold=mel_length_threshold,
    ).create(
        is_shuffle=config["is_shuffle"],
        map_fn=lambda items: collater(
            items,
            batch_max_steps=tf.constant(config["batch_max_steps"], dtype=tf.int32),
            hop_size=tf.constant(config["hop_size"], dtype=tf.int32),
        ),
        allow_cache=config["allow_cache"],
        # global batch covers all replicas and the accumulation steps
        batch_size=config["batch_size"]
        * STRATEGY.num_replicas_in_sync
        * config["gradient_accumulation_steps"],
    )
    valid_dataset = AudioMelDataset(
        root_dir=args.dev_dir,
        audio_query=audio_query,
        mel_query=mel_query,
        audio_load_fn=audio_load_fn,
        mel_load_fn=mel_load_fn,
        mel_length_threshold=mel_length_threshold,
    ).create(
        is_shuffle=config["is_shuffle"],
        map_fn=lambda items: collater(
            items,
            batch_max_steps=tf.constant(
                config["batch_max_steps_valid"], dtype=tf.int32
            ),
            hop_size=tf.constant(config["hop_size"], dtype=tf.int32),
        ),
        allow_cache=config["allow_cache"],
        batch_size=config["batch_size"] * STRATEGY.num_replicas_in_sync,
    )
    # define trainer
    trainer = MultiBandMelganTrainer(
        steps=0,
        epochs=0,
        config=config,
        strategy=STRATEGY,
        is_generator_mixed_precision=args.generator_mixed_precision,
        is_discriminator_mixed_precision=args.discriminator_mixed_precision,
    )
    with STRATEGY.scope():
        # define generator and discriminator
        generator = TFMelGANGenerator(
            MultiBandMelGANGeneratorConfig(
                **config["multiband_melgan_generator_params"]
            ),
            name="multi_band_melgan_generator",
        )
        discriminator = TFMelGANMultiScaleDiscriminator(
            MultiBandMelGANDiscriminatorConfig(
                **config["multiband_melgan_discriminator_params"]
            ),
            name="multi_band_melgan_discriminator",
        )
        pqmf = TFPQMF(
            MultiBandMelGANGeneratorConfig(
                **config["multiband_melgan_generator_params"]
            ),
            dtype=tf.float32,
            name="pqmf",
        )
        # dummy input to build model (weights must exist before load/summary).
        fake_mels = tf.random.uniform(shape=[1, 100, 80], dtype=tf.float32)
        y_mb_hat = generator(fake_mels)
        y_hat = pqmf.synthesis(y_mb_hat)
        discriminator(y_hat)
        if len(args.pretrained) > 1:
            generator.load_weights(args.pretrained)
            logging.info(
                f"Successfully loaded pretrained weight from {args.pretrained}."
            )
        generator.summary()
        discriminator.summary()
        # define optimizer: learning-rate schedules are looked up by name
        generator_lr_fn = getattr(
            tf.keras.optimizers.schedules, config["generator_optimizer_params"]["lr_fn"]
        )(**config["generator_optimizer_params"]["lr_params"])
        discriminator_lr_fn = getattr(
            tf.keras.optimizers.schedules,
            config["discriminator_optimizer_params"]["lr_fn"],
        )(**config["discriminator_optimizer_params"]["lr_params"])
        gen_optimizer = tf.keras.optimizers.Adam(
            learning_rate=generator_lr_fn,
            amsgrad=config["generator_optimizer_params"]["amsgrad"],
        )
        dis_optimizer = tf.keras.optimizers.Adam(
            learning_rate=discriminator_lr_fn,
            amsgrad=config["discriminator_optimizer_params"]["amsgrad"],
        )
    trainer.compile(
        gen_model=generator,
        dis_model=discriminator,
        gen_optimizer=gen_optimizer,
        dis_optimizer=dis_optimizer,
        pqmf=pqmf,
    )
    # start training; on Ctrl-C save a checkpoint so progress is not lost
    try:
        trainer.fit(
            train_dataset,
            valid_dataset,
            saved_path=os.path.join(config["outdir"], "checkpoints/"),
            resume=args.resume,
        )
    except KeyboardInterrupt:
        trainer.save_checkpoint()
        logging.info(f"Successfully saved checkpoint @ {trainer.steps}steps.")
# Entry point when this file is executed as a script.
if __name__ == "__main__":
    main()
| 18,014 | 33.379771 | 112 | py |
TensorFlowTTS | TensorFlowTTS-master/examples/fastspeech2/train_fastspeech2.py | # -*- coding: utf-8 -*-
# Copyright 2020 Minh Nguyen (@dathudeptrai)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Train FastSpeech2."""
import tensorflow as tf
physical_devices = tf.config.list_physical_devices("GPU")
for i in range(len(physical_devices)):
tf.config.experimental.set_memory_growth(physical_devices[i], True)
import sys
sys.path.append(".")
import argparse
import logging
import os
import numpy as np
import yaml
from tqdm import tqdm
import tensorflow_tts
from examples.fastspeech2.fastspeech2_dataset import CharactorDurationF0EnergyMelDataset
from examples.fastspeech.train_fastspeech import FastSpeechTrainer
from tensorflow_tts.configs import FastSpeech2Config
from tensorflow_tts.models import TFFastSpeech2
from tensorflow_tts.optimizers import AdamWeightDecay, WarmUp
from tensorflow_tts.trainers import Seq2SeqBasedTrainer
from tensorflow_tts.utils import calculate_2d_loss, calculate_3d_loss, return_strategy
class FastSpeech2Trainer(Seq2SeqBasedTrainer):
    """FastSpeech2 Trainer class based on FastSpeechTrainer.

    Trains duration, f0, energy predictors and the mel decoder jointly.
    """
    def __init__(
        self, config, strategy, steps=0, epochs=0, is_mixed_precision=False,
    ):
        """Initialize trainer.
        Args:
            config (dict): Config dict loaded from yaml format configuration file.
            strategy: tf.distribute strategy forwarded to the parent trainer.
            steps (int): Initial global steps.
            epochs (int): Initial global epochs.
            is_mixed_precision (bool): Use mixed precision or not.
        """
        super(FastSpeech2Trainer, self).__init__(
            steps=steps,
            epochs=epochs,
            config=config,
            strategy=strategy,
            is_mixed_precision=is_mixed_precision,
        )
        # define metrics to aggregate data; tf.summary logs them
        self.list_metrics_name = [
            "duration_loss",
            "f0_loss",
            "energy_loss",
            "mel_loss_before",
            "mel_loss_after",
        ]
        self.init_train_eval_metrics(self.list_metrics_name)
        self.reset_states_train()
        self.reset_states_eval()
    def compile(self, model, optimizer):
        """Attach model/optimizer and build the per-example loss functions."""
        super().compile(model, optimizer)
        # Reduction.NONE keeps one loss value per example so the trainer can
        # aggregate across replicas itself.
        self.mse = tf.keras.losses.MeanSquaredError(
            reduction=tf.keras.losses.Reduction.NONE
        )
        self.mae = tf.keras.losses.MeanAbsoluteError(
            reduction=tf.keras.losses.Reduction.NONE
        )
    def compute_per_example_losses(self, batch, outputs):
        """Compute per example losses and return dict_metrics_losses
        Note that all element of the loss MUST has a shape [batch_size] and
        the keys of dict_metrics_losses MUST be in self.list_metrics_name.
        Args:
            batch: dictionary batch input return from dataloader
            outputs: outputs of the model
        Returns:
            per_example_losses: per example losses for each GPU, shape [B]
            dict_metrics_losses: dictionary loss.
        """
        mel_before, mel_after, duration_outputs, f0_outputs, energy_outputs = outputs
        # durations are compared in log-domain; +1 avoids log(0) for
        # zero-length ground-truth durations
        log_duration = tf.math.log(
            tf.cast(tf.math.add(batch["duration_gts"], 1), tf.float32)
        )
        duration_loss = calculate_2d_loss(log_duration, duration_outputs, self.mse)
        f0_loss = calculate_2d_loss(batch["f0_gts"], f0_outputs, self.mse)
        energy_loss = calculate_2d_loss(batch["energy_gts"], energy_outputs, self.mse)
        # mel loss both before and after the postnet refinement
        mel_loss_before = calculate_3d_loss(batch["mel_gts"], mel_before, self.mae)
        mel_loss_after = calculate_3d_loss(batch["mel_gts"], mel_after, self.mae)
        # unweighted sum of all five terms
        per_example_losses = (
            duration_loss + f0_loss + energy_loss + mel_loss_before + mel_loss_after
        )
        dict_metrics_losses = {
            "duration_loss": duration_loss,
            "f0_loss": f0_loss,
            "energy_loss": energy_loss,
            "mel_loss_before": mel_loss_before,
            "mel_loss_after": mel_loss_after,
        }
        return per_example_losses, dict_metrics_losses
    def generate_and_save_intermediate_result(self, batch):
        """Generate and save intermediate result (target vs predicted mel plots)."""
        import matplotlib.pyplot as plt
        # predict with tf.function.
        outputs = self.one_step_predict(batch)
        mels_before, mels_after, *_ = outputs
        mel_gts = batch["mel_gts"]
        utt_ids = batch["utt_ids"]
        # convert to tensor.
        # here we just take a sample at first replica.
        try:
            mels_before = mels_before.values[0].numpy()
            mels_after = mels_after.values[0].numpy()
            mel_gts = mel_gts.values[0].numpy()
            utt_ids = utt_ids.values[0].numpy()
        except Exception:
            # not running under a multi-replica strategy; tensors are plain
            mels_before = mels_before.numpy()
            mels_after = mels_after.numpy()
            mel_gts = mel_gts.numpy()
            utt_ids = utt_ids.numpy()
        # check directory
        dirname = os.path.join(self.config["outdir"], f"predictions/{self.steps}steps")
        if not os.path.exists(dirname):
            os.makedirs(dirname)
        for idx, (mel_gt, mel_before, mel_after) in enumerate(
            zip(mel_gts, mels_before, mels_after), 0
        ):
            mel_gt = tf.reshape(mel_gt, (-1, 80)).numpy()  # [length, 80]
            mel_before = tf.reshape(mel_before, (-1, 80)).numpy()  # [length, 80]
            mel_after = tf.reshape(mel_after, (-1, 80)).numpy()  # [length, 80]
            # plot figure and save it
            utt_id = utt_ids[idx]
            figname = os.path.join(dirname, f"{utt_id}.png")
            fig = plt.figure(figsize=(10, 8))
            ax1 = fig.add_subplot(311)
            ax2 = fig.add_subplot(312)
            ax3 = fig.add_subplot(313)
            im = ax1.imshow(np.rot90(mel_gt), aspect="auto", interpolation="none")
            ax1.set_title("Target Mel-Spectrogram")
            fig.colorbar(mappable=im, shrink=0.65, orientation="horizontal", ax=ax1)
            ax2.set_title("Predicted Mel-before-Spectrogram")
            im = ax2.imshow(np.rot90(mel_before), aspect="auto", interpolation="none")
            fig.colorbar(mappable=im, shrink=0.65, orientation="horizontal", ax=ax2)
            ax3.set_title("Predicted Mel-after-Spectrogram")
            im = ax3.imshow(np.rot90(mel_after), aspect="auto", interpolation="none")
            fig.colorbar(mappable=im, shrink=0.65, orientation="horizontal", ax=ax3)
            plt.tight_layout()
            plt.savefig(figname)
            plt.close()
def main():
    """Run the FastSpeech2 training process.

    Parses CLI arguments, builds train/valid datasets of characters,
    durations, f0, energy and mels, constructs the model and AdamW
    optimizer under the distribution strategy scope, and trains.

    Raises:
        ValueError: if --train-dir/--dev-dir are missing or the dataset
            format is not "npy".
    """
    parser = argparse.ArgumentParser(
        description="Train FastSpeech (See detail in tensorflow_tts/bin/train-fastspeech.py)"
    )
    parser.add_argument(
        "--train-dir",
        default=None,
        type=str,
        help="directory including training data. ",
    )
    parser.add_argument(
        "--dev-dir",
        default=None,
        type=str,
        help="directory including development data. ",
    )
    parser.add_argument(
        "--use-norm", default=1, type=int, help="use norm-mels for train or raw."
    )
    parser.add_argument(
        "--f0-stat",
        default="./dump/stats_f0.npy",
        type=str,
        required=True,
        help="f0-stat path.",
    )
    parser.add_argument(
        "--energy-stat",
        default="./dump/stats_energy.npy",
        type=str,
        required=True,
        help="energy-stat path.",
    )
    parser.add_argument(
        "--outdir", type=str, required=True, help="directory to save checkpoints."
    )
    parser.add_argument(
        "--config", type=str, required=True, help="yaml format configuration file."
    )
    parser.add_argument(
        "--resume",
        default="",
        type=str,
        nargs="?",
        help='checkpoint file path to resume training. (default="")',
    )
    parser.add_argument(
        "--verbose",
        type=int,
        default=1,
        help="logging level. higher is more logging. (default=1)",
    )
    parser.add_argument(
        "--mixed_precision",
        default=0,
        type=int,
        help="using mixed precision for generator or not.",
    )
    parser.add_argument(
        "--pretrained",
        default="",
        type=str,
        nargs="?",
        help="pretrained weights .h5 file to load weights from. Auto-skips non-matching layers",
    )
    args = parser.parse_args()
    # return strategy
    STRATEGY = return_strategy()
    # set mixed precision config
    if args.mixed_precision == 1:
        tf.config.optimizer.set_experimental_options({"auto_mixed_precision": True})
    args.mixed_precision = bool(args.mixed_precision)
    args.use_norm = bool(args.use_norm)
    # set logger verbosity from --verbose
    if args.verbose > 1:
        logging.basicConfig(
            level=logging.DEBUG,
            stream=sys.stdout,
            format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
        )
    elif args.verbose > 0:
        logging.basicConfig(
            level=logging.INFO,
            stream=sys.stdout,
            format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
        )
    else:
        logging.basicConfig(
            level=logging.WARN,
            stream=sys.stdout,
            format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
        )
        logging.warning("Skip DEBUG/INFO messages")
    # check directory existence
    if not os.path.exists(args.outdir):
        os.makedirs(args.outdir)
    # check arguments
    if args.train_dir is None:
        raise ValueError("Please specify --train-dir")
    if args.dev_dir is None:
        # message previously referenced a non-existent --valid-dir flag
        raise ValueError("Please specify --dev-dir")
    # load and save config
    with open(args.config) as f:
        config = yaml.load(f, Loader=yaml.Loader)
    config.update(vars(args))
    config["version"] = tensorflow_tts.__version__
    with open(os.path.join(args.outdir, "config.yml"), "w") as f:
        yaml.dump(config, f, Dumper=yaml.Dumper)
    for key, value in config.items():
        logging.info(f"{key} = {value}")
    # get dataset; optionally drop clips shorter than the threshold
    if config["remove_short_samples"]:
        mel_length_threshold = config["mel_length_threshold"]
    else:
        mel_length_threshold = None
    if config["format"] == "npy":
        charactor_query = "*-ids.npy"
        mel_query = "*-raw-feats.npy" if not args.use_norm else "*-norm-feats.npy"
        duration_query = "*-durations.npy"
        f0_query = "*-raw-f0.npy"
        energy_query = "*-raw-energy.npy"
    else:
        raise ValueError("Only npy are supported.")
    # define train/valid dataset
    train_dataset = CharactorDurationF0EnergyMelDataset(
        root_dir=args.train_dir,
        charactor_query=charactor_query,
        mel_query=mel_query,
        duration_query=duration_query,
        f0_query=f0_query,
        energy_query=energy_query,
        f0_stat=args.f0_stat,
        energy_stat=args.energy_stat,
        mel_length_threshold=mel_length_threshold,
    ).create(
        is_shuffle=config["is_shuffle"],
        allow_cache=config["allow_cache"],
        # global batch covers all replicas and the accumulation steps
        batch_size=config["batch_size"]
        * STRATEGY.num_replicas_in_sync
        * config["gradient_accumulation_steps"],
    )
    valid_dataset = CharactorDurationF0EnergyMelDataset(
        root_dir=args.dev_dir,
        charactor_query=charactor_query,
        mel_query=mel_query,
        duration_query=duration_query,
        f0_query=f0_query,
        energy_query=energy_query,
        f0_stat=args.f0_stat,
        energy_stat=args.energy_stat,
        mel_length_threshold=mel_length_threshold,
    ).create(
        is_shuffle=config["is_shuffle"],
        allow_cache=config["allow_cache"],
        batch_size=config["batch_size"] * STRATEGY.num_replicas_in_sync,
    )
    # define trainer
    trainer = FastSpeech2Trainer(
        config=config,
        strategy=STRATEGY,
        steps=0,
        epochs=0,
        is_mixed_precision=args.mixed_precision,
    )
    with STRATEGY.scope():
        # define model
        fastspeech = TFFastSpeech2(
            config=FastSpeech2Config(**config["fastspeech2_params"])
        )
        fastspeech._build()
        fastspeech.summary()
        if len(args.pretrained) > 1:
            # by_name + skip_mismatch tolerates architecture differences
            fastspeech.load_weights(args.pretrained, by_name=True, skip_mismatch=True)
            logging.info(
                f"Successfully loaded pretrained weight from {args.pretrained}."
            )
        # AdamW for fastspeech: polynomial decay wrapped in a warmup schedule
        learning_rate_fn = tf.keras.optimizers.schedules.PolynomialDecay(
            initial_learning_rate=config["optimizer_params"]["initial_learning_rate"],
            decay_steps=config["optimizer_params"]["decay_steps"],
            end_learning_rate=config["optimizer_params"]["end_learning_rate"],
        )
        learning_rate_fn = WarmUp(
            initial_learning_rate=config["optimizer_params"]["initial_learning_rate"],
            decay_schedule_fn=learning_rate_fn,
            warmup_steps=int(
                config["train_max_steps"]
                * config["optimizer_params"]["warmup_proportion"]
            ),
        )
        optimizer = AdamWeightDecay(
            learning_rate=learning_rate_fn,
            weight_decay_rate=config["optimizer_params"]["weight_decay"],
            beta_1=0.9,
            beta_2=0.98,
            epsilon=1e-6,
            exclude_from_weight_decay=["LayerNorm", "layer_norm", "bias"],
        )
        # touch iterations so the optimizer variable is created in-scope
        _ = optimizer.iterations
    # compile trainer
    trainer.compile(model=fastspeech, optimizer=optimizer)
    # start training; on Ctrl-C save a checkpoint so progress is not lost
    try:
        trainer.fit(
            train_dataset,
            valid_dataset,
            saved_path=os.path.join(config["outdir"], "checkpoints/"),
            resume=args.resume,
        )
    except KeyboardInterrupt:
        trainer.save_checkpoint()
        logging.info(f"Successfully saved checkpoint @ {trainer.steps}steps.")
# Entry point when this file is executed as a script.
if __name__ == "__main__":
    main()
| 14,446 | 33.562201 | 96 | py |
TensorFlowTTS | TensorFlowTTS-master/examples/hifigan/train_hifigan.py | # -*- coding: utf-8 -*-
# Copyright 2020 TensorFlowTTS Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Train Hifigan."""
import tensorflow as tf
physical_devices = tf.config.list_physical_devices("GPU")
for i in range(len(physical_devices)):
tf.config.experimental.set_memory_growth(physical_devices[i], True)
import sys
sys.path.append(".")
import argparse
import logging
import os
import numpy as np
import soundfile as sf
import yaml
from tqdm import tqdm
import tensorflow_tts
from examples.melgan.audio_mel_dataset import AudioMelDataset
from examples.melgan.train_melgan import collater
from examples.melgan_stft.train_melgan_stft import MultiSTFTMelganTrainer
from tensorflow_tts.configs import (
HifiGANDiscriminatorConfig,
HifiGANGeneratorConfig,
MelGANDiscriminatorConfig,
)
from tensorflow_tts.models import (
TFHifiGANGenerator,
TFHifiGANMultiPeriodDiscriminator,
TFMelGANMultiScaleDiscriminator,
)
from tensorflow_tts.utils import return_strategy
class TFHifiGANDiscriminator(tf.keras.Model):
    """HiFi-GAN discriminator: a multi-period and a multi-scale
    sub-discriminator run side by side on the same waveform."""
    def __init__(self, multiperiod_dis, multiscale_dis, **kwargs):
        """Keep references to both sub-discriminators.

        Args:
            multiperiod_dis: multi-period discriminator model.
            multiscale_dis: multi-scale discriminator model.
            **kwargs: forwarded to tf.keras.Model (e.g. ``name``).
        """
        super().__init__(**kwargs)
        self.multiperiod_dis = multiperiod_dis
        self.multiscale_dis = multiscale_dis
    def call(self, x):
        """Apply both sub-discriminators to ``x`` and return their outputs
        concatenated into one list (multi-period first, multi-scale after)."""
        return list(self.multiperiod_dis(x)) + list(self.multiscale_dis(x))
def main():
    """Run the HiFi-GAN training process.

    Parses CLI arguments, builds train/valid audio/mel datasets, constructs
    the generator and the combined (multi-period + multi-scale) discriminator
    under the distribution strategy scope, and trains with the multi-STFT
    MelGAN trainer.

    Raises:
        ValueError: if --train-dir/--dev-dir are missing or the dataset
            format is not "npy".
    """
    parser = argparse.ArgumentParser(
        description="Train Hifigan (See detail in examples/hifigan/train_hifigan.py)"
    )
    parser.add_argument(
        "--train-dir",
        default=None,
        type=str,
        help="directory including training data. ",
    )
    parser.add_argument(
        "--dev-dir",
        default=None,
        type=str,
        help="directory including development data. ",
    )
    parser.add_argument(
        "--use-norm", default=1, type=int, help="use norm mels for training or raw."
    )
    parser.add_argument(
        "--outdir", type=str, required=True, help="directory to save checkpoints."
    )
    parser.add_argument(
        "--config", type=str, required=True, help="yaml format configuration file."
    )
    parser.add_argument(
        "--resume",
        default="",
        type=str,
        nargs="?",
        help='checkpoint file path to resume training. (default="")',
    )
    parser.add_argument(
        "--verbose",
        type=int,
        default=1,
        help="logging level. higher is more logging. (default=1)",
    )
    parser.add_argument(
        "--generator_mixed_precision",
        default=0,
        type=int,
        help="using mixed precision for generator or not.",
    )
    parser.add_argument(
        "--discriminator_mixed_precision",
        default=0,
        type=int,
        help="using mixed precision for discriminator or not.",
    )
    parser.add_argument(
        "--pretrained",
        default="",
        type=str,
        nargs="?",
        help="path of .h5 melgan generator to load weights from",
    )
    args = parser.parse_args()
    # return strategy
    STRATEGY = return_strategy()
    # set mixed precision config
    if args.generator_mixed_precision == 1 or args.discriminator_mixed_precision == 1:
        tf.config.optimizer.set_experimental_options({"auto_mixed_precision": True})
    args.generator_mixed_precision = bool(args.generator_mixed_precision)
    args.discriminator_mixed_precision = bool(args.discriminator_mixed_precision)
    args.use_norm = bool(args.use_norm)
    # set logger verbosity from --verbose
    if args.verbose > 1:
        logging.basicConfig(
            level=logging.DEBUG,
            stream=sys.stdout,
            format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
        )
    elif args.verbose > 0:
        logging.basicConfig(
            level=logging.INFO,
            stream=sys.stdout,
            format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
        )
    else:
        logging.basicConfig(
            level=logging.WARN,
            stream=sys.stdout,
            format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
        )
        logging.warning("Skip DEBUG/INFO messages")
    # check directory existence
    if not os.path.exists(args.outdir):
        os.makedirs(args.outdir)
    # check arguments
    if args.train_dir is None:
        raise ValueError("Please specify --train-dir")
    if args.dev_dir is None:
        # message previously referenced a non-existent --valid-dir flag
        raise ValueError("Please specify --dev-dir")
    # load and save config
    with open(args.config) as f:
        config = yaml.load(f, Loader=yaml.Loader)
    config.update(vars(args))
    config["version"] = tensorflow_tts.__version__
    with open(os.path.join(args.outdir, "config.yml"), "w") as f:
        yaml.dump(config, f, Dumper=yaml.Dumper)
    for key, value in config.items():
        logging.info(f"{key} = {value}")
    # get dataset; optionally drop clips shorter than one training window
    if config["remove_short_samples"]:
        mel_length_threshold = config["batch_max_steps"] // config[
            "hop_size"
        ] + 2 * config["hifigan_generator_params"].get("aux_context_window", 0)
    else:
        mel_length_threshold = None
    if config["format"] == "npy":
        audio_query = "*-wave.npy"
        mel_query = "*-raw-feats.npy" if not args.use_norm else "*-norm-feats.npy"
        audio_load_fn = np.load
        mel_load_fn = np.load
    else:
        raise ValueError("Only npy are supported.")
    # define train/valid dataset
    train_dataset = AudioMelDataset(
        root_dir=args.train_dir,
        audio_query=audio_query,
        mel_query=mel_query,
        audio_load_fn=audio_load_fn,
        mel_load_fn=mel_load_fn,
        mel_length_threshold=mel_length_threshold,
    ).create(
        is_shuffle=config["is_shuffle"],
        map_fn=lambda items: collater(
            items,
            batch_max_steps=tf.constant(config["batch_max_steps"], dtype=tf.int32),
            hop_size=tf.constant(config["hop_size"], dtype=tf.int32),
        ),
        allow_cache=config["allow_cache"],
        # global batch covers all replicas and the accumulation steps
        batch_size=config["batch_size"]
        * STRATEGY.num_replicas_in_sync
        * config["gradient_accumulation_steps"],
    )
    valid_dataset = AudioMelDataset(
        root_dir=args.dev_dir,
        audio_query=audio_query,
        mel_query=mel_query,
        audio_load_fn=audio_load_fn,
        mel_load_fn=mel_load_fn,
        mel_length_threshold=mel_length_threshold,
    ).create(
        is_shuffle=config["is_shuffle"],
        map_fn=lambda items: collater(
            items,
            batch_max_steps=tf.constant(
                config["batch_max_steps_valid"], dtype=tf.int32
            ),
            hop_size=tf.constant(config["hop_size"], dtype=tf.int32),
        ),
        allow_cache=config["allow_cache"],
        batch_size=config["batch_size"] * STRATEGY.num_replicas_in_sync,
    )
    # define trainer
    trainer = MultiSTFTMelganTrainer(
        steps=0,
        epochs=0,
        config=config,
        strategy=STRATEGY,
        is_generator_mixed_precision=args.generator_mixed_precision,
        is_discriminator_mixed_precision=args.discriminator_mixed_precision,
    )
    with STRATEGY.scope():
        # define generator and discriminator
        generator = TFHifiGANGenerator(
            HifiGANGeneratorConfig(**config["hifigan_generator_params"]),
            name="hifigan_generator",
        )
        multiperiod_discriminator = TFHifiGANMultiPeriodDiscriminator(
            HifiGANDiscriminatorConfig(**config["hifigan_discriminator_params"]),
            name="hifigan_multiperiod_discriminator",
        )
        # NOTE(review): here ``name`` is passed into the *config* constructor,
        # unlike the multiperiod case where it names the model — confirm
        # MelGANDiscriminatorConfig accepts a ``name`` kwarg as intended.
        multiscale_discriminator = TFMelGANMultiScaleDiscriminator(
            MelGANDiscriminatorConfig(
                **config["melgan_discriminator_params"],
                name="melgan_multiscale_discriminator",
            )
        )
        discriminator = TFHifiGANDiscriminator(
            multiperiod_discriminator,
            multiscale_discriminator,
            name="hifigan_discriminator",
        )
        # dummy input to build model (weights must exist before load/summary).
        fake_mels = tf.random.uniform(shape=[1, 100, 80], dtype=tf.float32)
        y_hat = generator(fake_mels)
        discriminator(y_hat)
        if len(args.pretrained) > 1:
            generator.load_weights(args.pretrained)
            logging.info(
                f"Successfully loaded pretrained weight from {args.pretrained}."
            )
        generator.summary()
        discriminator.summary()
        # define optimizer: learning-rate schedules are looked up by name
        generator_lr_fn = getattr(
            tf.keras.optimizers.schedules, config["generator_optimizer_params"]["lr_fn"]
        )(**config["generator_optimizer_params"]["lr_params"])
        discriminator_lr_fn = getattr(
            tf.keras.optimizers.schedules,
            config["discriminator_optimizer_params"]["lr_fn"],
        )(**config["discriminator_optimizer_params"]["lr_params"])
        gen_optimizer = tf.keras.optimizers.Adam(
            learning_rate=generator_lr_fn,
            amsgrad=config["generator_optimizer_params"]["amsgrad"],
        )
        dis_optimizer = tf.keras.optimizers.Adam(
            learning_rate=discriminator_lr_fn,
            amsgrad=config["discriminator_optimizer_params"]["amsgrad"],
        )
    trainer.compile(
        gen_model=generator,
        dis_model=discriminator,
        gen_optimizer=gen_optimizer,
        dis_optimizer=dis_optimizer,
    )
    # start training; on Ctrl-C save a checkpoint so progress is not lost
    try:
        trainer.fit(
            train_dataset,
            valid_dataset,
            saved_path=os.path.join(config["outdir"], "checkpoints/"),
            resume=args.resume,
        )
    except KeyboardInterrupt:
        trainer.save_checkpoint()
        logging.info(f"Successfully saved checkpoint @ {trainer.steps}steps.")
# Entry point when this file is executed as a script.
if __name__ == "__main__":
    main()
| 10,464 | 31.101227 | 88 | py |
TensorFlowTTS | TensorFlowTTS-master/examples/parallel_wavegan/train_parallel_wavegan.py | # -*- coding: utf-8 -*-
# Copyright 2020 TensorFlowTTS Team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Train ParallelWavegan."""
import tensorflow as tf
physical_devices = tf.config.list_physical_devices("GPU")
for i in range(len(physical_devices)):
tf.config.experimental.set_memory_growth(physical_devices[i], True)
import sys
sys.path.append(".")
import argparse
import logging
import os
import soundfile as sf
import numpy as np
import yaml
import tensorflow_tts
from examples.melgan.audio_mel_dataset import AudioMelDataset
from examples.melgan.train_melgan import collater
from tensorflow_tts.configs import (
ParallelWaveGANGeneratorConfig,
ParallelWaveGANDiscriminatorConfig,
)
from tensorflow_tts.models import (
TFParallelWaveGANGenerator,
TFParallelWaveGANDiscriminator,
)
from tensorflow_tts.trainers import GanBasedTrainer
from tensorflow_tts.losses import TFMultiResolutionSTFT
from tensorflow_tts.utils import calculate_2d_loss, calculate_3d_loss, return_strategy
from tensorflow_addons.optimizers import RectifiedAdam
class ParallelWaveganTrainer(GanBasedTrainer):
    """ParallelWaveGAN Trainer class based on GanBasedTrainer."""
    def __init__(
        self,
        config,
        strategy,
        steps=0,
        epochs=0,
        is_generator_mixed_precision=False,
        is_discriminator_mixed_precision=False,
    ):
        """Initialize trainer.
        Args:
            config (dict): Config dict loaded from yaml format configuration file.
            strategy: tf.distribute strategy the base trainer runs under.
            steps (int): Initial global steps.
            epochs (int): Initial global epochs.
            is_generator_mixed_precision (bool): Use mixed precision for generator or not.
            is_discriminator_mixed_precision (bool): Use mixed precision for discriminator or not.
        """
        super(ParallelWaveganTrainer, self).__init__(
            config=config,
            steps=steps,
            epochs=epochs,
            strategy=strategy,
            is_generator_mixed_precision=is_generator_mixed_precision,
            is_discriminator_mixed_precision=is_discriminator_mixed_precision,
        )
        # Names of all scalar metrics tracked for train/eval; the keys returned
        # by the compute_per_example_* methods below must come from this list.
        self.list_metrics_name = [
            "adversarial_loss",
            "gen_loss",
            "real_loss",
            "fake_loss",
            "dis_loss",
            "spectral_convergence_loss",
            "log_magnitude_loss",
        ]
        self.init_train_eval_metrics(self.list_metrics_name)
        self.reset_states_train()
        self.reset_states_eval()
    def compile(self, gen_model, dis_model, gen_optimizer, dis_optimizer):
        """Attach generator/discriminator and their optimizers, then build losses.
        Args:
            gen_model: generator model instance.
            dis_model: discriminator model instance.
            gen_optimizer: optimizer for the generator.
            dis_optimizer: optimizer for the discriminator.
        """
        super().compile(gen_model, dis_model, gen_optimizer, dis_optimizer)
        # define loss
        self.stft_loss = TFMultiResolutionSTFT(**self.config["stft_loss_params"])
        # Reduction.NONE keeps per-example losses, matching the "shape [B]"
        # contract of compute_per_example_* under a distribution strategy.
        self.mse_loss = tf.keras.losses.MeanSquaredError(
            reduction=tf.keras.losses.Reduction.NONE
        )
        # NOTE(review): mae_loss is built but not used anywhere in this class.
        self.mae_loss = tf.keras.losses.MeanAbsoluteError(
            reduction=tf.keras.losses.Reduction.NONE
        )
    def compute_per_example_generator_losses(self, batch, outputs):
        """Compute per example generator losses and return dict_metrics_losses
        Note that all element of the loss MUST has a shape [batch_size] and
        the keys of dict_metrics_losses MUST be in self.list_metrics_name.
        Args:
            batch: dictionary batch input return from dataloader
            outputs: outputs of the model
        Returns:
            per_example_losses: per example losses for each GPU, shape [B]
            dict_metrics_losses: dictionary loss.
        """
        dict_metrics_losses = {}
        per_example_losses = 0.0
        audios = batch["audios"]
        y_hat = outputs
        # calculate multi-resolution stft loss
        sc_loss, mag_loss = calculate_2d_loss(
            audios, tf.squeeze(y_hat, -1), self.stft_loss
        )
        # Average the two STFT components into the base generator loss.
        gen_loss = 0.5 * (sc_loss + mag_loss)
        # The adversarial term only kicks in once the discriminator has
        # started training (warm-up controlled by config).
        if self.steps >= self.config["discriminator_train_start_steps"]:
            p_hat = self._discriminator(y_hat)
            p = self._discriminator(tf.expand_dims(audios, 2))
            adv_loss = 0.0
            # Generator tries to make the discriminator output ones ("real").
            adv_loss += calculate_3d_loss(
                tf.ones_like(p_hat), p_hat, loss_fn=self.mse_loss
            )
            gen_loss += self.config["lambda_adv"] * adv_loss
            # update dict_metrics_losses
            dict_metrics_losses.update({"adversarial_loss": adv_loss})
        dict_metrics_losses.update({"gen_loss": gen_loss})
        dict_metrics_losses.update({"spectral_convergence_loss": sc_loss})
        dict_metrics_losses.update({"log_magnitude_loss": mag_loss})
        per_example_losses = gen_loss
        return per_example_losses, dict_metrics_losses
    def compute_per_example_discriminator_losses(self, batch, gen_outputs):
        """Compute per-example discriminator losses (standard LSGAN objective).
        Args:
            batch: dictionary batch input return from dataloader.
            gen_outputs: generator outputs, shape (B, T, 1).
        Returns:
            per_example_losses: per example losses for each GPU, shape [B].
            dict_metrics_losses: dictionary loss.
        """
        audios = batch["audios"]
        y_hat = gen_outputs
        y = tf.expand_dims(audios, 2)
        p = self._discriminator(y)
        p_hat = self._discriminator(y_hat)
        real_loss = 0.0
        fake_loss = 0.0
        # Real samples should score ones, generated samples zeros.
        real_loss += calculate_3d_loss(tf.ones_like(p), p, loss_fn=self.mse_loss)
        fake_loss += calculate_3d_loss(
            tf.zeros_like(p_hat), p_hat, loss_fn=self.mse_loss
        )
        dis_loss = real_loss + fake_loss
        # calculate per_example_losses and dict_metrics_losses
        per_example_losses = dis_loss
        dict_metrics_losses = {
            "real_loss": real_loss,
            "fake_loss": fake_loss,
            "dis_loss": dis_loss,
        }
        return per_example_losses, dict_metrics_losses
    def generate_and_save_intermediate_result(self, batch):
        """Generate and save intermediate result.
        Plots groundtruth vs generated waveforms and writes both as PNG + WAV
        files under <outdir>/predictions/<steps>steps.
        """
        # Imported lazily so headless training does not require matplotlib
        # until the first checkpointing of intermediate results.
        import matplotlib.pyplot as plt
        # generate
        y_batch_ = self.one_step_predict(batch)
        y_batch = batch["audios"]
        utt_ids = batch["utt_ids"]
        # convert to tensor.
        # here we just take a sample at first replica.
        try:
            # Distributed run: tensors are PerReplica values; take replica 0.
            y_batch_ = y_batch_.values[0].numpy()
            y_batch = y_batch.values[0].numpy()
            utt_ids = utt_ids.values[0].numpy()
        except Exception:
            # Single-device run: plain tensors with no .values attribute.
            y_batch_ = y_batch_.numpy()
            y_batch = y_batch.numpy()
            utt_ids = utt_ids.numpy()
        # check directory
        dirname = os.path.join(self.config["outdir"], f"predictions/{self.steps}steps")
        if not os.path.exists(dirname):
            os.makedirs(dirname)
        for idx, (y, y_) in enumerate(zip(y_batch, y_batch_), 0):
            # convert to ndarray
            y, y_ = tf.reshape(y, [-1]).numpy(), tf.reshape(y_, [-1]).numpy()
            # plot figure and save it
            utt_id = utt_ids[idx]
            figname = os.path.join(dirname, f"{utt_id}.png")
            plt.subplot(2, 1, 1)
            plt.plot(y)
            plt.title("groundtruth speech")
            plt.subplot(2, 1, 2)
            plt.plot(y_)
            plt.title(f"generated speech @ {self.steps} steps")
            plt.tight_layout()
            plt.savefig(figname)
            plt.close()
            # save as wavefile
            # Clip to the valid [-1, 1] PCM range before writing.
            y = np.clip(y, -1, 1)
            y_ = np.clip(y_, -1, 1)
            sf.write(
                figname.replace(".png", "_ref.wav"),
                y,
                self.config["sampling_rate"],
                "PCM_16",
            )
            sf.write(
                figname.replace(".png", "_gen.wav"),
                y_,
                self.config["sampling_rate"],
                "PCM_16",
            )
def main():
    """Run the ParallelWaveGAN training process.

    Parses CLI arguments, configures logging and the output directory, loads
    and persists the YAML config, builds train/valid datasets, constructs the
    generator and discriminator under the distribution strategy, and starts
    the GAN training loop.

    Raises:
        ValueError: If --train-dir or --dev-dir is missing, or the feature
            format in the config is not "npy".
    """
    parser = argparse.ArgumentParser(
        description="Train ParallelWaveGan (See detail in tensorflow_tts/examples/parallel_wavegan/train_parallel_wavegan.py)"
    )
    parser.add_argument(
        "--train-dir",
        default=None,
        type=str,
        help="directory including training data. ",
    )
    parser.add_argument(
        "--dev-dir",
        default=None,
        type=str,
        help="directory including development data. ",
    )
    parser.add_argument(
        "--use-norm", default=1, type=int, help="use norm mels for training or raw."
    )
    parser.add_argument(
        "--outdir", type=str, required=True, help="directory to save checkpoints."
    )
    parser.add_argument(
        "--config", type=str, required=True, help="yaml format configuration file."
    )
    parser.add_argument(
        "--resume",
        default="",
        type=str,
        nargs="?",
        help='checkpoint file path to resume training. (default="")',
    )
    parser.add_argument(
        "--verbose",
        type=int,
        default=1,
        help="logging level. higher is more logging. (default=1)",
    )
    parser.add_argument(
        "--generator_mixed_precision",
        default=0,
        type=int,
        help="using mixed precision for generator or not.",
    )
    parser.add_argument(
        "--discriminator_mixed_precision",
        default=0,
        type=int,
        help="using mixed precision for discriminator or not.",
    )
    args = parser.parse_args()
    # Pick a distribution strategy (single device or mirrored multi-GPU).
    STRATEGY = return_strategy()
    # Enable graph-level auto mixed precision if either model requests it.
    if args.generator_mixed_precision == 1 or args.discriminator_mixed_precision == 1:
        tf.config.optimizer.set_experimental_options({"auto_mixed_precision": True})
    args.generator_mixed_precision = bool(args.generator_mixed_precision)
    args.discriminator_mixed_precision = bool(args.discriminator_mixed_precision)
    args.use_norm = bool(args.use_norm)
    # Configure logging once; verbosity maps 0 -> WARN, 1 -> INFO, >1 -> DEBUG.
    if args.verbose > 1:
        log_level = logging.DEBUG
    elif args.verbose > 0:
        log_level = logging.INFO
    else:
        log_level = logging.WARN
    logging.basicConfig(
        level=log_level,
        stream=sys.stdout,
        format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
    )
    if args.verbose <= 0:
        logging.warning("Skip DEBUG/INFO messages")
    # Ensure the output directory exists (idempotent).
    os.makedirs(args.outdir, exist_ok=True)
    # check arguments
    if args.train_dir is None:
        raise ValueError("Please specify --train-dir")
    if args.dev_dir is None:
        # Fixed: this message previously referred to a non-existent
        # --valid-dir flag; the actual flag is --dev-dir.
        raise ValueError("Please specify --dev-dir")
    # Load the YAML config, merge CLI args into it, and save a copy next to
    # the checkpoints for reproducibility.
    with open(args.config) as f:
        # NOTE: yaml.Loader can construct arbitrary objects; the config file
        # must come from a trusted source.
        config = yaml.load(f, Loader=yaml.Loader)
    config.update(vars(args))
    config["version"] = tensorflow_tts.__version__
    with open(os.path.join(args.outdir, "config.yml"), "w") as f:
        yaml.dump(config, f, Dumper=yaml.Dumper)
    for key, value in config.items():
        logging.info(f"{key} = {value}")
    # Drop utterances whose mel is too short to cut one training window,
    # accounting for the generator's auxiliary context on both sides.
    if config["remove_short_samples"]:
        mel_length_threshold = config["batch_max_steps"] // config[
            "hop_size"
        ] + 2 * config["parallel_wavegan_generator_params"].get("aux_context_window", 0)
    else:
        mel_length_threshold = None
    if config["format"] == "npy":
        audio_query = "*-wave.npy"
        # Prefer normalized features unless --use-norm 0 was given.
        mel_query = "*-norm-feats.npy" if args.use_norm else "*-raw-feats.npy"
        audio_load_fn = np.load
        mel_load_fn = np.load
    else:
        raise ValueError("Only npy are supported.")
    # define train/valid dataset
    train_dataset = AudioMelDataset(
        root_dir=args.train_dir,
        audio_query=audio_query,
        mel_query=mel_query,
        audio_load_fn=audio_load_fn,
        mel_load_fn=mel_load_fn,
        mel_length_threshold=mel_length_threshold,
    ).create(
        is_shuffle=config["is_shuffle"],
        map_fn=lambda items: collater(
            items,
            batch_max_steps=tf.constant(config["batch_max_steps"], dtype=tf.int32),
            hop_size=tf.constant(config["hop_size"], dtype=tf.int32),
        ),
        allow_cache=config["allow_cache"],
        # Global batch: per-replica batch * replicas * accumulation steps.
        batch_size=config["batch_size"]
        * STRATEGY.num_replicas_in_sync
        * config["gradient_accumulation_steps"],
    )
    valid_dataset = AudioMelDataset(
        root_dir=args.dev_dir,
        audio_query=audio_query,
        mel_query=mel_query,
        audio_load_fn=audio_load_fn,
        mel_load_fn=mel_load_fn,
        mel_length_threshold=mel_length_threshold,
    ).create(
        is_shuffle=config["is_shuffle"],
        map_fn=lambda items: collater(
            items,
            batch_max_steps=tf.constant(
                config["batch_max_steps_valid"], dtype=tf.int32
            ),
            hop_size=tf.constant(config["hop_size"], dtype=tf.int32),
        ),
        allow_cache=config["allow_cache"],
        batch_size=config["batch_size"] * STRATEGY.num_replicas_in_sync,
    )
    # define trainer
    trainer = ParallelWaveganTrainer(
        steps=0,
        epochs=0,
        config=config,
        strategy=STRATEGY,
        is_generator_mixed_precision=args.generator_mixed_precision,
        is_discriminator_mixed_precision=args.discriminator_mixed_precision,
    )
    with STRATEGY.scope():
        # define generator and discriminator
        generator = TFParallelWaveGANGenerator(
            ParallelWaveGANGeneratorConfig(
                **config["parallel_wavegan_generator_params"]
            ),
            name="parallel_wavegan_generator",
        )
        discriminator = TFParallelWaveGANDiscriminator(
            ParallelWaveGANDiscriminatorConfig(
                **config["parallel_wavegan_discriminator_params"]
            ),
            name="parallel_wavegan_discriminator",
        )
        # Dummy forward pass to build the model variables before summary().
        fake_mels = tf.random.uniform(shape=[1, 100, 80], dtype=tf.float32)
        y_hat = generator(fake_mels)
        discriminator(y_hat)
        generator.summary()
        discriminator.summary()
        # define optimizer with the schedules named in the config.
        generator_lr_fn = getattr(
            tf.keras.optimizers.schedules, config["generator_optimizer_params"]["lr_fn"]
        )(**config["generator_optimizer_params"]["lr_params"])
        discriminator_lr_fn = getattr(
            tf.keras.optimizers.schedules,
            config["discriminator_optimizer_params"]["lr_fn"],
        )(**config["discriminator_optimizer_params"]["lr_params"])
        gen_optimizer = RectifiedAdam(learning_rate=generator_lr_fn, amsgrad=False)
        dis_optimizer = RectifiedAdam(learning_rate=discriminator_lr_fn, amsgrad=False)
    trainer.compile(
        gen_model=generator,
        dis_model=discriminator,
        gen_optimizer=gen_optimizer,
        dis_optimizer=dis_optimizer,
    )
    # start training; save a checkpoint on manual interruption so the run
    # can be resumed with --resume.
    try:
        trainer.fit(
            train_dataset,
            valid_dataset,
            saved_path=os.path.join(config["outdir"], "checkpoints/"),
            resume=args.resume,
        )
    except KeyboardInterrupt:
        trainer.save_checkpoint()
        logging.info(f"Successfully saved checkpoint @ {trainer.steps}steps.")
# Standard script entry point: only run training when executed directly,
# not when this module is imported.
if __name__ == "__main__":
    main()
| 15,699 | 32.052632 | 126 | py |
TensorFlowTTS | TensorFlowTTS-master/tensorflow_tts/models/base_model.py | # -*- coding: utf-8 -*-
# Copyright 2020 TensorFlowTTS Team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base Model for all model."""
import tensorflow as tf
import yaml
import os
import numpy as np
from tensorflow_tts.utils.utils import MODEL_FILE_NAME, CONFIG_FILE_NAME
class BaseModel(tf.keras.Model):
    """Common base for all TensorFlowTTS models.

    Adds a config attachment hook and a HuggingFace-style
    ``save_pretrained`` that persists config + weights together.
    """

    def set_config(self, config):
        """Attach the model's configuration object for later serialization."""
        self.config = config

    def save_pretrained(self, saved_path):
        """Write the attached config and the model weights under *saved_path*.

        The directory is created if it does not already exist.
        """
        os.makedirs(saved_path, exist_ok=True)
        self.config.save_pretrained(saved_path)
        weights_file = os.path.join(saved_path, MODEL_FILE_NAME)
        self.save_weights(weights_file)
| 1,131 | 32.294118 | 74 | py |
TensorFlowTTS | TensorFlowTTS-master/tensorflow_tts/models/parallel_wavegan.py | # -*- coding: utf-8 -*-
# Copyright 2020 The TensorFlowTTS Team and Tomoki Hayashi (@kan-bayashi)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Parallel-wavegan Modules. Based on pytorch implementation (https://github.com/kan-bayashi/ParallelWaveGAN)"""
import tensorflow as tf
from tensorflow_tts.models import BaseModel
def get_initializer(initializer_seed=42):
    """Build a seeded He-normal kernel initializer.

    Args:
        initializer_seed (int): Seed for reproducible weight initialization.

    Returns:
        A ``tf.keras.initializers.he_normal`` instance using the given seed.
    """
    return tf.keras.initializers.he_normal(seed=initializer_seed)
class TFConv1d1x1(tf.keras.layers.Conv1D):
    """Pointwise (kernel size 1) Conv1D with a seeded He-normal initializer."""

    def __init__(self, filters, use_bias, padding, initializer_seed, **kwargs):
        """Create the 1x1 convolution.

        Args:
            filters (int): Number of output channels.
            use_bias (bool): Whether the convolution adds a bias term.
            padding (str): Padding mode passed through to Conv1D.
            initializer_seed (int): Seed for the kernel initializer.
        """
        # Kernel size, stride and dilation are all pinned to 1: this layer is
        # purely a per-timestep channel projection.
        super().__init__(
            kernel_size=1,
            strides=1,
            dilation_rate=1,
            filters=filters,
            padding=padding,
            use_bias=use_bias,
            kernel_initializer=get_initializer(initializer_seed),
            **kwargs,
        )
class TFConv1d(tf.keras.layers.Conv1D):
    """Plain Conv1D whose kernel initializer is seeded deterministically."""

    def __init__(self, *args, **kwargs):
        """Create the layer.

        The extra keyword ``initializer_seed`` (default 42) is consumed here
        and converted into a seeded He-normal kernel initializer; every other
        argument is forwarded to ``tf.keras.layers.Conv1D`` unchanged.
        """
        seed = kwargs.pop("initializer_seed", 42)
        super().__init__(*args, **kwargs, kernel_initializer=get_initializer(seed))
class TFResidualBlock(tf.keras.layers.Layer):
    """Residual block module in WaveNet (channels-last layout)."""
    def __init__(
        self,
        kernel_size=3,
        residual_channels=64,
        gate_channels=128,
        skip_channels=64,
        aux_channels=80,
        dropout_rate=0.0,
        dilation_rate=1,
        use_bias=True,
        use_causal_conv=False,
        initializer_seed=42,
        **kwargs,
    ):
        """Initialize ResidualBlock module.
        Args:
            kernel_size (int): Kernel size of dilation convolution layer.
            residual_channels (int): Number of channels for residual connection.
            gate_channels (int): Channels of the gated-activation conv; split
                in half into tanh and sigmoid branches.
            skip_channels (int): Number of channels for skip connection.
            aux_channels (int): Local conditioning channels i.e. auxiliary input dimension.
            dropout_rate (float): Dropout probability.
            dilation_rate (int): Dilation factor.
            use_bias (bool): Whether to add bias parameter in convolution layers.
            use_causal_conv (bool): Whether to use causal (left-padded) or
                centered ("same") convolution.
            initializer_seed (int32): initializer seed.
        """
        super().__init__(**kwargs)
        self.dropout_rate = dropout_rate
        # Causal mode: no future time stamps available.
        self.use_causal_conv = use_causal_conv
        # Dilated convolution producing the gated activation input.
        self.conv = TFConv1d(
            filters=gate_channels,
            kernel_size=kernel_size,
            padding="same" if self.use_causal_conv is False else "causal",
            strides=1,
            dilation_rate=dilation_rate,
            use_bias=use_bias,
            initializer_seed=initializer_seed,
        )
        # Local conditioning projection (only if auxiliary features exist).
        if aux_channels > 0:
            self.conv1x1_aux = TFConv1d1x1(
                gate_channels,
                use_bias=False,
                padding="same",
                initializer_seed=initializer_seed,
                name="conv1x1_aux",
            )
        else:
            self.conv1x1_aux = None
        # conv output is split into two groups
        # NOTE(review): gate_out_channels is computed but never used below.
        gate_out_channels = gate_channels // 2
        self.conv1x1_out = TFConv1d1x1(
            residual_channels,
            use_bias=use_bias,
            padding="same",
            initializer_seed=initializer_seed,
            name="conv1x1_out",
        )
        self.conv1x1_skip = TFConv1d1x1(
            skip_channels,
            use_bias=use_bias,
            padding="same",
            initializer_seed=initializer_seed,
            name="conv1x1_skip",
        )
        self.dropout = tf.keras.layers.Dropout(rate=self.dropout_rate)
    def call(self, x, c, training=False):
        """Calculate forward propagation.
        Args:
            x (Tensor): Input tensor (B, T, residual_channels) — channels last,
                as evidenced by the axis=-1 splits below.
            c (Tensor): Local conditioning auxiliary tensor (B, T, aux_channels).
            training (bool): Enables dropout when True.
        Returns:
            Tensor: Output tensor for residual connection (B, T, residual_channels).
            Tensor: Output tensor for skip connection (B, T, skip_channels).
        """
        residual = x
        x = self.dropout(x, training=training)
        x = self.conv(x)
        # split into two part for gated activation
        xa, xb = tf.split(x, 2, axis=-1)
        # local conditioning: project c and add it to both gate branches
        if c is not None:
            assert self.conv1x1_aux is not None
            c = self.conv1x1_aux(c)
            ca, cb = tf.split(c, 2, axis=-1)
            xa, xb = xa + ca, xb + cb
        # WaveNet gated activation unit: tanh * sigmoid.
        x = tf.nn.tanh(xa) * tf.nn.sigmoid(xb)
        # for skip connection
        s = self.conv1x1_skip(x)
        # for residual connection; sqrt(0.5) keeps the variance of the sum
        # comparable to the inputs.
        x = self.conv1x1_out(x)
        x = (x + residual) * tf.math.sqrt(0.5)
        return x, s
class TFStretch1d(tf.keras.layers.Layer):
    """Nearest/bilinear stretching of a 2D feature map (Stretch2d)."""

    def __init__(self, x_scale, y_scale, method="nearest", **kwargs):
        """Remember the scaling factors and interpolation method.

        Args:
            x_scale (int): Scaling factor along the time axis.
            y_scale (int): Scaling factor along the frequency axis.
            method (str): Interpolation method accepted by ``tf.image.resize``.
        """
        super().__init__(**kwargs)
        self.x_scale = x_scale
        self.y_scale = y_scale
        self.method = method

    def call(self, x):
        """Stretch the input by the configured factors.

        Args:
            x (Tensor): Input tensor of shape (B, T, C, 1).

        Returns:
            Tensor: Resized tensor of shape (B, T * x_scale, C * y_scale, 1).
        """
        shape = tf.shape(x)
        target_size = (shape[1] * self.x_scale, shape[2] * self.y_scale)
        return tf.image.resize(x, size=target_size, method=self.method)
class TFUpsampleNetWork(tf.keras.layers.Layer):
    """Upsampling network module: interleaved stretch + smoothing-conv layers."""
    def __init__(
        self,
        output_channels,
        upsample_scales,
        nonlinear_activation=None,
        nonlinear_activation_params={},  # NOTE(review): mutable default; never mutated here, but a shared-state hazard.
        interpolate_mode="nearest",
        freq_axis_kernel_size=1,
        use_causal_conv=False,
        **kwargs,
    ):
        """Initialize upsampling network module.
        Args:
            output_channels (int): output feature channels.
                NOTE(review): accepted but unused — the layer stack below never
                references it.
            upsample_scales (list): List of upsampling scales.
            nonlinear_activation (str): Activation function name.
            nonlinear_activation_params (dict): Arguments for specified activation function.
            interpolate_mode (str): Interpolation mode.
            freq_axis_kernel_size (int): Kernel size in the direction of frequency axis.
            use_causal_conv (bool): Whether a causal conv was requested.
                NOTE(review): "causal" is not a valid padding for
                tf.keras.layers.Conv2D (only "valid"/"same"), so
                use_causal_conv=True would fail when the Conv2D is built —
                confirm against upstream before relying on it.
        """
        super().__init__(**kwargs)
        self.use_causal_conv = use_causal_conv
        self.up_layers = []
        for scale in upsample_scales:
            # interpolation layer
            stretch = TFStretch1d(
                scale, 1, interpolate_mode, name="stretch_._{}".format(scale)
            )  # ->> outputs: [B, T * scale, C * 1, 1]
            self.up_layers += [stretch]
            # conv layer smoothing the interpolated features
            assert (
                freq_axis_kernel_size - 1
            ) % 2 == 0, "Not support even number freq axis kernel size."
            kernel_size = scale * 2 + 1
            conv = tf.keras.layers.Conv2D(
                filters=1,
                kernel_size=(kernel_size, freq_axis_kernel_size),
                padding="causal" if self.use_causal_conv is True else "same",
                use_bias=False,
            )  # ->> outputs: [B, T * scale, C * 1, 1]
            self.up_layers += [conv]
            # optional nonlinearity after each stretch+conv pair
            if nonlinear_activation is not None:
                nonlinear = getattr(tf.keras.layers, nonlinear_activation)(
                    **nonlinear_activation_params
                )
                self.up_layers += [nonlinear]
    def call(self, c):
        """Calculate forward propagation.
        Args:
            c : Input tensor (B, T, C).
        Returns:
            Tensor: Upsampled tensor (B, T', C), where T' = T * prod(upsample_scales).
        """
        c = tf.expand_dims(c, -1)  # add a channel dim: [B, T, C, 1]
        for f in self.up_layers:
            c = f(c)
        return tf.squeeze(c, -1)  # drop the channel dim: [B, T, C]
class TFConvInUpsampleNetWork(tf.keras.layers.Layer):
    """Convolution + upsampling network module."""
    def __init__(
        self,
        upsample_scales,
        nonlinear_activation=None,
        nonlinear_activation_params={},  # NOTE(review): mutable default; forwarded unmodified.
        interpolate_mode="nearest",
        freq_axis_kernel_size=1,
        aux_channels=80,
        aux_context_window=0,
        use_causal_conv=False,
        initializer_seed=42,
        **kwargs,
    ):
        """Initialize convolution + upsampling network module.
        Args:
            upsample_scales (list): List of upsampling scales.
            nonlinear_activation (str): Activation function name.
            nonlinear_activation_params (dict): Arguments for specified activation function.
            interpolate_mode (str): Interpolation mode.
            freq_axis_kernel_size (int): Kernel size in the direction of frequency axis.
            aux_channels (int): Number of channels of pre-convolutional layer.
            aux_context_window (int): Context window size of the pre-convolutional layer.
            use_causal_conv (bool): Whether to use causal structure.
            initializer_seed (int): Seed for conv_in's kernel initializer.
        """
        super().__init__(**kwargs)
        self.aux_context_window = aux_context_window
        self.use_causal_conv = use_causal_conv and aux_context_window > 0
        # To capture wide-context information in conditional features:
        # causal mode looks only backwards, centered mode looks both ways.
        kernel_size = (
            aux_context_window + 1 if use_causal_conv else 2 * aux_context_window + 1
        )
        # NOTE(review): padding is always "same" here, so conv_in preserves
        # length; the output-length formula in call()'s docstring (which
        # subtracts the context window) may not match — verify upstream.
        self.conv_in = TFConv1d(
            filters=aux_channels,
            kernel_size=kernel_size,
            padding="same",
            use_bias=False,
            initializer_seed=initializer_seed,
            name="conv_in",
        )
        self.upsample = TFUpsampleNetWork(
            output_channels=aux_channels,
            upsample_scales=upsample_scales,
            nonlinear_activation=nonlinear_activation,
            nonlinear_activation_params=nonlinear_activation_params,
            interpolate_mode=interpolate_mode,
            freq_axis_kernel_size=freq_axis_kernel_size,
            use_causal_conv=use_causal_conv,
            name="upsample_network",
        )
    def call(self, c):
        """Calculate forward propagation.
        Args:
            c : Input tensor (B, T', C).
        Returns:
            Tensor: Upsampled tensor (B, T, C),
            where T = (T' - aux_context_window * 2) * prod(upsample_scales).
        Note:
            The length of inputs considers the context window size.
        """
        c_ = self.conv_in(c)
        return self.upsample(c_)
class TFParallelWaveGANGenerator(BaseModel):
    """Parallel WaveGAN Generator module: a non-autoregressive WaveNet that
    maps noise + upsampled mel conditioning to a waveform."""
    def __init__(self, config, **kwargs):
        """Build the generator from a ParallelWaveGANGeneratorConfig.
        Args:
            config: configuration object providing layer/channel counts,
                upsampling parameters and initializer seed.
        """
        super().__init__(**kwargs)
        self.out_channels = config.out_channels
        self.aux_channels = config.aux_channels
        self.n_layers = config.n_layers
        self.stacks = config.stacks
        self.kernel_size = config.kernel_size
        self.upsample_params = config.upsample_params
        # check the number of layers and stacks: layers are evenly divided
        # into stacks with exponentially growing dilation inside each stack.
        assert self.n_layers % self.stacks == 0
        n_layers_per_stack = self.n_layers // self.stacks
        # 1x1 conv lifting the 1-channel noise input to residual_channels.
        self.first_conv = TFConv1d1x1(
            filters=config.residual_channels,
            use_bias=True,
            padding="same",
            initializer_seed=config.initializer_seed,
            name="first_convolution",
        )
        # define conv + upsampling network for the mel conditioning
        if config.upsample_conditional_features:
            self.upsample_params.update({"use_causal_conv": config.use_causal_conv})
            self.upsample_params.update(
                {
                    "aux_channels": config.aux_channels,
                    "aux_context_window": config.aux_context_window,
                }
            )
            self.upsample_net = TFConvInUpsampleNetWork(**self.upsample_params)
        else:
            # NOTE(review): when upsample_net is None, call()/inference() never
            # assign `c`, so the forward pass would raise a NameError — confirm
            # whether this configuration is ever supported.
            self.upsample_net = None
        # define residual blocks; dilation = 2 ** (layer index within stack)
        self.conv_layers = []
        for layer in range(self.n_layers):
            dilation_rate = 2 ** (layer % n_layers_per_stack)
            conv = TFResidualBlock(
                kernel_size=config.kernel_size,
                residual_channels=config.residual_channels,
                gate_channels=config.gate_channels,
                skip_channels=config.skip_channels,
                aux_channels=config.aux_channels,
                dilation_rate=dilation_rate,
                dropout_rate=config.dropout_rate,
                use_bias=config.use_bias,
                use_causal_conv=config.use_causal_conv,
                initializer_seed=config.initializer_seed,
                name="residual_block_._{}".format(layer),
            )
            self.conv_layers += [conv]
        # output head: ReLU -> 1x1 -> ReLU -> 1x1 -> tanh (waveform in [-1, 1])
        self.last_conv_layers = [
            tf.keras.layers.ReLU(),
            TFConv1d1x1(
                filters=config.skip_channels,
                use_bias=config.use_bias,
                padding="same",
                initializer_seed=config.initializer_seed,
            ),
            tf.keras.layers.ReLU(),
            TFConv1d1x1(
                filters=config.out_channels,
                use_bias=True,
                padding="same",
                initializer_seed=config.initializer_seed,
            ),
            tf.keras.layers.Activation("tanh"),
        ]
    def _build(self):
        """Run a dummy mel batch through the network to create variables."""
        mels = tf.random.uniform(shape=[2, 20, 80], dtype=tf.float32)
        self(mels, training=tf.cast(True, tf.bool))
    def call(self, mels, training=False, **kwargs):
        """Calculate forward propagation.
        Args:
            mels (Tensor): Local conditioning auxiliary features (B, T', C).
            training (bool): Enables dropout in the residual blocks.
        Returns:
            Tensor: Output tensor (B, T, 1)
        """
        # perform upsampling of the conditioning features to waveform rate
        if mels is not None and self.upsample_net is not None:
            c = self.upsample_net(mels)
        # draw random noise x with one sample per output timestep
        # encode to hidden representation
        x = tf.expand_dims(tf.random.normal(shape=tf.shape(c)[0:2]), axis=2)
        x = self.first_conv(x)
        skips = 0
        for f in self.conv_layers:
            x, h = f(x, c, training=training)
            skips += h
        # normalize the accumulated skip connections by sqrt(1/N)
        skips *= tf.math.sqrt(1.0 / len(self.conv_layers))
        # apply final layers
        x = skips
        for f in self.last_conv_layers:
            x = f(x)
        return x
    @tf.function(
        experimental_relax_shapes=True,
        input_signature=[
            tf.TensorSpec(shape=[None, None, 80], dtype=tf.float32, name="mels"),
        ],
    )
    def inference(self, mels):
        """Calculate forward propagation (graph-compiled, dropout disabled).
        Args:
            mels (Tensor): Local conditioning auxiliary features (B, T', 80).
        Returns:
            Tensor: Output tensor (B, T, 1)
        """
        # perform upsampling
        if mels is not None and self.upsample_net is not None:
            c = self.upsample_net(mels)
        # encode to hidden representation
        x = tf.expand_dims(tf.random.normal(shape=tf.shape(c)[0:2]), axis=2)
        x = self.first_conv(x)
        skips = 0
        for f in self.conv_layers:
            x, h = f(x, c, training=False)
            skips += h
        skips *= tf.math.sqrt(1.0 / len(self.conv_layers))
        # apply final layers
        x = skips
        for f in self.last_conv_layers:
            x = f(x)
        return x
class TFParallelWaveGANDiscriminator(BaseModel):
    """Parallel WaveGAN discriminator: a stack of dilated 1D convolutions
    mapping a waveform to per-sample real/fake scores."""

    def __init__(self, config, **kwargs):
        """Build the convolution stack from a discriminator config.

        Args:
            config: ParallelWaveGANDiscriminatorConfig with layer counts,
                kernel size, dilation factor and activation settings.
        """
        super().__init__(**kwargs)
        assert (config.kernel_size - 1) % 2 == 0, "Not support even number kernel size."
        assert config.dilation_factor > 0, "Dilation factor must be > 0."
        layers = []
        for idx in range(config.n_layers - 1):
            # First layer is undilated; afterwards the dilation grows
            # linearly (factor == 1) or geometrically (factor ** idx).
            if idx == 0:
                dilation = 1
            elif config.dilation_factor == 1:
                dilation = idx
            else:
                dilation = config.dilation_factor ** idx
            layers.append(
                TFConv1d(
                    filters=config.conv_channels,
                    kernel_size=config.kernel_size,
                    padding="same",
                    dilation_rate=dilation,
                    use_bias=config.use_bias,
                    initializer_seed=config.initializer_seed,
                )
            )
            layers.append(
                getattr(tf.keras.layers, config.nonlinear_activation)(
                    **config.nonlinear_activation_params
                )
            )
        # Final projection down to out_channels, without an activation.
        layers.append(
            TFConv1d(
                filters=config.out_channels,
                kernel_size=config.kernel_size,
                padding="same",
                use_bias=config.use_bias,
                initializer_seed=config.initializer_seed,
            )
        )
        if config.apply_sigmoid_at_last:
            layers.append(tf.keras.layers.Activation("sigmoid"))
        self.conv_layers = layers

    def _build(self):
        """Run a dummy waveform through the network to create variables."""
        dummy = tf.random.uniform(shape=[2, 16000, 1])
        self(dummy)

    def call(self, x):
        """Score an input waveform.

        Args:
            x (Tensor): Input signal (B, T, 1).

        Returns:
            Tensor: Per-sample scores (B, T, 1).
        """
        for layer in self.conv_layers:
            x = layer(x)
        return x
| 18,663 | 32.508079 | 112 | py |
TensorFlowTTS | TensorFlowTTS-master/tensorflow_tts/models/melgan.py | # -*- coding: utf-8 -*-
# Copyright 2020 The MelGAN Authors and Minh Nguyen (@dathudeptrai)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MelGAN Modules."""
import numpy as np
import tensorflow as tf
from tensorflow_tts.models import BaseModel
from tensorflow_tts.utils import GroupConv1D, WeightNormalization
def get_initializer(initializer_seed=42):
    """Build a seeded Glorot-normal kernel initializer.

    Args:
        initializer_seed (int): Seed for reproducible weight initialization.

    Returns:
        A ``tf.keras.initializers.GlorotNormal`` instance using the given seed.
    """
    return tf.keras.initializers.GlorotNormal(seed=initializer_seed)
class TFReflectionPad1d(tf.keras.layers.Layer):
    """Pad the time axis of a channels-last (B, T, C) tensor on both sides."""

    def __init__(self, padding_size, padding_type="REFLECT", **kwargs):
        """Store the padding configuration.

        Args:
            padding_size (int): Amount of padding added on each side of T.
            padding_type (str): One of "CONSTANT", "REFLECT" or "SYMMETRIC"
                (the modes accepted by ``tf.pad``); defaults to "REFLECT".
        """
        super().__init__(**kwargs)
        self.padding_size = padding_size
        self.padding_type = padding_type

    def call(self, x):
        """Pad the input along the time dimension only.

        Args:
            x (Tensor): Input tensor of shape (B, T, C).

        Returns:
            Tensor: Padded tensor of shape (B, T + 2 * padding_size, C).
        """
        pad_width = [[0, 0], [self.padding_size, self.padding_size], [0, 0]]
        return tf.pad(x, pad_width, self.padding_type)
class TFConvTranspose1d(tf.keras.layers.Layer):
    """Tensorflow ConvTranspose1d module, implemented via Conv2DTranspose on a
    singleton spatial axis."""
    def __init__(
        self,
        filters,
        kernel_size,
        strides,
        padding,
        is_weight_norm,
        initializer_seed,
        **kwargs
    ):
        """Initialize TFConvTranspose1d module.
        Args:
            filters (int): Number of filters.
            kernel_size (int): kernel size.
            strides (int): Stride width (upsampling factor along T).
            padding (str): Padding type ("same" or "valid").
                NOTE(review): this argument is accepted but ignored — the
                Conv2DTranspose below hard-codes padding="same".
            is_weight_norm (bool): Wrap the conv in WeightNormalization.
            initializer_seed (int): Seed for the kernel initializer.
        """
        super().__init__(**kwargs)
        self.conv1d_transpose = tf.keras.layers.Conv2DTranspose(
            filters=filters,
            kernel_size=(kernel_size, 1),
            strides=(strides, 1),
            padding="same",
            kernel_initializer=get_initializer(initializer_seed),
        )
        if is_weight_norm:
            self.conv1d_transpose = WeightNormalization(self.conv1d_transpose)
    def call(self, x):
        """Calculate forward propagation.
        Args:
            x (Tensor): Input tensor (B, T, C).
        Returns:
            Tensor: Output tensor (B, T', C'), where T' = T * strides.
        """
        # Insert/remove a dummy width axis so the 2D transposed conv acts as 1D.
        x = tf.expand_dims(x, 2)
        x = self.conv1d_transpose(x)
        x = tf.squeeze(x, 2)
        return x
class TFResidualStack(tf.keras.layers.Layer):
    """Tensorflow ResidualStack module: dilated conv branch + 1x1 shortcut."""
    def __init__(
        self,
        kernel_size,
        filters,
        dilation_rate,
        use_bias,
        nonlinear_activation,
        nonlinear_activation_params,
        is_weight_norm,
        initializer_seed,
        **kwargs
    ):
        """Initialize TFResidualStack module.
        Args:
            kernel_size (int): Kernel size.
            filters (int): Number of filters.
            dilation_rate (int): Dilation rate.
            use_bias (bool): Whether to add bias parameter in convolution layers.
            nonlinear_activation (str): Activation function module name.
            nonlinear_activation_params (dict): Hyperparameters for activation function.
            is_weight_norm (bool): Wrap conv layers in WeightNormalization.
            initializer_seed (int): Seed for the kernel initializers.
        """
        super().__init__(**kwargs)
        # Main branch: activation -> reflect-pad -> dilated conv -> activation -> 1x1 conv.
        # Reflection padding of (k-1)//2 * dilation keeps the output length equal
        # to the input length without zero-padding artifacts.
        self.blocks = [
            getattr(tf.keras.layers, nonlinear_activation)(
                **nonlinear_activation_params
            ),
            TFReflectionPad1d((kernel_size - 1) // 2 * dilation_rate),
            tf.keras.layers.Conv1D(
                filters=filters,
                kernel_size=kernel_size,
                dilation_rate=dilation_rate,
                use_bias=use_bias,
                kernel_initializer=get_initializer(initializer_seed),
            ),
            getattr(tf.keras.layers, nonlinear_activation)(
                **nonlinear_activation_params
            ),
            tf.keras.layers.Conv1D(
                filters=filters,
                kernel_size=1,
                use_bias=use_bias,
                kernel_initializer=get_initializer(initializer_seed),
            ),
        ]
        # 1x1 shortcut projection for the residual connection.
        self.shortcut = tf.keras.layers.Conv1D(
            filters=filters,
            kernel_size=1,
            use_bias=use_bias,
            kernel_initializer=get_initializer(initializer_seed),
            name="shortcut",
        )
        # apply weightnorm
        if is_weight_norm:
            self._apply_weightnorm(self.blocks)
            self.shortcut = WeightNormalization(self.shortcut)
    def call(self, x):
        """Calculate forward propagation.
        Args:
            x (Tensor): Input tensor (B, T, C).
        Returns:
            Tensor: Output tensor (B, T, C).
        """
        _x = tf.identity(x)
        for layer in self.blocks:
            _x = layer(_x)
        shortcut = self.shortcut(x)
        return shortcut + _x
    def _apply_weightnorm(self, list_layers):
        """Try apply weightnorm for all layer in list_layers (in place).
        Layers are matched by their (lowercased) name; non-conv/dense layers
        and any layer that raises are silently left unwrapped.
        """
        for i in range(len(list_layers)):
            try:
                layer_name = list_layers[i].name.lower()
                if "conv1d" in layer_name or "dense" in layer_name:
                    list_layers[i] = WeightNormalization(list_layers[i])
            except Exception:
                # NOTE(review): broad best-effort swallow — a failure here means
                # the layer simply keeps its original (un-normalized) weights.
                pass
class TFMelGANGenerator(BaseModel):
    """Tensorflow MelGAN generator module.

    Upsamples a mel-spectrogram (B, T, n_mels) to a waveform-rate signal via
    transposed convolutions interleaved with residual stacks.
    """

    def __init__(self, config, **kwargs):
        """Initialize TFMelGANGenerator module.

        Args:
            config: config object of Melgan generator.
        """
        super().__init__(**kwargs)

        # check hyper parameter is valid or not
        assert config.filters >= np.prod(config.upsample_scales)
        assert config.filters % (2 ** len(config.upsample_scales)) == 0

        # add initial layer
        layers = []
        layers += [
            TFReflectionPad1d(
                (config.kernel_size - 1) // 2,
                padding_type=config.padding_type,
                name="first_reflect_padding",
            ),
            tf.keras.layers.Conv1D(
                filters=config.filters,
                kernel_size=config.kernel_size,
                use_bias=config.use_bias,
                kernel_initializer=get_initializer(config.initializer_seed),
            ),
        ]

        for i, upsample_scale in enumerate(config.upsample_scales):
            # add upsampling layer; channel count halves at each stage.
            layers += [
                getattr(tf.keras.layers, config.nonlinear_activation)(
                    **config.nonlinear_activation_params
                ),
                TFConvTranspose1d(
                    filters=config.filters // (2 ** (i + 1)),
                    kernel_size=upsample_scale * 2,
                    strides=upsample_scale,
                    padding="same",
                    is_weight_norm=config.is_weight_norm,
                    initializer_seed=config.initializer_seed,
                    name="conv_transpose_._{}".format(i),
                ),
            ]

            # add residual stack layer
            for j in range(config.stacks):
                layers += [
                    TFResidualStack(
                        kernel_size=config.stack_kernel_size,
                        filters=config.filters // (2 ** (i + 1)),
                        dilation_rate=config.stack_kernel_size ** j,
                        use_bias=config.use_bias,
                        nonlinear_activation=config.nonlinear_activation,
                        nonlinear_activation_params=config.nonlinear_activation_params,
                        is_weight_norm=config.is_weight_norm,
                        initializer_seed=config.initializer_seed,
                        name="residual_stack_._{}._._{}".format(i, j),
                    )
                ]

        # add final layer
        layers += [
            getattr(tf.keras.layers, config.nonlinear_activation)(
                **config.nonlinear_activation_params
            ),
            TFReflectionPad1d(
                (config.kernel_size - 1) // 2,
                padding_type=config.padding_type,
                name="last_reflect_padding",
            ),
            tf.keras.layers.Conv1D(
                filters=config.out_channels,
                kernel_size=config.kernel_size,
                use_bias=config.use_bias,
                kernel_initializer=get_initializer(config.initializer_seed),
                # keep float32 output even under mixed precision.
                dtype=tf.float32,
            ),
        ]
        # NOTE: the config attribute keeps the historical "nolinear" spelling.
        if config.use_final_nolinear_activation:
            layers += [tf.keras.layers.Activation("tanh", dtype=tf.float32)]

        if config.is_weight_norm is True:
            self._apply_weightnorm(layers)

        self.melgan = tf.keras.models.Sequential(layers)

    def call(self, mels, **kwargs):
        """Calculate forward propagation.

        Args:
            mels (Tensor): Input tensor (B, T, channels)
        Returns:
            Tensor: Output tensor (B, T ** prod(upsample_scales), out_channels)
        """
        return self.inference(mels)

    @tf.function(
        input_signature=[
            tf.TensorSpec(shape=[None, None, 80], dtype=tf.float32, name="mels")
        ]
    )
    def inference(self, mels):
        """Run the generator on a batch of mel-spectrograms (80 mel bins)."""
        return self.melgan(mels)

    @tf.function(
        input_signature=[
            tf.TensorSpec(shape=[1, None, 80], dtype=tf.float32, name="mels")
        ]
    )
    def inference_tflite(self, mels):
        """Same as inference but with batch size fixed to 1 for TFLite export."""
        return self.melgan(mels)

    def _apply_weightnorm(self, list_layers):
        """Try apply weightnorm for all layer in list_layers (in place)."""
        for i in range(len(list_layers)):
            try:
                layer_name = list_layers[i].name.lower()
                if "conv1d" in layer_name or "dense" in layer_name:
                    list_layers[i] = WeightNormalization(list_layers[i])
            except Exception:
                pass

    def _build(self):
        """Build model by passing fake input."""
        fake_mels = tf.random.uniform(shape=[1, 100, 80], dtype=tf.float32)
        self(fake_mels)
class TFMelGANDiscriminator(tf.keras.layers.Layer):
    """Tensorflow MelGAN discriminator module (single scale).

    A fully-convolutional stack; call() returns the output of every layer so
    the feature-matching loss can use intermediate activations.
    """

    def __init__(
        self,
        out_channels=1,
        kernel_sizes=[5, 3],
        filters=16,
        max_downsample_filters=1024,
        use_bias=True,
        downsample_scales=[4, 4, 4, 4],
        nonlinear_activation="LeakyReLU",
        nonlinear_activation_params={"alpha": 0.2},
        padding_type="REFLECT",
        is_weight_norm=True,
        initializer_seed=0.02,
        **kwargs
    ):
        """Initilize MelGAN discriminator module.

        Args:
            out_channels (int): Number of output channels.
            kernel_sizes (list): List of two kernel sizes. The prod will be used for the first conv layer,
                and the first and the second kernel sizes will be used for the last two layers.
                For example if kernel_sizes = [5, 3], the first layer kernel size will be 5 * 3 = 15.
                the last two layers' kernel size will be 5 and 3, respectively.
            filters (int): Initial number of filters for conv layer.
            max_downsample_filters (int): Maximum number of filters for downsampling layers.
            use_bias (bool): Whether to add bias parameter in convolution layers.
            downsample_scales (list): List of downsampling scales.
            nonlinear_activation (str): Activation function module name.
            nonlinear_activation_params (dict): Hyperparameters for activation function.
            padding_type (str): Padding type (support only "REFLECT", "CONSTANT", "SYMMETRIC")
            is_weight_norm (bool): Whether to apply WeightNormalization to conv layers.
            initializer_seed: Seed forwarded to get_initializer for kernel init.
        """
        super().__init__(**kwargs)

        # check kernel_size is valid (both must be odd so "same" padding centers).
        assert len(kernel_sizes) == 2
        assert kernel_sizes[0] % 2 == 1
        assert kernel_sizes[1] % 2 == 1

        # add first layer
        discriminator = [
            TFReflectionPad1d(
                (np.prod(kernel_sizes) - 1) // 2, padding_type=padding_type
            ),
            tf.keras.layers.Conv1D(
                filters=filters,
                kernel_size=int(np.prod(kernel_sizes)),
                use_bias=use_bias,
                kernel_initializer=get_initializer(initializer_seed),
            ),
            getattr(tf.keras.layers, nonlinear_activation)(
                **nonlinear_activation_params
            ),
        ]

        # add downsample layers; channel count grows with each downsampling
        # but is capped at max_downsample_filters.
        in_chs = filters
        with tf.keras.utils.CustomObjectScope({"GroupConv1D": GroupConv1D}):
            for downsample_scale in downsample_scales:
                out_chs = min(in_chs * downsample_scale, max_downsample_filters)
                discriminator += [
                    GroupConv1D(
                        filters=out_chs,
                        kernel_size=downsample_scale * 10 + 1,
                        strides=downsample_scale,
                        padding="same",
                        use_bias=use_bias,
                        groups=in_chs // 4,
                        kernel_initializer=get_initializer(initializer_seed),
                    )
                ]
                discriminator += [
                    getattr(tf.keras.layers, nonlinear_activation)(
                        **nonlinear_activation_params
                    )
                ]
                in_chs = out_chs

        # add final layers
        out_chs = min(in_chs * 2, max_downsample_filters)
        discriminator += [
            tf.keras.layers.Conv1D(
                filters=out_chs,
                kernel_size=kernel_sizes[0],
                padding="same",
                use_bias=use_bias,
                kernel_initializer=get_initializer(initializer_seed),
            )
        ]
        discriminator += [
            getattr(tf.keras.layers, nonlinear_activation)(
                **nonlinear_activation_params
            )
        ]
        discriminator += [
            tf.keras.layers.Conv1D(
                filters=out_channels,
                kernel_size=kernel_sizes[1],
                padding="same",
                use_bias=use_bias,
                kernel_initializer=get_initializer(initializer_seed),
            )
        ]

        if is_weight_norm is True:
            self._apply_weightnorm(discriminator)

        self.discriminator = discriminator
        # Backward-compat alias: the attribute was historically misspelled as
        # "disciminator"; keep it so external code reading it still works.
        self.disciminator = self.discriminator

    def call(self, x, **kwargs):
        """Calculate forward propagation.

        Args:
            x (Tensor): Input noise signal (B, T, 1).
        Returns:
            List: List of output tensors of each layer.
        """
        outs = []
        for f in self.discriminator:
            x = f(x)
            outs += [x]
        return outs

    def _apply_weightnorm(self, list_layers):
        """Try apply weightnorm for all layer in list_layers (in place)."""
        for i in range(len(list_layers)):
            try:
                layer_name = list_layers[i].name.lower()
                if "conv1d" in layer_name or "dense" in layer_name:
                    list_layers[i] = WeightNormalization(list_layers[i])
            except Exception:
                pass
class TFMelGANMultiScaleDiscriminator(BaseModel):
    """MelGAN multi-scale discriminator module.

    Holds ``config.scales`` single-scale discriminators; each subsequent one
    sees a further-downsampled version of the input signal.
    """

    def __init__(self, config, **kwargs):
        """Initilize MelGAN multi-scale discriminator module.

        Args:
            config: config object for melgan discriminator
        """
        super().__init__(**kwargs)
        # One discriminator per scale, all sharing the same hyperparameters.
        self.discriminator = [
            TFMelGANDiscriminator(
                out_channels=config.out_channels,
                kernel_sizes=config.kernel_sizes,
                filters=config.filters,
                max_downsample_filters=config.max_downsample_filters,
                use_bias=config.use_bias,
                downsample_scales=config.downsample_scales,
                nonlinear_activation=config.nonlinear_activation,
                nonlinear_activation_params=config.nonlinear_activation_params,
                padding_type=config.padding_type,
                is_weight_norm=config.is_weight_norm,
                initializer_seed=config.initializer_seed,
                name="melgan_discriminator_scale_._{}".format(scale_idx),
            )
            for scale_idx in range(config.scales)
        ]
        # Pooling layer used to downsample the input between scales.
        self.pooling = getattr(tf.keras.layers, config.downsample_pooling)(
            **config.downsample_pooling_params
        )

    def call(self, x, **kwargs):
        """Calculate forward propagation.

        Args:
            x (Tensor): Input noise signal (B, T, 1).
        Returns:
            List: List of list of each discriminator outputs, which consists of each layer output tensors.
        """
        results = []
        for disc in self.discriminator:
            results += [disc(x)]
            # Downsample before feeding the next (coarser) scale.
            x = self.pooling(x)
        return results
| 17,807 | 34.687375 | 106 | py |
TensorFlowTTS | TensorFlowTTS-master/tensorflow_tts/models/tacotron2.py | # -*- coding: utf-8 -*-
# Copyright 2020 The Tacotron-2 Authors, Minh Nguyen (@dathudeptrai), Eren Gölge (@erogol) and Jae Yoo (@jaeyoo)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tacotron-2 Modules."""
import collections
import numpy as np
import tensorflow as tf
# TODO: once https://github.com/tensorflow/addons/pull/1964 is fixed,
# uncomment this line.
# from tensorflow_addons.seq2seq import dynamic_decode
from tensorflow_addons.seq2seq import BahdanauAttention, Decoder, Sampler
from tensorflow_tts.utils import dynamic_decode
from tensorflow_tts.models import BaseModel
def get_initializer(initializer_range=0.02):
    """Create a truncated-normal kernel initializer.

    Args:
        initializer_range: float, stddev of the truncated normal distribution.

    Returns:
        A ``tf.keras.initializers.TruncatedNormal`` with the given stddev.
    """
    initializer = tf.keras.initializers.TruncatedNormal(stddev=initializer_range)
    return initializer
def gelu(x):
    """Gaussian Error Linear Unit using the exact erf formulation."""
    gate = 0.5 * (1.0 + tf.math.erf(x / tf.math.sqrt(2.0)))
    return x * gate
def gelu_new(x):
    """Smoother Gaussian Error Linear Unit (tanh approximation of erf)."""
    gate = 0.5 * (1.0 + tf.tanh((np.sqrt(2 / np.pi) * (x + 0.044715 * tf.pow(x, 3)))))
    return x * gate
def swish(x):
    """Swish activation function (x * sigmoid(x)), via the builtin op."""
    return tf.nn.swish(x)
def mish(x):
    """Mish activation function: x * tanh(softplus(x))."""
    return x * tf.math.tanh(tf.math.softplus(x))
# Lookup table mapping activation names (as used in configs) to callables.
ACT2FN = {
    "identity": tf.keras.layers.Activation("linear"),
    "tanh": tf.keras.layers.Activation("tanh"),
    "gelu": tf.keras.layers.Activation(gelu),
    "relu": tf.keras.activations.relu,
    "swish": tf.keras.layers.Activation(swish),
    "gelu_new": tf.keras.layers.Activation(gelu_new),
    "mish": tf.keras.layers.Activation(mish),
}
class TFEmbedding(tf.keras.layers.Embedding):
    """Embedding layer that looks up rows with gather_nd (faster variant)."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def call(self, inputs):
        """Look up embeddings for integer ids of shape (...,)."""
        # Append an index axis so gather_nd selects whole embedding rows.
        indices = tf.expand_dims(tf.cast(inputs, tf.int32), axis=-1)
        return tf.gather_nd(self.embeddings, indices)
class TFTacotronConvBatchNorm(tf.keras.layers.Layer):
    """Tacotron-2 Convolutional Batchnorm module.

    conv1d -> batch-norm -> activation -> dropout, with "same" padding so the
    time dimension is preserved.
    """

    def __init__(
        self, filters, kernel_size, dropout_rate, activation=None, name_idx=None
    ):
        """Init the block.

        Args:
            filters (int): Number of conv filters.
            kernel_size (int): Conv kernel size.
            dropout_rate (float): Dropout probability.
            activation (str): Key into ACT2FN (e.g. "tanh", "identity").
            name_idx: Index suffix used in the sub-layer names.
        """
        super().__init__()
        self.conv1d = tf.keras.layers.Conv1D(
            filters,
            kernel_size,
            kernel_initializer=get_initializer(0.02),
            padding="same",
            name="conv_._{}".format(name_idx),
        )
        # Sync variant keeps batch-norm statistics consistent across replicas
        # in multi-GPU training.
        self.norm = tf.keras.layers.experimental.SyncBatchNormalization(
            axis=-1, name="batch_norm_._{}".format(name_idx)
        )
        self.dropout = tf.keras.layers.Dropout(
            rate=dropout_rate, name="dropout_._{}".format(name_idx)
        )
        self.act = ACT2FN[activation]

    def call(self, inputs, training=False):
        """Run conv -> norm -> activation -> dropout over (B, T, C) inputs."""
        outputs = self.conv1d(inputs)
        outputs = self.norm(outputs, training=training)
        outputs = self.act(outputs)
        outputs = self.dropout(outputs, training=training)
        return outputs
class TFTacotronEmbeddings(tf.keras.layers.Layer):
    """Construct character/phoneme/positional/speaker embeddings."""

    def __init__(self, config, **kwargs):
        """Init variables."""
        super().__init__(**kwargs)
        self.vocab_size = config.vocab_size
        self.embedding_hidden_size = config.embedding_hidden_size
        self.initializer_range = config.initializer_range
        self.config = config

        # Speaker conditioning is only built for multi-speaker models.
        if config.n_speakers > 1:
            self.speaker_embeddings = TFEmbedding(
                config.n_speakers,
                config.embedding_hidden_size,
                embeddings_initializer=get_initializer(self.initializer_range),
                name="speaker_embeddings",
            )
            self.speaker_fc = tf.keras.layers.Dense(
                units=config.embedding_hidden_size, name="speaker_fc"
            )

        self.LayerNorm = tf.keras.layers.LayerNormalization(
            epsilon=config.layer_norm_eps, name="LayerNorm"
        )
        self.dropout = tf.keras.layers.Dropout(config.embedding_dropout_prob)

    def build(self, input_shape):
        """Build shared character/phoneme embedding layers."""
        with tf.name_scope("character_embeddings"):
            self.character_embeddings = self.add_weight(
                "weight",
                shape=[self.vocab_size, self.embedding_hidden_size],
                initializer=get_initializer(self.initializer_range),
            )
        super().build(input_shape)

    def call(self, inputs, training=False):
        """Get character embeddings of inputs.

        Args:
            1. character, Tensor (int32) shape [batch_size, length].
            2. speaker_id, Tensor (int32) shape [batch_size]
        Returns:
            Tensor (float32) shape [batch_size, length, embedding_size].
        """
        return self._embedding(inputs, training=training)

    def _embedding(self, inputs, training=False):
        """Applies embedding based on inputs tensor."""
        input_ids, speaker_ids = inputs

        # create embeddings
        inputs_embeds = tf.gather(self.character_embeddings, input_ids)
        embeddings = inputs_embeds

        if self.config.n_speakers > 1:
            speaker_embeddings = self.speaker_embeddings(speaker_ids)
            # softplus keeps the additive speaker features positive.
            speaker_features = tf.math.softplus(self.speaker_fc(speaker_embeddings))
            # extended speaker embeddings: broadcast over the time axis.
            extended_speaker_features = speaker_features[:, tf.newaxis, :]
            # sum all embedding
            embeddings += extended_speaker_features

        # apply layer-norm and dropout for embeddings.
        embeddings = self.LayerNorm(embeddings)
        embeddings = self.dropout(embeddings, training=training)

        return embeddings
class TFTacotronEncoderConvs(tf.keras.layers.Layer):
    """Stack of conv + batch-norm blocks used by the Tacotron-2 encoder."""

    def __init__(self, config, **kwargs):
        """Build the stack of ``config.n_conv_encoder`` conv-batchnorm blocks."""
        super().__init__(**kwargs)
        self.conv_batch_norm = [
            TFTacotronConvBatchNorm(
                filters=config.encoder_conv_filters,
                kernel_size=config.encoder_conv_kernel_sizes,
                activation=config.encoder_conv_activation,
                dropout_rate=config.encoder_conv_dropout_rate,
                name_idx=idx,
            )
            for idx in range(config.n_conv_encoder)
        ]

    def call(self, inputs, training=False):
        """Apply each conv-batchnorm block in sequence."""
        hidden = inputs
        for block in self.conv_batch_norm:
            hidden = block(hidden, training=training)
        return hidden
class TFTacotronEncoder(tf.keras.layers.Layer):
    """Tacotron-2 Encoder: embeddings -> conv stack -> bidirectional LSTM."""

    def __init__(self, config, **kwargs):
        """Init variables."""
        super().__init__(**kwargs)
        self.embeddings = TFTacotronEmbeddings(config, name="embeddings")
        self.convbn = TFTacotronEncoderConvs(config, name="conv_batch_norm")
        self.bilstm = tf.keras.layers.Bidirectional(
            tf.keras.layers.LSTM(
                units=config.encoder_lstm_units, return_sequences=True
            ),
            name="bilstm",
        )

        # A second, encoder-output-level speaker conditioning path
        # (in addition to the one inside TFTacotronEmbeddings).
        if config.n_speakers > 1:
            self.encoder_speaker_embeddings = TFEmbedding(
                config.n_speakers,
                config.embedding_hidden_size,
                embeddings_initializer=get_initializer(config.initializer_range),
                name="encoder_speaker_embeddings",
            )
            self.encoder_speaker_fc = tf.keras.layers.Dense(
                units=config.encoder_lstm_units * 2, name="encoder_speaker_fc"
            )

        self.config = config

    def call(self, inputs, training=False):
        """Encode [input_ids, speaker_ids, input_mask] to hidden states.

        Returns:
            Tensor (B, T, 2 * encoder_lstm_units).
        """
        input_ids, speaker_ids, input_mask = inputs

        # create embedding and mask them since we sum
        # speaker embedding to all character embedding.
        input_embeddings = self.embeddings([input_ids, speaker_ids], training=training)

        # pass embeddings to convolution batch norm
        conv_outputs = self.convbn(input_embeddings, training=training)

        # bi-lstm.
        outputs = self.bilstm(conv_outputs, mask=input_mask)

        if self.config.n_speakers > 1:
            encoder_speaker_embeddings = self.encoder_speaker_embeddings(speaker_ids)
            encoder_speaker_features = tf.math.softplus(
                self.encoder_speaker_fc(encoder_speaker_embeddings)
            )
            # extended encoderspeaker embeddings: broadcast over time axis.
            extended_encoder_speaker_features = encoder_speaker_features[
                :, tf.newaxis, :
            ]
            # sum to encoder outputs
            outputs += extended_encoder_speaker_features

        return outputs
class Tacotron2Sampler(Sampler):
    """Tacotron2 sampler for Seq2Seq training.

    During training it feeds ground-truth frames back to the decoder
    (teacher forcing); during inference it feeds the decoder's own previous
    output and stops when the stop-token fires.
    """

    def __init__(
        self, config,
    ):
        super().__init__()
        self.config = config
        # create schedule factor.
        # the input of a next decoder cell is calculated by formular:
        # next_inputs = ratio * prev_groundtruth_outputs + (1.0 - ratio) * prev_predicted_outputs.
        self._ratio = tf.constant(1.0, dtype=tf.float32)
        self._reduction_factor = self.config.reduction_factor

    def setup_target(self, targets, mel_lengths):
        """Setup ground-truth mel outputs for decoder.

        Keeps only every reduction_factor-th frame (the last of each group),
        since the decoder emits reduction_factor frames per step.
        """
        self.mel_lengths = mel_lengths
        self.set_batch_size(tf.shape(targets)[0])
        self.targets = targets[
            :, self._reduction_factor - 1 :: self._reduction_factor, :
        ]
        self.max_lengths = tf.tile([tf.shape(self.targets)[1]], [self._batch_size])

    @property
    def batch_size(self):
        return self._batch_size

    @property
    def sample_ids_shape(self):
        return tf.TensorShape([])

    @property
    def sample_ids_dtype(self):
        return tf.int32

    @property
    def reduction_factor(self):
        return self._reduction_factor

    def initialize(self):
        """Return (Finished, next_inputs): nothing finished, zero go-frame."""
        return (
            tf.tile([False], [self._batch_size]),
            tf.tile([[0.0]], [self._batch_size, self.config.n_mels]),
        )

    def sample(self, time, outputs, state):
        # Sample ids are unused by Tacotron-2; return dummy zeros.
        return tf.tile([0], [self._batch_size])

    def next_inputs(
        self,
        time,
        outputs,
        state,
        sample_ids,
        stop_token_prediction,
        training=False,
        **kwargs,
    ):
        """Compute (finished, next decoder input, next state) for one step."""
        if training:
            finished = time + 1 >= self.max_lengths
            # Teacher forcing blend; _ratio == 1.0 means pure ground truth.
            next_inputs = (
                self._ratio * self.targets[:, time, :]
                + (1.0 - self._ratio) * outputs[:, -self.config.n_mels :]
            )
            next_state = state
            return (finished, next_inputs, next_state)
        else:
            stop_token_prediction = tf.nn.sigmoid(stop_token_prediction)
            finished = tf.cast(tf.round(stop_token_prediction), tf.bool)
            # NOTE: reduce_all stops decoding only when every sequence in the
            # batch has fired its stop token.
            finished = tf.reduce_all(finished)
            # Feed the last predicted frame of this step back as input.
            next_inputs = outputs[:, -self.config.n_mels :]
            next_state = state
            return (finished, next_inputs, next_state)

    def set_batch_size(self, batch_size):
        self._batch_size = batch_size
class TFTacotronLocationSensitiveAttention(BahdanauAttention):
    """Tacotron-2 Location Sensitive Attention module.

    Extends Bahdanau attention with features computed from the (cumulative)
    previous alignments, plus an optional sliding attention window used at
    inference time for long inputs.
    """

    def __init__(
        self,
        config,
        memory,
        mask_encoder=True,
        memory_sequence_length=None,
        is_cumulate=True,
    ):
        """Init variables."""
        memory_length = memory_sequence_length if (mask_encoder is True) else None
        super().__init__(
            units=config.attention_dim,
            memory=memory,
            memory_sequence_length=memory_length,
            probability_fn="softmax",
            name="LocationSensitiveAttention",
        )
        self.location_convolution = tf.keras.layers.Conv1D(
            filters=config.attention_filters,
            kernel_size=config.attention_kernel,
            padding="same",
            use_bias=False,
            name="location_conv",
        )
        self.location_layer = tf.keras.layers.Dense(
            units=config.attention_dim, use_bias=False, name="location_layer"
        )

        self.v = tf.keras.layers.Dense(1, use_bias=True, name="scores_attention")
        self.config = config
        # is_cumulate: accumulate alignments into the state (original paper).
        self.is_cumulate = is_cumulate
        self.use_window = False

    def setup_window(self, win_front=2, win_back=4):
        """Enable windowed attention (call only after memory is set)."""
        self.win_front = tf.constant(win_front, tf.int32)
        self.win_back = tf.constant(win_back, tf.int32)

        self._indices = tf.expand_dims(tf.range(tf.shape(self.keys)[1]), 0)
        self._indices = tf.tile(
            self._indices, [tf.shape(self.keys)[0], 1]
        )  # [batch_size, max_time]

        self.use_window = True

    def _compute_window_mask(self, max_alignments):
        """Compute window mask for inference.

        Args:
            max_alignments (int): [batch_size]
        Returns:
            Mask that is 1.0 outside [argmax - win_front, argmax + win_back].
        """
        expanded_max_alignments = tf.expand_dims(max_alignments, 1)  # [batch_size, 1]
        low = expanded_max_alignments - self.win_front
        high = expanded_max_alignments + self.win_back
        mlow = tf.cast((self._indices < low), tf.float32)
        mhigh = tf.cast((self._indices > high), tf.float32)
        mask = mlow + mhigh
        return mask  # [batch_size, max_length]

    def __call__(self, inputs, training=False):
        """Compute (context, alignments, new_state) for one decoder step."""
        query, state, prev_max_alignments = inputs

        processed_query = self.query_layer(query) if self.query_layer else query
        processed_query = tf.expand_dims(processed_query, 1)

        # Location features from the previous (cumulative) alignments.
        expanded_alignments = tf.expand_dims(state, axis=2)
        f = self.location_convolution(expanded_alignments)
        processed_location_features = self.location_layer(f)

        energy = self._location_sensitive_score(
            processed_query, processed_location_features, self.keys
        )

        # mask energy on inference steps: push scores outside the window to -inf.
        if self.use_window is True:
            window_mask = self._compute_window_mask(prev_max_alignments)
            energy = energy + window_mask * -1e20

        alignments = self.probability_fn(energy, state)

        if self.is_cumulate:
            state = alignments + state
        else:
            state = alignments

        expanded_alignments = tf.expand_dims(alignments, 2)
        # Context vector = alignment-weighted sum of encoder values.
        context = tf.reduce_sum(expanded_alignments * self.values, 1)

        return context, alignments, state

    def _location_sensitive_score(self, W_query, W_fil, W_keys):
        """Calculate location sensitive energy."""
        return tf.squeeze(self.v(tf.nn.tanh(W_keys + W_query + W_fil)), -1)

    def get_initial_state(self, batch_size, size):
        """Get initial alignments."""
        return tf.zeros(shape=[batch_size, size], dtype=tf.float32)

    def get_initial_context(self, batch_size):
        """Get initial attention (context has encoder hidden width)."""
        return tf.zeros(
            shape=[batch_size, self.config.encoder_lstm_units * 2], dtype=tf.float32
        )
class TFTacotronPrenet(tf.keras.layers.Layer):
    """Tacotron-2 prenet: small dense stack applied to decoder inputs."""

    def __init__(self, config, **kwargs):
        """Init variables."""
        super().__init__(**kwargs)
        self.prenet_dense = [
            tf.keras.layers.Dense(
                units=config.prenet_units,
                activation=ACT2FN[config.prenet_activation],
                name="dense_._{}".format(i),
            )
            for i in range(config.n_prenet_layers)
        ]
        self.dropout = tf.keras.layers.Dropout(
            rate=config.prenet_dropout_rate, name="dropout"
        )

    def call(self, inputs, training=False):
        """Call logic."""
        outputs = inputs
        for layer in self.prenet_dense:
            outputs = layer(outputs)
            # training=True on purpose: Tacotron-2 keeps prenet dropout active
            # at inference time as well (it improves generalization).
            outputs = self.dropout(outputs, training=True)
        return outputs
class TFTacotronPostnet(tf.keras.layers.Layer):
    """Tacotron-2 postnet: conv stack producing a residual over decoder mels."""

    def __init__(self, config, **kwargs):
        """Build the postnet conv-batchnorm stack from *config*."""
        super().__init__(**kwargs)
        # Every block uses tanh except the last, which stays linear.
        self.conv_batch_norm = [
            TFTacotronConvBatchNorm(
                filters=config.postnet_conv_filters,
                kernel_size=config.postnet_conv_kernel_sizes,
                dropout_rate=config.postnet_dropout_rate,
                activation="tanh" if idx + 1 < config.n_conv_postnet else "identity",
                name_idx=idx,
            )
            for idx in range(config.n_conv_postnet)
        ]

    def call(self, inputs, training=False):
        """Apply each conv-batchnorm block in sequence."""
        hidden = inputs
        for block in self.conv_batch_norm:
            hidden = block(hidden, training=training)
        return hidden
# Per-step recurrent state threaded through TFTacotronDecoderCell.
TFTacotronDecoderCellState = collections.namedtuple(
    "TFTacotronDecoderCellState",
    [
        "attention_lstm_state",  # LSTMCell state of the attention RNN
        "decoder_lstms_state",  # StackedRNNCells state of the decoder LSTMs
        "context",  # last attention context vector
        "time",  # current decoding step (scalar int32)
        "state",  # (cumulative) attention alignments
        "alignment_history",  # TensorArray of alignments (or () for TFLite)
        "max_alignments",  # argmax of the last alignments per batch element
    ],
)

# One decoding step's outputs: mel frames, stop-token logits, dummy sample id.
TFDecoderOutput = collections.namedtuple(
    "TFDecoderOutput", ("mel_output", "token_output", "sample_id")
)
class TFTacotronDecoderCell(tf.keras.layers.AbstractRNNCell):
    """Tacotron-2 custom decoder cell.

    One step: prenet -> attention LSTM -> location-sensitive attention ->
    decoder LSTM stack -> frame/stop projections.
    """

    def __init__(self, config, enable_tflite_convertible=False, **kwargs):
        """Init variables.

        Args:
            config: Tacotron-2 config object.
            enable_tflite_convertible (bool): If True, skip the TensorArray
                alignment history (TensorArrays are not TFLite-convertible).
        """
        super().__init__(**kwargs)
        self.enable_tflite_convertible = enable_tflite_convertible
        self.prenet = TFTacotronPrenet(config, name="prenet")

        # define lstm cell on decoder.
        # TODO(@dathudeptrai) switch to zone-out lstm.
        self.attention_lstm = tf.keras.layers.LSTMCell(
            units=config.decoder_lstm_units, name="attention_lstm_cell"
        )
        lstm_cells = []
        for i in range(config.n_lstm_decoder):
            lstm_cell = tf.keras.layers.LSTMCell(
                units=config.decoder_lstm_units, name="lstm_cell_._{}".format(i)
            )
            lstm_cells.append(lstm_cell)
        self.decoder_lstms = tf.keras.layers.StackedRNNCells(
            lstm_cells, name="decoder_lstms"
        )

        # define attention layer.
        if config.attention_type == "lsa":
            # create location-sensitive attention.
            self.attention_layer = TFTacotronLocationSensitiveAttention(
                config,
                memory=None,
                mask_encoder=True,
                memory_sequence_length=None,
                is_cumulate=True,
            )
        else:
            raise ValueError("Only lsa (location-sensitive attention) is supported")

        # frame, stop projection layer.
        self.frame_projection = tf.keras.layers.Dense(
            units=config.n_mels * config.reduction_factor, name="frame_projection"
        )
        self.stop_projection = tf.keras.layers.Dense(
            units=config.reduction_factor, name="stop_projection"
        )

        self.config = config

    def set_alignment_size(self, alignment_size):
        """Set encoder time length (number of attention positions)."""
        self.alignment_size = alignment_size

    @property
    def output_size(self):
        """Return output (mel) size."""
        return self.frame_projection.units

    @property
    def state_size(self):
        """Return hidden state size, mirroring TFTacotronDecoderCellState.

        BUGFIX: this previously passed ``attention=...``, which is not a
        field of TFTacotronDecoderCellState and raised TypeError whenever the
        property was evaluated. The field is ``context``, whose width is the
        attention context dimension (2 * encoder_lstm_units, matching
        get_initial_state below).
        """
        return TFTacotronDecoderCellState(
            attention_lstm_state=self.attention_lstm.state_size,
            decoder_lstms_state=self.decoder_lstms.state_size,
            time=tf.TensorShape([]),
            context=self.config.encoder_lstm_units * 2,
            state=self.alignment_size,
            alignment_history=(),
            max_alignments=tf.TensorShape([1]),
        )

    def get_initial_state(self, batch_size):
        """Get initial states for one decode run."""
        initial_attention_lstm_cell_states = self.attention_lstm.get_initial_state(
            None, batch_size, dtype=tf.float32
        )
        initial_decoder_lstms_cell_states = self.decoder_lstms.get_initial_state(
            None, batch_size, dtype=tf.float32
        )
        # Context width equals the encoder's bidirectional hidden size.
        initial_context = tf.zeros(
            shape=[batch_size, self.config.encoder_lstm_units * 2], dtype=tf.float32
        )
        initial_state = self.attention_layer.get_initial_state(
            batch_size, size=self.alignment_size
        )

        if self.enable_tflite_convertible:
            initial_alignment_history = ()
        else:
            initial_alignment_history = tf.TensorArray(
                dtype=tf.float32, size=0, dynamic_size=True
            )

        return TFTacotronDecoderCellState(
            attention_lstm_state=initial_attention_lstm_cell_states,
            decoder_lstms_state=initial_decoder_lstms_cell_states,
            time=tf.zeros([], dtype=tf.int32),
            context=initial_context,
            state=initial_state,
            alignment_history=initial_alignment_history,
            max_alignments=tf.zeros([batch_size], dtype=tf.int32),
        )

    def call(self, inputs, states, training=False):
        """Run one decoder step.

        Args:
            inputs: Previous mel frame, shape [batch_size, n_mels].
            states (TFTacotronDecoderCellState): Previous step's state.
        Returns:
            ((mel frames, stop-token logits), new TFTacotronDecoderCellState).
        """
        decoder_input = inputs

        # 1. apply prenet for decoder_input.
        prenet_out = self.prenet(decoder_input, training=training)  # [batch_size, dim]

        # 2. concat prenet_out and prev context vector
        # then use it as input of attention lstm layer.
        attention_lstm_input = tf.concat([prenet_out, states.context], axis=-1)
        attention_lstm_output, next_attention_lstm_state = self.attention_lstm(
            attention_lstm_input, states.attention_lstm_state
        )

        # 3. compute context, alignment and cumulative alignment.
        prev_state = states.state
        if not self.enable_tflite_convertible:
            prev_alignment_history = states.alignment_history
        prev_max_alignments = states.max_alignments
        context, alignments, state = self.attention_layer(
            [attention_lstm_output, prev_state, prev_max_alignments], training=training,
        )

        # 4. run decoder lstm(s)
        decoder_lstms_input = tf.concat([attention_lstm_output, context], axis=-1)
        decoder_lstms_output, next_decoder_lstms_state = self.decoder_lstms(
            decoder_lstms_input, states.decoder_lstms_state
        )

        # 5. compute frame feature and stop token.
        projection_inputs = tf.concat([decoder_lstms_output, context], axis=-1)
        decoder_outputs = self.frame_projection(projection_inputs)

        stop_inputs = tf.concat([decoder_lstms_output, decoder_outputs], axis=-1)
        stop_tokens = self.stop_projection(stop_inputs)

        # 6. save alignment history to visualize.
        if self.enable_tflite_convertible:
            alignment_history = ()
        else:
            alignment_history = prev_alignment_history.write(states.time, alignments)

        # 7. return new states.
        new_states = TFTacotronDecoderCellState(
            attention_lstm_state=next_attention_lstm_state,
            decoder_lstms_state=next_decoder_lstms_state,
            time=states.time + 1,
            context=context,
            state=state,
            alignment_history=alignment_history,
            max_alignments=tf.argmax(alignments, -1, output_type=tf.int32),
        )

        return (decoder_outputs, stop_tokens), new_states
class TFTacotronDecoder(Decoder):
    """Tacotron-2 Decoder (tensorflow_addons seq2seq Decoder interface)."""

    def __init__(
        self,
        decoder_cell,
        decoder_sampler,
        output_layer=None,
        enable_tflite_convertible=False,
    ):
        """Initial variables.

        Args:
            decoder_cell (TFTacotronDecoderCell): Per-step cell.
            decoder_sampler (Tacotron2Sampler): Supplies next inputs / finished.
            output_layer: Optional projection applied to mel outputs.
            enable_tflite_convertible (bool): Use fixed sample-id shape for TFLite.
        """
        self.cell = decoder_cell
        self.sampler = decoder_sampler
        self.output_layer = output_layer
        self.enable_tflite_convertible = enable_tflite_convertible

    def setup_decoder_init_state(self, decoder_init_state):
        """Store the initial cell state used by initialize()."""
        self.initial_state = decoder_init_state

    def initialize(self, **kwargs):
        """Return (finished, first_inputs, initial_state) for dynamic_decode."""
        return self.sampler.initialize() + (self.initial_state,)

    @property
    def output_size(self):
        return TFDecoderOutput(
            mel_output=tf.nest.map_structure(
                lambda shape: tf.TensorShape(shape), self.cell.output_size
            ),
            token_output=tf.TensorShape(self.sampler.reduction_factor),
            sample_id=tf.TensorShape([1])
            if self.enable_tflite_convertible
            else self.sampler.sample_ids_shape,  # tf.TensorShape([])
        )

    @property
    def output_dtype(self):
        return TFDecoderOutput(tf.float32, tf.float32, self.sampler.sample_ids_dtype)

    @property
    def batch_size(self):
        return self.sampler._batch_size

    def step(self, time, inputs, state, training=False):
        """Run one decode step: cell -> optional projection -> sampler."""
        (mel_outputs, stop_tokens), cell_state = self.cell(
            inputs, state, training=training
        )
        if self.output_layer is not None:
            mel_outputs = self.output_layer(mel_outputs)
        sample_ids = self.sampler.sample(
            time=time, outputs=mel_outputs, state=cell_state
        )
        (finished, next_inputs, next_state) = self.sampler.next_inputs(
            time=time,
            outputs=mel_outputs,
            state=cell_state,
            sample_ids=sample_ids,
            stop_token_prediction=stop_tokens,
            training=training,
        )

        outputs = TFDecoderOutput(mel_outputs, stop_tokens, sample_ids)
        return (outputs, next_state, next_inputs, finished)
class TFTacotron2(BaseModel):
"""Tensorflow tacotron-2 model."""
def __init__(self, config, **kwargs):
"""Initalize tacotron-2 layers."""
enable_tflite_convertible = kwargs.pop("enable_tflite_convertible", False)
super().__init__(self, **kwargs)
self.encoder = TFTacotronEncoder(config, name="encoder")
self.decoder_cell = TFTacotronDecoderCell(
config,
name="decoder_cell",
enable_tflite_convertible=enable_tflite_convertible,
)
self.decoder = TFTacotronDecoder(
self.decoder_cell,
Tacotron2Sampler(config),
enable_tflite_convertible=enable_tflite_convertible,
)
self.postnet = TFTacotronPostnet(config, name="post_net")
self.post_projection = tf.keras.layers.Dense(
units=config.n_mels, name="residual_projection"
)
self.use_window_mask = False
self.maximum_iterations = 4000
self.enable_tflite_convertible = enable_tflite_convertible
self.config = config
def setup_window(self, win_front, win_back):
"""Call only for inference."""
self.use_window_mask = True
self.win_front = win_front
self.win_back = win_back
def setup_maximum_iterations(self, maximum_iterations):
"""Call only for inference."""
self.maximum_iterations = maximum_iterations
def _build(self):
input_ids = np.array([[1, 2, 3, 4, 5, 6, 7, 8, 9]])
input_lengths = np.array([9])
speaker_ids = np.array([0])
mel_outputs = np.random.normal(size=(1, 50, 80)).astype(np.float32)
mel_lengths = np.array([50])
self(
input_ids,
input_lengths,
speaker_ids,
mel_outputs,
mel_lengths,
10,
training=True,
)
def call(
self,
input_ids,
input_lengths,
speaker_ids,
mel_gts,
mel_lengths,
maximum_iterations=None,
use_window_mask=False,
win_front=2,
win_back=3,
training=False,
**kwargs,
):
"""Call logic."""
# create input-mask based on input_lengths
input_mask = tf.sequence_mask(
input_lengths,
maxlen=tf.reduce_max(input_lengths),
name="input_sequence_masks",
)
# Encoder Step.
encoder_hidden_states = self.encoder(
[input_ids, speaker_ids, input_mask], training=training
)
batch_size = tf.shape(encoder_hidden_states)[0]
alignment_size = tf.shape(encoder_hidden_states)[1]
# Setup some initial placeholders for decoder step. Include:
# 1. mel_gts, mel_lengths for teacher forcing mode.
# 2. alignment_size for attention size.
# 3. initial state for decoder cell.
# 4. memory (encoder hidden state) for attention mechanism.
self.decoder.sampler.setup_target(targets=mel_gts, mel_lengths=mel_lengths)
self.decoder.cell.set_alignment_size(alignment_size)
self.decoder.setup_decoder_init_state(
self.decoder.cell.get_initial_state(batch_size)
)
self.decoder.cell.attention_layer.setup_memory(
memory=encoder_hidden_states,
memory_sequence_length=input_lengths, # use for mask attention.
)
if use_window_mask:
self.decoder.cell.attention_layer.setup_window(
win_front=win_front, win_back=win_back
)
# run decode step.
(
(frames_prediction, stop_token_prediction, _),
final_decoder_state,
_,
) = dynamic_decode(
self.decoder,
maximum_iterations=maximum_iterations,
enable_tflite_convertible=self.enable_tflite_convertible,
training=training,
)
decoder_outputs = tf.reshape(
frames_prediction, [batch_size, -1, self.config.n_mels]
)
stop_token_prediction = tf.reshape(stop_token_prediction, [batch_size, -1])
residual = self.postnet(decoder_outputs, training=training)
residual_projection = self.post_projection(residual)
mel_outputs = decoder_outputs + residual_projection
if self.enable_tflite_convertible:
mask = tf.math.not_equal(
tf.cast(
tf.reduce_sum(tf.abs(decoder_outputs), axis=-1), dtype=tf.int32
),
0,
)
decoder_outputs = tf.expand_dims(
tf.boolean_mask(decoder_outputs, mask), axis=0
)
mel_outputs = tf.expand_dims(tf.boolean_mask(mel_outputs, mask), axis=0)
alignment_history = ()
else:
alignment_history = tf.transpose(
final_decoder_state.alignment_history.stack(), [1, 2, 0]
)
return decoder_outputs, mel_outputs, stop_token_prediction, alignment_history
@tf.function(
experimental_relax_shapes=True,
input_signature=[
tf.TensorSpec([None, None], dtype=tf.int32, name="input_ids"),
tf.TensorSpec([None,], dtype=tf.int32, name="input_lengths"),
tf.TensorSpec([None,], dtype=tf.int32, name="speaker_ids"),
],
)
def inference(self, input_ids, input_lengths, speaker_ids, **kwargs):
"""Call logic."""
# create input-mask based on input_lengths
input_mask = tf.sequence_mask(
input_lengths,
maxlen=tf.reduce_max(input_lengths),
name="input_sequence_masks",
)
# Encoder Step.
encoder_hidden_states = self.encoder(
[input_ids, speaker_ids, input_mask], training=False
)
batch_size = tf.shape(encoder_hidden_states)[0]
alignment_size = tf.shape(encoder_hidden_states)[1]
# Setup some initial placeholders for decoder step. Include:
# 1. batch_size for inference.
# 2. alignment_size for attention size.
# 3. initial state for decoder cell.
# 4. memory (encoder hidden state) for attention mechanism.
# 5. window front/back to solve long sentence synthesize problems. (call after setup memory.)
self.decoder.sampler.set_batch_size(batch_size)
self.decoder.cell.set_alignment_size(alignment_size)
self.decoder.setup_decoder_init_state(
self.decoder.cell.get_initial_state(batch_size)
)
self.decoder.cell.attention_layer.setup_memory(
memory=encoder_hidden_states,
memory_sequence_length=input_lengths, # use for mask attention.
)
if self.use_window_mask:
self.decoder.cell.attention_layer.setup_window(
win_front=self.win_front, win_back=self.win_back
)
# run decode step.
(
(frames_prediction, stop_token_prediction, _),
final_decoder_state,
_,
) = dynamic_decode(
self.decoder, maximum_iterations=self.maximum_iterations, training=False
)
decoder_outputs = tf.reshape(
frames_prediction, [batch_size, -1, self.config.n_mels]
)
stop_token_predictions = tf.reshape(stop_token_prediction, [batch_size, -1])
residual = self.postnet(decoder_outputs, training=False)
residual_projection = self.post_projection(residual)
mel_outputs = decoder_outputs + residual_projection
alignment_historys = tf.transpose(
final_decoder_state.alignment_history.stack(), [1, 2, 0]
)
return decoder_outputs, mel_outputs, stop_token_predictions, alignment_historys
@tf.function(
experimental_relax_shapes=True,
input_signature=[
tf.TensorSpec([1, None], dtype=tf.int32, name="input_ids"),
tf.TensorSpec([1,], dtype=tf.int32, name="input_lengths"),
tf.TensorSpec([1,], dtype=tf.int32, name="speaker_ids"),
],
)
def inference_tflite(self, input_ids, input_lengths, speaker_ids, **kwargs):
"""Call logic."""
# create input-mask based on input_lengths
input_mask = tf.sequence_mask(
input_lengths,
maxlen=tf.reduce_max(input_lengths),
name="input_sequence_masks",
)
# Encoder Step.
encoder_hidden_states = self.encoder(
[input_ids, speaker_ids, input_mask], training=False
)
batch_size = tf.shape(encoder_hidden_states)[0]
alignment_size = tf.shape(encoder_hidden_states)[1]
# Setup some initial placeholders for decoder step. Include:
# 1. batch_size for inference.
# 2. alignment_size for attention size.
# 3. initial state for decoder cell.
# 4. memory (encoder hidden state) for attention mechanism.
# 5. window front/back to solve long sentence synthesize problems. (call after setup memory.)
self.decoder.sampler.set_batch_size(batch_size)
self.decoder.cell.set_alignment_size(alignment_size)
self.decoder.setup_decoder_init_state(
self.decoder.cell.get_initial_state(batch_size)
)
self.decoder.cell.attention_layer.setup_memory(
memory=encoder_hidden_states,
memory_sequence_length=input_lengths, # use for mask attention.
)
if self.use_window_mask:
self.decoder.cell.attention_layer.setup_window(
win_front=self.win_front, win_back=self.win_back
)
# run decode step.
(
(frames_prediction, stop_token_prediction, _),
final_decoder_state,
_,
) = dynamic_decode(
self.decoder,
maximum_iterations=self.maximum_iterations,
enable_tflite_convertible=self.enable_tflite_convertible,
training=False,
)
decoder_outputs = tf.reshape(
frames_prediction, [batch_size, -1, self.config.n_mels]
)
stop_token_predictions = tf.reshape(stop_token_prediction, [batch_size, -1])
residual = self.postnet(decoder_outputs, training=False)
residual_projection = self.post_projection(residual)
mel_outputs = decoder_outputs + residual_projection
if self.enable_tflite_convertible:
mask = tf.math.not_equal(
tf.cast(
tf.reduce_sum(tf.abs(decoder_outputs), axis=-1), dtype=tf.int32
),
0,
)
decoder_outputs = tf.expand_dims(
tf.boolean_mask(decoder_outputs, mask), axis=0
)
mel_outputs = tf.expand_dims(tf.boolean_mask(mel_outputs, mask), axis=0)
alignment_historys = ()
else:
alignment_historys = tf.transpose(
final_decoder_state.alignment_history.stack(), [1, 2, 0]
)
return decoder_outputs, mel_outputs, stop_token_predictions, alignment_historys
| 37,180 | 34.716619 | 112 | py |
TensorFlowTTS | TensorFlowTTS-master/tensorflow_tts/models/mb_melgan.py | # -*- coding: utf-8 -*-
# Copyright 2020 The Multi-band MelGAN Authors , Minh Nguyen (@dathudeptrai) and Tomoki Hayashi (@kan-bayashi)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
#
# Compatible with https://github.com/kan-bayashi/ParallelWaveGAN/blob/master/parallel_wavegan/layers/pqmf.py.
"""Multi-band MelGAN Modules."""
import numpy as np
import tensorflow as tf
from scipy.signal import kaiser
from tensorflow_tts.models import BaseModel
from tensorflow_tts.models import TFMelGANGenerator
def design_prototype_filter(taps=62, cutoff_ratio=0.15, beta=9.0):
    """Design prototype filter for PQMF.
    This method is based on `A Kaiser window approach for the design of prototype
    filters of cosine modulated filterbanks`_.
    Args:
        taps (int): The number of filter taps (must be even).
        cutoff_ratio (float): Cut-off frequency ratio, strictly in (0, 1).
        beta (float): Beta coefficient for the Kaiser window.
    Returns:
        ndarray: Impulse response of prototype filter (taps + 1,).
    Raises:
        AssertionError: If ``taps`` is odd or ``cutoff_ratio`` is out of range.
    .. _`A Kaiser window approach for the design of prototype filters of cosine modulated filterbanks`:
        https://ieeexplore.ieee.org/abstract/document/681427
    """
    # check the arguments are valid
    assert taps % 2 == 0, "The number of taps must be an even number."
    assert 0.0 < cutoff_ratio < 1.0, "Cutoff ratio must be > 0.0 and < 1.0."
    # `scipy.signal.kaiser` was deprecated and removed in SciPy >= 1.13;
    # prefer the `scipy.signal.windows` location, falling back to the old
    # import for very old SciPy versions.
    try:
        from scipy.signal.windows import kaiser as kaiser_window
    except ImportError:  # pragma: no cover - legacy SciPy fallback
        from scipy.signal import kaiser as kaiser_window
    # make the initial (ideal low-pass) filter: a sinc at the cut-off frequency
    omega_c = np.pi * cutoff_ratio
    with np.errstate(invalid="ignore"):
        h_i = np.sin(omega_c * (np.arange(taps + 1) - 0.5 * taps)) / (
            np.pi * (np.arange(taps + 1) - 0.5 * taps)
        )
    # fix nan due to indeterminate form (0/0 at the filter center)
    h_i[taps // 2] = np.cos(0) * cutoff_ratio
    # apply kaiser window to truncate the ideal response smoothly
    w = kaiser_window(taps + 1, beta)
    h = h_i * w
    return h
class TFPQMF(tf.keras.layers.Layer):
    """Pseudo-QMF filter bank.

    Splits a full-band waveform into `subbands` critically-sampled subband
    signals (analysis) and recombines subband signals back into a full-band
    waveform (synthesis).
    """
    def __init__(self, config, **kwargs):
        """Initialize PQMF module.
        Args:
            config (class): MultiBandMelGANGeneratorConfig
        """
        super().__init__(**kwargs)
        subbands = config.subbands
        taps = config.taps
        cutoff_ratio = config.cutoff_ratio
        beta = config.beta
        # define filter coefficient
        # Cosine-modulate the low-pass prototype into one band-pass filter per
        # subband; analysis and synthesis banks differ only in the sign of the
        # (-1)^k * pi/4 phase term.
        h_proto = design_prototype_filter(taps, cutoff_ratio, beta)
        h_analysis = np.zeros((subbands, len(h_proto)))
        h_synthesis = np.zeros((subbands, len(h_proto)))
        for k in range(subbands):
            h_analysis[k] = (
                2
                * h_proto
                * np.cos(
                    (2 * k + 1)
                    * (np.pi / (2 * subbands))
                    * (np.arange(taps + 1) - (taps / 2))
                    + (-1) ** k * np.pi / 4
                )
            )
            h_synthesis[k] = (
                2
                * h_proto
                * np.cos(
                    (2 * k + 1)
                    * (np.pi / (2 * subbands))
                    * (np.arange(taps + 1) - (taps / 2))
                    - (-1) ** k * np.pi / 4
                )
            )
        # [subbands, 1, taps + 1] == [filter_width, in_channels, out_channels]
        # Transposed into tf.nn.conv1d's [filter_width, in, out] layout.
        analysis_filter = np.expand_dims(h_analysis, 1)
        analysis_filter = np.transpose(analysis_filter, (2, 1, 0))
        synthesis_filter = np.expand_dims(h_synthesis, 0)
        synthesis_filter = np.transpose(synthesis_filter, (2, 1, 0))
        # filter for downsampling & upsampling: a conv kernel that, with
        # stride == subbands, keeps (or places back) one sample per subband.
        updown_filter = np.zeros((subbands, subbands, subbands), dtype=np.float32)
        for k in range(subbands):
            updown_filter[0, k, k] = 1.0
        self.subbands = subbands
        self.taps = taps
        self.analysis_filter = analysis_filter.astype(np.float32)
        self.synthesis_filter = synthesis_filter.astype(np.float32)
        self.updown_filter = updown_filter.astype(np.float32)
    @tf.function(
        experimental_relax_shapes=True,
        input_signature=[tf.TensorSpec(shape=[None, None, 1], dtype=tf.float32)],
    )
    def analysis(self, x):
        """Analysis with PQMF.
        Args:
            x (Tensor): Input tensor (B, T, 1).
        Returns:
            Tensor: Output tensor (B, T // subbands, subbands).
        """
        # Symmetric zero padding keeps output length aligned with input length.
        x = tf.pad(x, [[0, 0], [self.taps // 2, self.taps // 2], [0, 0]])
        # Band-pass filtering, then decimation by `subbands`.
        x = tf.nn.conv1d(x, self.analysis_filter, stride=1, padding="VALID")
        x = tf.nn.conv1d(x, self.updown_filter, stride=self.subbands, padding="VALID")
        return x
    @tf.function(
        experimental_relax_shapes=True,
        input_signature=[tf.TensorSpec(shape=[None, None, None], dtype=tf.float32)],
    )
    def synthesis(self, x):
        """Synthesis with PQMF.
        Args:
            x (Tensor): Input tensor (B, T // subbands, subbands).
        Returns:
            Tensor: Output tensor (B, T, 1).
        """
        # Upsample by `subbands` (transposed conv inserts the zeros); the
        # gain factor `* self.subbands` compensates the decimation energy loss.
        x = tf.nn.conv1d_transpose(
            x,
            self.updown_filter * self.subbands,
            strides=self.subbands,
            output_shape=(
                tf.shape(x)[0],
                tf.shape(x)[1] * self.subbands,
                self.subbands,
            ),
        )
        x = tf.pad(x, [[0, 0], [self.taps // 2, self.taps // 2], [0, 0]])
        # Synthesis filtering sums the subbands back into one channel.
        return tf.nn.conv1d(x, self.synthesis_filter, stride=1, padding="VALID")
class TFMBMelGANGenerator(TFMelGANGenerator):
    """Tensorflow MBMelGAN generator module."""

    def __init__(self, config, **kwargs):
        super().__init__(config, **kwargs)
        # PQMF synthesis bank recombines the generated subbands into audio.
        self.pqmf = TFPQMF(config=config, dtype=tf.float32, name="pqmf")

    def call(self, mels, **kwargs):
        """Calculate forward propagation.

        Args:
            mels (Tensor): Input tensor (B, T, channels).

        Returns:
            Tensor: Output tensor (B, T ** prod(upsample_scales), out_channels).
        """
        return self.inference(mels)

    @tf.function(
        input_signature=[
            tf.TensorSpec(shape=[None, None, 80], dtype=tf.float32, name="mels")
        ]
    )
    def inference(self, mels):
        """Generate subband audio with MelGAN, then synthesize full-band audio."""
        subband_audios = self.melgan(mels)
        return self.pqmf.synthesis(subband_audios)

    @tf.function(
        input_signature=[
            tf.TensorSpec(shape=[1, None, 80], dtype=tf.float32, name="mels")
        ]
    )
    def inference_tflite(self, mels):
        """Variant of ``inference`` with the batch dimension fixed to one for TFLite."""
        subband_audios = self.melgan(mels)
        return self.pqmf.synthesis(subband_audios)
| 6,890 | 34.704663 | 110 | py |
TensorFlowTTS | TensorFlowTTS-master/tensorflow_tts/models/hifigan.py | # -*- coding: utf-8 -*-
# Copyright 2020 The Hifigan Authors and TensorflowTTS Team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Hifi Modules."""
import numpy as np
import tensorflow as tf
from tensorflow_tts.models.melgan import TFReflectionPad1d
from tensorflow_tts.models.melgan import TFConvTranspose1d
from tensorflow_tts.utils import GroupConv1D
from tensorflow_tts.utils import WeightNormalization
from tensorflow_tts.models import BaseModel
from tensorflow_tts.models import TFMelGANGenerator
class TFHifiResBlock(tf.keras.layers.Layer):
    """Tensorflow Hifigan resblock 1 module.

    Each of the len(dilation_rate) sub-blocks applies
    activation -> dilated conv -> activation -> conv (dilation 1)
    and adds the result back onto its input (residual connection).
    """
    def __init__(
        self,
        kernel_size,
        filters,
        dilation_rate,
        use_bias,
        nonlinear_activation,
        nonlinear_activation_params,
        is_weight_norm,
        initializer_seed,
        **kwargs
    ):
        """Initialize TFHifiResBlock module.
        Args:
            kernel_size (int): Kernel size.
            filters (int): Number of filters.
            dilation_rate (list): List of dilation rates, one per sub-block.
            use_bias (bool): Whether to add bias parameter in convolution layers.
            nonlinear_activation (str): Activation function module name.
            nonlinear_activation_params (dict): Hyperparameters for activation function.
            is_weight_norm (bool): Whether to use weight norm or not.
            initializer_seed (int): Seed for weight initializers (currently unused here).
        """
        super().__init__(**kwargs)
        # blocks_1[i]: reflection pad + dilated conv; blocks_2[i]: pad + conv
        # with dilation 1. Both pads keep the time dimension unchanged.
        self.blocks_1 = []
        self.blocks_2 = []
        for i in range(len(dilation_rate)):
            self.blocks_1.append(
                [
                    TFReflectionPad1d((kernel_size - 1) // 2 * dilation_rate[i]),
                    tf.keras.layers.Conv1D(
                        filters=filters,
                        kernel_size=kernel_size,
                        dilation_rate=dilation_rate[i],
                        use_bias=use_bias,
                    ),
                ]
            )
            self.blocks_2.append(
                [
                    TFReflectionPad1d((kernel_size - 1) // 2 * 1),
                    tf.keras.layers.Conv1D(
                        filters=filters,
                        kernel_size=kernel_size,
                        dilation_rate=1,
                        use_bias=use_bias,
                    ),
                ]
            )
        self.activation = getattr(tf.keras.layers, nonlinear_activation)(
            **nonlinear_activation_params
        )
        # apply weightnorm
        # NOTE(review): blocks_1/blocks_2 elements are [pad, conv] LISTS, so
        # `list_layers[i].name` below raises AttributeError which is silently
        # swallowed -- weight norm appears to never actually be applied here.
        # Left unchanged because wrapping the convs would alter the checkpoint
        # structure; confirm intent upstream.
        if is_weight_norm:
            self._apply_weightnorm(self.blocks_1)
            self._apply_weightnorm(self.blocks_2)
    def call(self, x, training=False):
        """Calculate forward propagation.
        Args:
            x (Tensor): Input tensor (B, T, C).
        Returns:
            Tensor: Output tensor (B, T, C).
        """
        for c1, c2 in zip(self.blocks_1, self.blocks_2):
            xt = self.activation(x)
            for c in c1:
                xt = c(xt)
            xt = self.activation(xt)
            for c in c2:
                xt = c(xt)
            # residual connection
            x = xt + x
        return x
    def _apply_weightnorm(self, list_layers):
        """Try apply weightnorm for all layer in list_layers.
        Failures (e.g. objects without a `.name`) are deliberately ignored.
        """
        for i in range(len(list_layers)):
            try:
                layer_name = list_layers[i].name.lower()
                if "conv1d" in layer_name or "dense" in layer_name:
                    list_layers[i] = WeightNormalization(list_layers[i])
            except Exception:
                pass
class TFMultiHifiResBlock(tf.keras.layers.Layer):
    """Tensorflow Multi Hifigan resblock 1 module.

    Runs several residual blocks in parallel on the same input and returns
    the mean of their outputs.
    """

    def __init__(self, list_resblock, **kwargs):
        super().__init__(**kwargs)
        self.list_resblock = list_resblock

    def call(self, x, training=False):
        """Average the outputs of all parallel residual blocks."""
        total = None
        for block in self.list_resblock:
            out = block(x, training=training)
            total = out if total is None else total + out
        return total / len(self.list_resblock)
class TFHifiGANGenerator(BaseModel):
    """Tensorflow HiFi-GAN generator: upsamples a mel spectrogram to a waveform
    through a stack of transposed convolutions and multi-receptive-field
    residual blocks."""
    def __init__(self, config, **kwargs):
        """Build the generator layer stack from `config`.
        Args:
            config: HifiGANGeneratorConfig-like object.
        """
        super().__init__(**kwargs)
        # check hyper parameter is valid or not:
        # each residual stack needs one kernel size and one dilation list.
        assert (
            config.stacks
            == len(config.stack_kernel_size)
            == len(config.stack_dilation_rate)
        )
        # add initial layer: reflection pad + conv keeps the time dimension.
        layers = []
        layers += [
            TFReflectionPad1d(
                (config.kernel_size - 1) // 2,
                padding_type=config.padding_type,
                name="first_reflect_padding",
            ),
            tf.keras.layers.Conv1D(
                filters=config.filters,
                kernel_size=config.kernel_size,
                use_bias=config.use_bias,
            ),
        ]
        for i, upsample_scale in enumerate(config.upsample_scales):
            # add upsampling layer; channel count halves at every stage.
            layers += [
                getattr(tf.keras.layers, config.nonlinear_activation)(
                    **config.nonlinear_activation_params
                ),
                TFConvTranspose1d(
                    filters=config.filters // (2 ** (i + 1)),
                    kernel_size=upsample_scale * 2,
                    strides=upsample_scale,
                    padding="same",
                    is_weight_norm=config.is_weight_norm,
                    initializer_seed=config.initializer_seed,
                    name="conv_transpose_._{}".format(i),
                ),
            ]
            # add residual stack layer: `stacks` parallel resblocks with
            # different kernel sizes / dilations, averaged by the multi-block.
            layers += [
                TFMultiHifiResBlock(
                    list_resblock=[
                        TFHifiResBlock(
                            kernel_size=config.stack_kernel_size[j],
                            filters=config.filters // (2 ** (i + 1)),
                            dilation_rate=config.stack_dilation_rate[j],
                            use_bias=config.use_bias,
                            nonlinear_activation=config.nonlinear_activation,
                            nonlinear_activation_params=config.nonlinear_activation_params,
                            is_weight_norm=config.is_weight_norm,
                            initializer_seed=config.initializer_seed,
                            name="hifigan_resblock_._{}".format(j),
                        )
                        for j in range(config.stacks)
                    ],
                    name="multi_hifigan_resblock_._{}".format(i),
                )
            ]
        # add final layer projecting down to `out_channels` waveform channels;
        # forced to float32 so mixed-precision runs still emit float32 audio.
        layers += [
            getattr(tf.keras.layers, config.nonlinear_activation)(
                **config.nonlinear_activation_params
            ),
            TFReflectionPad1d(
                (config.kernel_size - 1) // 2,
                padding_type=config.padding_type,
                name="last_reflect_padding",
            ),
            tf.keras.layers.Conv1D(
                filters=config.out_channels,
                kernel_size=config.kernel_size,
                use_bias=config.use_bias,
                dtype=tf.float32,
            ),
        ]
        if config.use_final_nolinear_activation:
            layers += [tf.keras.layers.Activation("tanh", dtype=tf.float32)]
        if config.is_weight_norm is True:
            self._apply_weightnorm(layers)
        self.hifigan = tf.keras.models.Sequential(layers)
    def call(self, mels, **kwargs):
        """Calculate forward propagation.
        Args:
            mels (Tensor): Input tensor (B, T, channels)
        Returns:
            Tensor: Output tensor (B, T ** prod(upsample_scales), out_channels)
        """
        return self.inference(mels)
    @tf.function(
        input_signature=[
            tf.TensorSpec(shape=[None, None, 80], dtype=tf.float32, name="mels")
        ]
    )
    def inference(self, mels):
        # Batched inference entry point (80-bin mel input).
        return self.hifigan(mels)
    @tf.function(
        input_signature=[
            tf.TensorSpec(shape=[1, None, 80], dtype=tf.float32, name="mels")
        ]
    )
    def inference_tflite(self, mels):
        # Same as `inference` but with batch size fixed to 1 for TFLite export.
        return self.hifigan(mels)
    def _apply_weightnorm(self, list_layers):
        """Try apply weightnorm for all layer in list_layers.
        Only layers whose name contains "conv1d" or "dense" are wrapped;
        failures are deliberately ignored.
        """
        for i in range(len(list_layers)):
            try:
                layer_name = list_layers[i].name.lower()
                if "conv1d" in layer_name or "dense" in layer_name:
                    list_layers[i] = WeightNormalization(list_layers[i])
            except Exception:
                pass
    def _build(self):
        """Build model by passing fake input."""
        fake_mels = tf.random.uniform(shape=[1, 100, 80], dtype=tf.float32)
        self(fake_mels)
class TFHifiGANPeriodDiscriminator(tf.keras.layers.Layer):
    """Tensorflow Hifigan period discriminator module.

    Folds the 1-D waveform into a 2-D map of width `period` and scores it
    with a stack of 2-D convolutions, so each discriminator focuses on
    periodic structure at its own period.
    """
    def __init__(
        self,
        period,
        out_channels=1,
        n_layers=5,
        kernel_size=5,
        strides=3,
        filters=8,
        filter_scales=4,
        max_filters=1024,
        nonlinear_activation="LeakyReLU",
        # NOTE(review): mutable default dict is shared across instances;
        # harmless while it is never mutated, but worth confirming.
        nonlinear_activation_params={"alpha": 0.2},
        initializer_seed=42,
        is_weight_norm=False,
        **kwargs
    ):
        super().__init__(**kwargs)
        self.period = period
        self.out_filters = out_channels
        # Conv stack: channel count grows by `filter_scales` per layer,
        # capped at `max_filters`; strides only along the time axis.
        self.convs = []
        for i in range(n_layers):
            self.convs.append(
                tf.keras.layers.Conv2D(
                    filters=min(filters * (filter_scales ** (i + 1)), max_filters),
                    kernel_size=(kernel_size, 1),
                    strides=(strides, 1),
                    padding="same",
                )
            )
        self.conv_post = tf.keras.layers.Conv2D(
            filters=out_channels, kernel_size=(3, 1), padding="same",
        )
        self.activation = getattr(tf.keras.layers, nonlinear_activation)(
            **nonlinear_activation_params
        )
        if is_weight_norm:
            # NOTE(review): _apply_weightnorm only wraps layers whose name
            # contains "conv1d"/"dense"; these are Conv2D layers, so it looks
            # like only conv_post actually gets weight norm -- confirm intent.
            self._apply_weightnorm(self.convs)
            self.conv_post = WeightNormalization(self.conv_post)
    def call(self, x):
        """Calculate forward propagation.
        Args:
            x (Tensor): Input noise signal (B, T, 1).
        Returns:
            List: List of output tensors.
        """
        shape = tf.shape(x)
        n_pad = tf.convert_to_tensor(0, dtype=tf.int32)
        # Reflect-pad the time axis so it divides evenly by the period.
        if shape[1] % self.period != 0:
            n_pad = self.period - (shape[1] % self.period)
            x = tf.pad(x, [[0, 0], [0, n_pad], [0, 0]], "REFLECT")
        # Fold [B, T, C] into [B, T/period, period, C].
        x = tf.reshape(
            x, [shape[0], (shape[1] + n_pad) // self.period, self.period, x.shape[2]]
        )
        for layer in self.convs:
            x = layer(x)
            x = self.activation(x)
        x = self.conv_post(x)
        # Flatten back to a per-timestep score sequence.
        x = tf.reshape(x, [shape[0], -1, self.out_filters])
        return [x]
    def _apply_weightnorm(self, list_layers):
        """Try apply weightnorm for all layer in list_layers."""
        for i in range(len(list_layers)):
            try:
                layer_name = list_layers[i].name.lower()
                if "conv1d" in layer_name or "dense" in layer_name:
                    list_layers[i] = WeightNormalization(list_layers[i])
            except Exception:
                pass
class TFHifiGANMultiPeriodDiscriminator(BaseModel):
    """Tensorflow Hifigan Multi Period discriminator module."""

    def __init__(self, config, **kwargs):
        super().__init__(**kwargs)
        # Build one period discriminator per configured period scale.
        self.discriminator = [
            TFHifiGANPeriodDiscriminator(
                period,
                out_channels=config.out_channels,
                n_layers=config.n_layers,
                kernel_size=config.kernel_size,
                strides=config.strides,
                filters=config.filters,
                filter_scales=config.filter_scales,
                max_filters=config.max_filters,
                nonlinear_activation=config.nonlinear_activation,
                nonlinear_activation_params=config.nonlinear_activation_params,
                initializer_seed=config.initializer_seed,
                is_weight_norm=config.is_weight_norm,
                name="hifigan_period_discriminator_._{}".format(idx),
            )
            for idx, period in enumerate(config.period_scales)
        ]

    def call(self, x):
        """Run every period discriminator on the input signal.

        Args:
            x (Tensor): Input noise signal (B, T, 1).

        Returns:
            List: list of each discriminator outputs.
        """
        return [d(x) for d in self.discriminator]
| 13,272 | 33.928947 | 91 | py |
TensorFlowTTS | TensorFlowTTS-master/tensorflow_tts/models/fastspeech2.py | # -*- coding: utf-8 -*-
# Copyright 2020 The FastSpeech2 Authors and Minh Nguyen (@dathudeptrai)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tensorflow Model modules for FastSpeech2."""
import tensorflow as tf
from tensorflow_tts.models.fastspeech import TFFastSpeech, get_initializer
class TFFastSpeechVariantPredictor(tf.keras.layers.Layer):
    """FastSpeech variant predictor (used for duration, F0 and energy).

    A stack of Conv1D -> ReLU -> LayerNorm -> Dropout blocks followed by a
    scalar projection, producing one predicted value per input position.
    """
    def __init__(self, config, **kwargs):
        """Init variables."""
        super().__init__(**kwargs)
        self.conv_layers = []
        for i in range(config.variant_prediction_num_conv_layers):
            self.conv_layers.append(
                tf.keras.layers.Conv1D(
                    config.variant_predictor_filter,
                    config.variant_predictor_kernel_size,
                    padding="same",
                    name="conv_._{}".format(i),
                )
            )
            self.conv_layers.append(tf.keras.layers.Activation(tf.nn.relu))
            self.conv_layers.append(
                tf.keras.layers.LayerNormalization(
                    epsilon=config.layer_norm_eps, name="LayerNorm_._{}".format(i)
                )
            )
            self.conv_layers.append(
                tf.keras.layers.Dropout(config.variant_predictor_dropout_rate)
            )
        self.conv_layers_sequence = tf.keras.Sequential(self.conv_layers)
        # projects each hidden state down to a single scalar prediction
        self.output_layer = tf.keras.layers.Dense(1)
        if config.n_speakers > 1:
            # speaker conditioning: embedding + softplus-activated projection
            self.decoder_speaker_embeddings = tf.keras.layers.Embedding(
                config.n_speakers,
                config.encoder_self_attention_params.hidden_size,
                embeddings_initializer=get_initializer(config.initializer_range),
                name="speaker_embeddings",
            )
            self.speaker_fc = tf.keras.layers.Dense(
                units=config.encoder_self_attention_params.hidden_size,
                name="speaker_fc",
            )
        self.config = config
    def call(self, inputs, training=False):
        """Predict one scalar per input position.
        Args:
            inputs: tuple of (encoder_hidden_states [B, T, H],
                speaker_ids [B], attention_mask [B, T]).
        Returns:
            Tensor [B, T]: per-position predictions, zeroed at padded positions.
        """
        encoder_hidden_states, speaker_ids, attention_mask = inputs
        attention_mask = tf.cast(
            tf.expand_dims(attention_mask, 2), encoder_hidden_states.dtype
        )
        if self.config.n_speakers > 1:
            speaker_embeddings = self.decoder_speaker_embeddings(speaker_ids)
            speaker_features = tf.math.softplus(self.speaker_fc(speaker_embeddings))
            # extended speaker embeddings: broadcast over the time axis
            extended_speaker_features = speaker_features[:, tf.newaxis, :]
            encoder_hidden_states += extended_speaker_features
        # mask encoder hidden states so padding does not leak into the convs
        masked_encoder_hidden_states = encoder_hidden_states * attention_mask
        # pass through the conv stack, then project to a scalar per position
        outputs = self.conv_layers_sequence(masked_encoder_hidden_states)
        outputs = self.output_layer(outputs)
        masked_outputs = outputs * attention_mask
        outputs = tf.squeeze(masked_outputs, -1)
        return outputs
class TFFastSpeech2(TFFastSpeech):
    """TF FastSpeech 2 module: FastSpeech extended with F0 (pitch) and
    energy variant predictors whose outputs are embedded and added to the
    encoder hidden states before length regulation."""
    def __init__(self, config, **kwargs):
        """Init layers for fastspeech."""
        super().__init__(config, **kwargs)
        self.f0_predictor = TFFastSpeechVariantPredictor(
            config, dtype=tf.float32, name="f0_predictor"
        )
        self.energy_predictor = TFFastSpeechVariantPredictor(
            config, dtype=tf.float32, name="energy_predictor",
        )
        self.duration_predictor = TFFastSpeechVariantPredictor(
            config, dtype=tf.float32, name="duration_predictor"
        )
        # define f0_embeddings and energy_embeddings: 1-D convs that lift the
        # scalar F0/energy tracks to the encoder hidden size.
        self.f0_embeddings = tf.keras.layers.Conv1D(
            filters=config.encoder_self_attention_params.hidden_size,
            kernel_size=9,
            padding="same",
            name="f0_embeddings",
        )
        self.f0_dropout = tf.keras.layers.Dropout(0.5)
        self.energy_embeddings = tf.keras.layers.Conv1D(
            filters=config.encoder_self_attention_params.hidden_size,
            kernel_size=9,
            padding="same",
            name="energy_embeddings",
        )
        self.energy_dropout = tf.keras.layers.Dropout(0.5)
    def _build(self):
        """Dummy input for building model."""
        # fake inputs
        input_ids = tf.convert_to_tensor([[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]], tf.int32)
        speaker_ids = tf.convert_to_tensor([0], tf.int32)
        duration_gts = tf.convert_to_tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], tf.int32)
        f0_gts = tf.convert_to_tensor(
            [[10, 10, 10, 10, 10, 10, 10, 10, 10, 10]], tf.float32
        )
        energy_gts = tf.convert_to_tensor(
            [[10, 10, 10, 10, 10, 10, 10, 10, 10, 10]], tf.float32
        )
        self(
            input_ids=input_ids,
            speaker_ids=speaker_ids,
            duration_gts=duration_gts,
            f0_gts=f0_gts,
            energy_gts=energy_gts,
        )
    def call(
        self,
        input_ids,
        speaker_ids,
        duration_gts,
        f0_gts,
        energy_gts,
        training=False,
        **kwargs,
    ):
        """Teacher-forced forward pass using ground-truth duration/F0/energy.
        Returns:
            Tuple (mels_before, mels_after, duration_outputs, f0_outputs,
            energy_outputs).
        """
        # token id 0 is padding
        attention_mask = tf.math.not_equal(input_ids, 0)
        embedding_output = self.embeddings([input_ids, speaker_ids], training=training)
        encoder_output = self.encoder(
            [embedding_output, attention_mask], training=training
        )
        last_encoder_hidden_states = encoder_output[0]
        # The predictors use last_encoder_hidden_states; more hidden-state
        # layers could be used here rather than just the encoder's last one.
        duration_outputs = self.duration_predictor(
            [last_encoder_hidden_states, speaker_ids, attention_mask]
        )  # [batch_size, length]
        f0_outputs = self.f0_predictor(
            [last_encoder_hidden_states, speaker_ids, attention_mask], training=training
        )
        energy_outputs = self.energy_predictor(
            [last_encoder_hidden_states, speaker_ids, attention_mask], training=training
        )
        # Embed the GROUND-TRUTH tracks (teacher forcing), not the predictions.
        f0_embedding = self.f0_embeddings(
            tf.expand_dims(f0_gts, 2)
        )  # [batch_size, mel_length, feature]
        energy_embedding = self.energy_embeddings(
            tf.expand_dims(energy_gts, 2)
        )  # [batch_size, mel_length, feature]
        # apply dropout both training/inference (training=True on purpose)
        f0_embedding = self.f0_dropout(f0_embedding, training=True)
        energy_embedding = self.energy_dropout(energy_embedding, training=True)
        # sum features into the encoder states before length regulation
        last_encoder_hidden_states += f0_embedding + energy_embedding
        length_regulator_outputs, encoder_masks = self.length_regulator(
            [last_encoder_hidden_states, duration_gts], training=training
        )
        # create decoder positional embedding (1-based, zeroed at padding)
        decoder_pos = tf.range(
            1, tf.shape(length_regulator_outputs)[1] + 1, dtype=tf.int32
        )
        masked_decoder_pos = tf.expand_dims(decoder_pos, 0) * encoder_masks
        decoder_output = self.decoder(
            [length_regulator_outputs, speaker_ids, encoder_masks, masked_decoder_pos],
            training=training,
        )
        last_decoder_hidden_states = decoder_output[0]
        # More than one decoder hidden-state layer could be summed/concatenated.
        mels_before = self.mel_dense(last_decoder_hidden_states)
        # postnet residual refinement
        mels_after = (
            self.postnet([mels_before, encoder_masks], training=training) + mels_before
        )
        outputs = (
            mels_before,
            mels_after,
            duration_outputs,
            f0_outputs,
            energy_outputs,
        )
        return outputs
    def _inference(
        self, input_ids, speaker_ids, speed_ratios, f0_ratios, energy_ratios, **kwargs,
    ):
        """Free-running forward pass using PREDICTED duration/F0/energy,
        each scaled by its user-supplied ratio."""
        attention_mask = tf.math.not_equal(input_ids, 0)
        embedding_output = self.embeddings([input_ids, speaker_ids], training=False)
        encoder_output = self.encoder(
            [embedding_output, attention_mask], training=False
        )
        last_encoder_hidden_states = encoder_output[0]
        # expand ratios for broadcasting against [B, T] predictions
        speed_ratios = tf.expand_dims(speed_ratios, 1)  # [B, 1]
        f0_ratios = tf.expand_dims(f0_ratios, 1)  # [B, 1]
        energy_ratios = tf.expand_dims(energy_ratios, 1)  # [B, 1]
        # The predictors use last_encoder_hidden_states; more hidden-state
        # layers could be used here rather than just the encoder's last one.
        duration_outputs = self.duration_predictor(
            [last_encoder_hidden_states, speaker_ids, attention_mask]
        )  # [batch_size, length]
        # predictor outputs log durations; invert and scale by speed ratio
        duration_outputs = tf.nn.relu(tf.math.exp(duration_outputs) - 1.0)
        duration_outputs = tf.cast(
            tf.math.round(duration_outputs * speed_ratios), tf.int32
        )
        f0_outputs = self.f0_predictor(
            [last_encoder_hidden_states, speaker_ids, attention_mask], training=False
        )
        f0_outputs *= f0_ratios
        energy_outputs = self.energy_predictor(
            [last_encoder_hidden_states, speaker_ids, attention_mask], training=False
        )
        energy_outputs *= energy_ratios
        # dropout stays active at inference as well (training=True on purpose)
        f0_embedding = self.f0_dropout(
            self.f0_embeddings(tf.expand_dims(f0_outputs, 2)), training=True
        )
        energy_embedding = self.energy_dropout(
            self.energy_embeddings(tf.expand_dims(energy_outputs, 2)), training=True
        )
        # sum features into the encoder states before length regulation
        last_encoder_hidden_states += f0_embedding + energy_embedding
        length_regulator_outputs, encoder_masks = self.length_regulator(
            [last_encoder_hidden_states, duration_outputs], training=False
        )
        # create decoder positional embedding (1-based, zeroed at padding)
        decoder_pos = tf.range(
            1, tf.shape(length_regulator_outputs)[1] + 1, dtype=tf.int32
        )
        masked_decoder_pos = tf.expand_dims(decoder_pos, 0) * encoder_masks
        decoder_output = self.decoder(
            [length_regulator_outputs, speaker_ids, encoder_masks, masked_decoder_pos],
            training=False,
        )
        last_decoder_hidden_states = decoder_output[0]
        # More than one decoder hidden-state layer could be summed/concatenated.
        mel_before = self.mel_dense(last_decoder_hidden_states)
        mel_after = (
            self.postnet([mel_before, encoder_masks], training=False) + mel_before
        )
        outputs = (mel_before, mel_after, duration_outputs, f0_outputs, energy_outputs)
        return outputs
    def setup_inference_fn(self):
        """Wrap `_inference` into concrete tf.functions: a batched variant and
        a batch-of-one variant suitable for TFLite conversion."""
        self.inference = tf.function(
            self._inference,
            experimental_relax_shapes=True,
            input_signature=[
                tf.TensorSpec(shape=[None, None], dtype=tf.int32, name="input_ids"),
                tf.TensorSpec(shape=[None,], dtype=tf.int32, name="speaker_ids"),
                tf.TensorSpec(shape=[None,], dtype=tf.float32, name="speed_ratios"),
                tf.TensorSpec(shape=[None,], dtype=tf.float32, name="f0_ratios"),
                tf.TensorSpec(shape=[None,], dtype=tf.float32, name="energy_ratios"),
            ],
        )
        self.inference_tflite = tf.function(
            self._inference,
            experimental_relax_shapes=True,
            input_signature=[
                tf.TensorSpec(shape=[1, None], dtype=tf.int32, name="input_ids"),
                tf.TensorSpec(shape=[1,], dtype=tf.int32, name="speaker_ids"),
                tf.TensorSpec(shape=[1,], dtype=tf.float32, name="speed_ratios"),
                tf.TensorSpec(shape=[1,], dtype=tf.float32, name="f0_ratios"),
                tf.TensorSpec(shape=[1,], dtype=tf.float32, name="energy_ratios"),
            ],
        )
| 12,399 | 38.616613 | 100 | py |
TensorFlowTTS | TensorFlowTTS-master/tensorflow_tts/models/fastspeech.py | # -*- coding: utf-8 -*-
# Copyright 2020 The FastSpeech Authors, The HuggingFace Inc. team and Minh Nguyen (@dathudeptrai)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tensorflow Model modules for FastSpeech."""
import numpy as np
import tensorflow as tf
from tensorflow_tts.models import BaseModel
def get_initializer(initializer_range=0.02):
    """Build a zero-mean truncated-normal weight initializer.

    Args:
        initializer_range: float, standard deviation of the distribution.

    Returns:
        A ``tf.keras.initializers.TruncatedNormal`` with the given stddev.
    """
    return tf.keras.initializers.TruncatedNormal(
        mean=0.0, stddev=initializer_range
    )
def gelu(x):
    """Gaussian Error Linear Unit (exact formulation via erf)."""
    gauss_cdf = 0.5 * (1.0 + tf.math.erf(x / tf.math.sqrt(2.0)))
    return x * gauss_cdf
def gelu_new(x):
    """Smoother Gaussian Error Linear Unit (tanh approximation)."""
    inner = np.sqrt(2 / np.pi) * (x + 0.044715 * tf.pow(x, 3))
    cdf_approx = 0.5 * (1.0 + tf.tanh(inner))
    return x * cdf_approx
def swish(x):
    """Swish activation function; delegates to TF's fused implementation."""
    return tf.nn.swish(x)
def mish(x):
    """Mish activation function: x * tanh(softplus(x))."""
    return x * tf.math.tanh(tf.math.softplus(x))
# Registry mapping activation-function names (as referenced from model
# configs) to the corresponding callables / Keras activation layers.
ACT2FN = {
    "identity": tf.keras.layers.Activation("linear"),
    "tanh": tf.keras.layers.Activation("tanh"),
    "gelu": tf.keras.layers.Activation(gelu),
    "relu": tf.keras.activations.relu,
    "swish": tf.keras.layers.Activation(swish),
    "gelu_new": tf.keras.layers.Activation(gelu_new),
    "mish": tf.keras.layers.Activation(mish),
}
class TFEmbedding(tf.keras.layers.Embedding):
    """Faster version of embedding."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def call(self, inputs):
        """Look up rows of the embedding matrix by integer id."""
        indices = tf.cast(inputs, tf.int32)
        return tf.gather(self.embeddings, indices)
class TFFastSpeechEmbeddings(tf.keras.layers.Layer):
    """Construct charactor/phoneme/positional/speaker embeddings."""
    def __init__(self, config, **kwargs):
        """Init variables."""
        super().__init__(**kwargs)
        self.vocab_size = config.vocab_size
        self.hidden_size = config.encoder_self_attention_params.hidden_size
        self.initializer_range = config.initializer_range
        self.config = config
        # Fixed (non-trainable) sinusoidal position table; row 0 is a zero
        # padding row, hence the "+ 1" on the table size.
        self.position_embeddings = TFEmbedding(
            config.max_position_embeddings + 1,
            self.hidden_size,
            weights=[
                self._sincos_embedding(
                    self.hidden_size, self.config.max_position_embeddings
                )
            ],
            name="position_embeddings",
            trainable=False,
        )
        if config.n_speakers > 1:
            # Multi-speaker setup: learnable speaker table plus a projection
            # used to produce per-speaker bias features.
            self.encoder_speaker_embeddings = TFEmbedding(
                config.n_speakers,
                self.hidden_size,
                embeddings_initializer=get_initializer(self.initializer_range),
                name="speaker_embeddings",
            )
            self.speaker_fc = tf.keras.layers.Dense(
                units=self.hidden_size, name="speaker_fc"
            )
    def build(self, input_shape):
        """Build shared charactor/phoneme embedding layers."""
        with tf.name_scope("charactor_embeddings"):
            self.charactor_embeddings = self.add_weight(
                "weight",
                shape=[self.vocab_size, self.hidden_size],
                initializer=get_initializer(self.initializer_range),
            )
        super().build(input_shape)
    def call(self, inputs, training=False):
        """Get charactor embeddings of inputs.
        Args:
            1. charactor, Tensor (int32) shape [batch_size, length].
            2. speaker_id, Tensor (int32) shape [batch_size]
        Returns:
            Tensor (float32) shape [batch_size, length, embedding_size].
        """
        return self._embedding(inputs, training=training)
    def _embedding(self, inputs, training=False):
        """Applies embedding based on inputs tensor."""
        input_ids, speaker_ids = inputs
        input_shape = tf.shape(input_ids)
        seq_length = input_shape[1]
        # Positions are 1-based; index 0 of the position table is padding.
        position_ids = tf.range(1, seq_length + 1, dtype=tf.int32)[tf.newaxis, :]
        # create embeddings
        inputs_embeds = tf.gather(self.charactor_embeddings, input_ids)
        position_embeddings = self.position_embeddings(position_ids)
        # sum embedding
        embeddings = inputs_embeds + tf.cast(position_embeddings, inputs_embeds.dtype)
        if self.config.n_speakers > 1:
            speaker_embeddings = self.encoder_speaker_embeddings(speaker_ids)
            speaker_features = tf.math.softplus(self.speaker_fc(speaker_embeddings))
            # extended speaker embeddings: broadcast over the time axis.
            extended_speaker_features = speaker_features[:, tf.newaxis, :]
            embeddings += extended_speaker_features
        return embeddings
    def _sincos_embedding(
        self, hidden_size, max_positional_embedding,
    ):
        # Standard transformer sinusoidal table: sin on even dims, cos on odd.
        position_enc = np.array(
            [
                [
                    pos / np.power(10000, 2.0 * (i // 2) / hidden_size)
                    for i in range(hidden_size)
                ]
                for pos in range(max_positional_embedding + 1)
            ]
        )
        position_enc[:, 0::2] = np.sin(position_enc[:, 0::2])
        position_enc[:, 1::2] = np.cos(position_enc[:, 1::2])
        # pad embedding.
        position_enc[0] = 0.0
        return position_enc
    def resize_positional_embeddings(self, new_size):
        # Rebuild the (non-trainable) position table to support longer inputs.
        self.position_embeddings = TFEmbedding(
            new_size + 1,
            self.hidden_size,
            weights=[self._sincos_embedding(self.hidden_size, new_size)],
            name="position_embeddings",
            trainable=False,
        )
class TFFastSpeechSelfAttention(tf.keras.layers.Layer):
    """Self attention module for fastspeech."""
    def __init__(self, config, **kwargs):
        """Init variables."""
        super().__init__(**kwargs)
        if config.hidden_size % config.num_attention_heads != 0:
            raise ValueError(
                "The hidden size (%d) is not a multiple of the number of attention "
                "heads (%d)" % (config.hidden_size, config.num_attention_heads)
            )
        self.output_attentions = config.output_attentions
        self.num_attention_heads = config.num_attention_heads
        # Total projection width across all heads.
        self.all_head_size = self.num_attention_heads * config.attention_head_size
        self.query = tf.keras.layers.Dense(
            self.all_head_size,
            kernel_initializer=get_initializer(config.initializer_range),
            name="query",
        )
        self.key = tf.keras.layers.Dense(
            self.all_head_size,
            kernel_initializer=get_initializer(config.initializer_range),
            name="key",
        )
        self.value = tf.keras.layers.Dense(
            self.all_head_size,
            kernel_initializer=get_initializer(config.initializer_range),
            name="value",
        )
        self.dropout = tf.keras.layers.Dropout(config.attention_probs_dropout_prob)
        self.config = config
    def transpose_for_scores(self, x, batch_size):
        """Transpose to calculate attention scores.
        Reshapes [batch, time, all_head_size] into
        [batch, num_heads, time, head_size].
        """
        x = tf.reshape(
            x,
            (batch_size, -1, self.num_attention_heads, self.config.attention_head_size),
        )
        return tf.transpose(x, perm=[0, 2, 1, 3])
    def call(self, inputs, training=False):
        """Call logic."""
        hidden_states, attention_mask = inputs
        batch_size = tf.shape(hidden_states)[0]
        mixed_query_layer = self.query(hidden_states)
        mixed_key_layer = self.key(hidden_states)
        mixed_value_layer = self.value(hidden_states)
        query_layer = self.transpose_for_scores(mixed_query_layer, batch_size)
        key_layer = self.transpose_for_scores(mixed_key_layer, batch_size)
        value_layer = self.transpose_for_scores(mixed_value_layer, batch_size)
        attention_scores = tf.matmul(query_layer, key_layer, transpose_b=True)
        dk = tf.cast(
            tf.shape(key_layer)[-1], attention_scores.dtype
        )  # scale attention_scores
        attention_scores = attention_scores / tf.math.sqrt(dk)
        if attention_mask is not None:
            # extended_attention_masks for self attention encoder.
            extended_attention_mask = attention_mask[:, tf.newaxis, tf.newaxis, :]
            extended_attention_mask = tf.cast(
                extended_attention_mask, attention_scores.dtype
            )
            # Masked-out (padding) positions get a large negative additive
            # bias so their softmax probability is ~0.
            extended_attention_mask = (1.0 - extended_attention_mask) * -1e9
            attention_scores = attention_scores + extended_attention_mask
        # Normalize the attention scores to probabilities.
        attention_probs = tf.nn.softmax(attention_scores, axis=-1)
        attention_probs = self.dropout(attention_probs, training=training)
        context_layer = tf.matmul(attention_probs, value_layer)
        # Merge the heads back: [B, H, T, D] -> [B, T, H*D].
        context_layer = tf.transpose(context_layer, perm=[0, 2, 1, 3])
        context_layer = tf.reshape(context_layer, (batch_size, -1, self.all_head_size))
        outputs = (
            (context_layer, attention_probs)
            if self.output_attentions
            else (context_layer,)
        )
        return outputs
class TFFastSpeechSelfOutput(tf.keras.layers.Layer):
    """Fastspeech output of self attention module.
    Projection -> dropout -> residual add -> LayerNorm.
    """
    def __init__(self, config, **kwargs):
        """Init variables."""
        super().__init__(**kwargs)
        self.dense = tf.keras.layers.Dense(
            config.hidden_size,
            kernel_initializer=get_initializer(config.initializer_range),
            name="dense",
        )
        self.LayerNorm = tf.keras.layers.LayerNormalization(
            epsilon=config.layer_norm_eps, name="LayerNorm"
        )
        self.dropout = tf.keras.layers.Dropout(config.hidden_dropout_prob)
    def call(self, inputs, training=False):
        """Call logic."""
        hidden_states, input_tensor = inputs
        hidden_states = self.dense(hidden_states)
        hidden_states = self.dropout(hidden_states, training=training)
        # Residual connection around the attention block, then LayerNorm.
        hidden_states = self.LayerNorm(hidden_states + input_tensor)
        return hidden_states
class TFFastSpeechAttention(tf.keras.layers.Layer):
    """Fastspeech attention module: self-attention + output projection."""
    def __init__(self, config, **kwargs):
        """Init variables."""
        super().__init__(**kwargs)
        self.self_attention = TFFastSpeechSelfAttention(config, name="self")
        self.dense_output = TFFastSpeechSelfOutput(config, name="output")
    def call(self, inputs, training=False):
        input_tensor, attention_mask = inputs
        self_outputs = self.self_attention(
            [input_tensor, attention_mask], training=training
        )
        attention_output = self.dense_output(
            [self_outputs[0], input_tensor], training=training
        )
        # Zero out padding positions so they do not leak into later layers.
        masked_attention_output = attention_output * tf.cast(
            tf.expand_dims(attention_mask, 2), dtype=attention_output.dtype
        )
        outputs = (masked_attention_output,) + self_outputs[
            1:
        ]  # add attentions if we output them
        return outputs
class TFFastSpeechIntermediate(tf.keras.layers.Layer):
    """Intermediate representation module.
    FastSpeech's position-wise feed-forward network implemented as two 1D
    convolutions (kernel size > 1 gives a local receptive field, unlike the
    pure Dense FFN in vanilla transformers).
    """
    def __init__(self, config, **kwargs):
        """Init variables."""
        super().__init__(**kwargs)
        self.conv1d_1 = tf.keras.layers.Conv1D(
            config.intermediate_size,
            kernel_size=config.intermediate_kernel_size,
            kernel_initializer=get_initializer(config.initializer_range),
            padding="same",
            name="conv1d_1",
        )
        self.conv1d_2 = tf.keras.layers.Conv1D(
            config.hidden_size,
            kernel_size=config.intermediate_kernel_size,
            kernel_initializer=get_initializer(config.initializer_range),
            padding="same",
            name="conv1d_2",
        )
        # `hidden_act` may be a registry key (string) or a callable.
        if isinstance(config.hidden_act, str):
            self.intermediate_act_fn = ACT2FN[config.hidden_act]
        else:
            self.intermediate_act_fn = config.hidden_act
    def call(self, inputs):
        """Call logic."""
        hidden_states, attention_mask = inputs
        hidden_states = self.conv1d_1(hidden_states)
        hidden_states = self.intermediate_act_fn(hidden_states)
        hidden_states = self.conv1d_2(hidden_states)
        # Zero out padding positions after the convolutions.
        masked_hidden_states = hidden_states * tf.cast(
            tf.expand_dims(attention_mask, 2), dtype=hidden_states.dtype
        )
        return masked_hidden_states
class TFFastSpeechOutput(tf.keras.layers.Layer):
    """Output module: dropout -> residual add -> LayerNorm."""
    def __init__(self, config, **kwargs):
        """Init variables."""
        super().__init__(**kwargs)
        self.LayerNorm = tf.keras.layers.LayerNormalization(
            epsilon=config.layer_norm_eps, name="LayerNorm"
        )
        self.dropout = tf.keras.layers.Dropout(config.hidden_dropout_prob)
    def call(self, inputs, training=False):
        """Call logic."""
        hidden_states, input_tensor = inputs
        hidden_states = self.dropout(hidden_states, training=training)
        # Residual connection around the feed-forward block.
        hidden_states = self.LayerNorm(hidden_states + input_tensor)
        return hidden_states
class TFFastSpeechLayer(tf.keras.layers.Layer):
    """Fastspeech module (FFT module on the paper).
    One FFT block = multi-head self-attention + conv feed-forward,
    each with its own residual/LayerNorm.
    """
    def __init__(self, config, **kwargs):
        """Init variables."""
        super().__init__(**kwargs)
        self.attention = TFFastSpeechAttention(config, name="attention")
        self.intermediate = TFFastSpeechIntermediate(config, name="intermediate")
        self.bert_output = TFFastSpeechOutput(config, name="output")
    def call(self, inputs, training=False):
        """Call logic."""
        hidden_states, attention_mask = inputs
        attention_outputs = self.attention(
            [hidden_states, attention_mask], training=training
        )
        attention_output = attention_outputs[0]
        intermediate_output = self.intermediate(
            [attention_output, attention_mask], training=training
        )
        layer_output = self.bert_output(
            [intermediate_output, attention_output], training=training
        )
        # Zero out padding positions before handing off to the next layer.
        masked_layer_output = layer_output * tf.cast(
            tf.expand_dims(attention_mask, 2), dtype=layer_output.dtype
        )
        outputs = (masked_layer_output,) + attention_outputs[
            1:
        ]  # add attentions if we output them
        return outputs
class TFFastSpeechEncoder(tf.keras.layers.Layer):
    """Fast Speech encoder module: a stack of FFT blocks."""
    def __init__(self, config, **kwargs):
        """Init variables."""
        super().__init__(**kwargs)
        self.output_attentions = config.output_attentions
        self.output_hidden_states = config.output_hidden_states
        self.layer = [
            TFFastSpeechLayer(config, name="layer_._{}".format(i))
            for i in range(config.num_hidden_layers)
        ]
    def call(self, inputs, training=False):
        """Call logic.
        Returns a tuple whose first item is the final hidden states;
        intermediate hidden states and attention maps are appended when the
        corresponding config flags are set.
        """
        hidden_states, attention_mask = inputs
        all_hidden_states = ()
        all_attentions = ()
        for _, layer_module in enumerate(self.layer):
            if self.output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)
            layer_outputs = layer_module(
                [hidden_states, attention_mask], training=training
            )
            hidden_states = layer_outputs[0]
            if self.output_attentions:
                all_attentions = all_attentions + (layer_outputs[1],)
        # Add last layer
        if self.output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)
        outputs = (hidden_states,)
        if self.output_hidden_states:
            outputs = outputs + (all_hidden_states,)
        if self.output_attentions:
            outputs = outputs + (all_attentions,)
        return outputs  # outputs, (hidden states), (attentions)
class TFFastSpeechDecoder(TFFastSpeechEncoder):
    """Fast Speech decoder module.
    Same FFT stack as the encoder, plus its own positional embedding and an
    optional projection when encoder/decoder hidden sizes differ.
    """
    def __init__(self, config, **kwargs):
        # Pop before super().__init__ so the base Layer does not see it.
        self.is_compatible_encoder = kwargs.pop("is_compatible_encoder", True)
        super().__init__(config, **kwargs)
        self.config = config
        # create decoder positional embedding
        self.decoder_positional_embeddings = TFEmbedding(
            config.max_position_embeddings + 1,
            config.hidden_size,
            weights=[self._sincos_embedding()],
            name="position_embeddings",
            trainable=False,
        )
        if self.is_compatible_encoder is False:
            # Bridge mismatched encoder/decoder hidden sizes.
            self.project_compatible_decoder = tf.keras.layers.Dense(
                units=config.hidden_size, name="project_compatible_decoder"
            )
        if config.n_speakers > 1:
            self.decoder_speaker_embeddings = TFEmbedding(
                config.n_speakers,
                config.hidden_size,
                embeddings_initializer=get_initializer(config.initializer_range),
                name="speaker_embeddings",
            )
            self.speaker_fc = tf.keras.layers.Dense(
                units=config.hidden_size, name="speaker_fc"
            )
    def call(self, inputs, training=False):
        hidden_states, speaker_ids, encoder_mask, decoder_pos = inputs
        if self.is_compatible_encoder is False:
            hidden_states = self.project_compatible_decoder(hidden_states)
        # calculate new hidden states.
        hidden_states += tf.cast(
            self.decoder_positional_embeddings(decoder_pos), hidden_states.dtype
        )
        if self.config.n_speakers > 1:
            speaker_embeddings = self.decoder_speaker_embeddings(speaker_ids)
            speaker_features = tf.math.softplus(self.speaker_fc(speaker_embeddings))
            # extended speaker embeddings: broadcast over the time axis.
            extended_speaker_features = speaker_features[:, tf.newaxis, :]
            hidden_states += extended_speaker_features
        return super().call([hidden_states, encoder_mask], training=training)
    def _sincos_embedding(self):
        # Sinusoidal position table (row 0 reserved as zero padding).
        position_enc = np.array(
            [
                [
                    pos / np.power(10000, 2.0 * (i // 2) / self.config.hidden_size)
                    for i in range(self.config.hidden_size)
                ]
                for pos in range(self.config.max_position_embeddings + 1)
            ]
        )
        position_enc[:, 0::2] = np.sin(position_enc[:, 0::2])
        position_enc[:, 1::2] = np.cos(position_enc[:, 1::2])
        # pad embedding.
        position_enc[0] = 0.0
        return position_enc
class TFTacotronPostnet(tf.keras.layers.Layer):
    """Tacotron-2 postnet.
    Stack of Conv1D + BatchNorm blocks that predicts a residual refinement of
    the mel spectrogram; the final conv maps back to `num_mels` channels.
    """
    def __init__(self, config, **kwargs):
        """Init variables."""
        super().__init__(**kwargs)
        self.conv_batch_norm = []
        for i in range(config.n_conv_postnet):
            conv = tf.keras.layers.Conv1D(
                filters=config.postnet_conv_filters
                if i < config.n_conv_postnet - 1
                else config.num_mels,
                kernel_size=config.postnet_conv_kernel_sizes,
                padding="same",
                name="conv_._{}".format(i),
            )
            batch_norm = tf.keras.layers.BatchNormalization(
                axis=-1, name="batch_norm_._{}".format(i)
            )
            self.conv_batch_norm.append((conv, batch_norm))
        self.dropout = tf.keras.layers.Dropout(
            rate=config.postnet_dropout_rate, name="dropout"
        )
        # tanh on every block except the last, which stays linear.
        self.activation = [tf.nn.tanh] * (config.n_conv_postnet - 1) + [tf.identity]
    def call(self, inputs, training=False):
        """Call logic."""
        outputs, mask = inputs
        extended_mask = tf.cast(tf.expand_dims(mask, axis=2), outputs.dtype)
        for i, (conv, bn) in enumerate(self.conv_batch_norm):
            outputs = conv(outputs)
            outputs = bn(outputs)
            outputs = self.activation[i](outputs)
            outputs = self.dropout(outputs, training=training)
        # Zero out frames past each utterance's real length.
        return outputs * extended_mask
class TFFastSpeechDurationPredictor(tf.keras.layers.Layer):
    """FastSpeech duration predictor module.
    Conv1D -> LayerNorm -> ReLU6 -> Dropout stack followed by a scalar Dense
    head, predicting one (log-domain) duration per input token.
    """
    def __init__(self, config, **kwargs):
        """Init variables."""
        super().__init__(**kwargs)
        self.conv_layers = []
        for i in range(config.num_duration_conv_layers):
            self.conv_layers.append(
                tf.keras.layers.Conv1D(
                    config.duration_predictor_filters,
                    config.duration_predictor_kernel_sizes,
                    padding="same",
                    name="conv_._{}".format(i),
                )
            )
            self.conv_layers.append(
                tf.keras.layers.LayerNormalization(
                    epsilon=config.layer_norm_eps, name="LayerNorm_._{}".format(i)
                )
            )
            self.conv_layers.append(tf.keras.layers.Activation(tf.nn.relu6))
            self.conv_layers.append(
                tf.keras.layers.Dropout(config.duration_predictor_dropout_probs)
            )
        self.conv_layers_sequence = tf.keras.Sequential(self.conv_layers)
        self.output_layer = tf.keras.layers.Dense(1)
    def call(self, inputs, training=False):
        """Call logic."""
        encoder_hidden_states, attention_mask = inputs
        attention_mask = tf.cast(
            tf.expand_dims(attention_mask, 2), encoder_hidden_states.dtype
        )
        # mask encoder hidden states
        masked_encoder_hidden_states = encoder_hidden_states * attention_mask
        # pass though first layer
        outputs = self.conv_layers_sequence(masked_encoder_hidden_states)
        outputs = self.output_layer(outputs)
        masked_outputs = outputs * attention_mask
        # NOTE(review): relu6 clamps predictions to [0, 6]; presumably
        # durations are predicted in the log domain (see tf.math.exp in
        # TFFastSpeech._inference) — confirm against training targets.
        return tf.squeeze(tf.nn.relu6(masked_outputs), -1)  # make sure positive value.
class TFFastSpeechLengthRegulator(tf.keras.layers.Layer):
    """FastSpeech lengthregulator module.
    Expands each encoder frame by its (integer) duration so the sequence
    reaches mel-frame resolution. Two code paths: a simple single-batch path
    for TFLite export, and a tf.while_loop for arbitrary batch sizes.
    """
    def __init__(self, config, **kwargs):
        """Init variables."""
        # Pop before super().__init__ so the base Layer does not see it.
        self.enable_tflite_convertible = kwargs.pop("enable_tflite_convertible", False)
        super().__init__(**kwargs)
        self.config = config
    def call(self, inputs, training=False):
        """Call logic.
        Args:
            1. encoder_hidden_states, Tensor (float32) shape [batch_size, length, hidden_size]
            2. durations_gt, Tensor (float32/int32) shape [batch_size, length]
        """
        encoder_hidden_states, durations_gt = inputs
        outputs, encoder_masks = self._length_regulator(
            encoder_hidden_states, durations_gt
        )
        return outputs, encoder_masks
    def _length_regulator(self, encoder_hidden_states, durations_gt):
        """Length regulator logic."""
        sum_durations = tf.reduce_sum(durations_gt, axis=-1)  # [batch_size]
        # Pad every item in the batch to the longest expanded length.
        max_durations = tf.reduce_max(sum_durations)
        input_shape = tf.shape(encoder_hidden_states)
        batch_size = input_shape[0]
        hidden_size = input_shape[-1]
        # initialize output hidden states and encoder masking.
        if self.enable_tflite_convertible:
            # There is only 1 batch in inference, so we don't have to use
            # `tf.While` op with 3-D output tensor.
            repeats = durations_gt[0]
            real_length = tf.reduce_sum(repeats)
            pad_size = max_durations - real_length
            # masks : [max_durations]
            masks = tf.sequence_mask([real_length], max_durations, dtype=tf.int32)
            repeat_encoder_hidden_states = tf.repeat(
                encoder_hidden_states[0], repeats=repeats, axis=0
            )
            repeat_encoder_hidden_states = tf.expand_dims(
                tf.pad(repeat_encoder_hidden_states, [[0, pad_size], [0, 0]]), 0
            )  # [1, max_durations, hidden_size]
            outputs = repeat_encoder_hidden_states
            encoder_masks = masks
        else:
            # Empty accumulators; the while_loop below concatenates one batch
            # item per iteration along axis 0.
            outputs = tf.zeros(
                shape=[0, max_durations, hidden_size], dtype=encoder_hidden_states.dtype
            )
            encoder_masks = tf.zeros(shape=[0, max_durations], dtype=tf.int32)
            def condition(
                i,
                batch_size,
                outputs,
                encoder_masks,
                encoder_hidden_states,
                durations_gt,
                max_durations,
            ):
                return tf.less(i, batch_size)
            def body(
                i,
                batch_size,
                outputs,
                encoder_masks,
                encoder_hidden_states,
                durations_gt,
                max_durations,
            ):
                # Expand item i: repeat each frame by its duration, then pad
                # up to max_durations.
                repeats = durations_gt[i]
                real_length = tf.reduce_sum(repeats)
                pad_size = max_durations - real_length
                masks = tf.sequence_mask([real_length], max_durations, dtype=tf.int32)
                repeat_encoder_hidden_states = tf.repeat(
                    encoder_hidden_states[i], repeats=repeats, axis=0
                )
                repeat_encoder_hidden_states = tf.expand_dims(
                    tf.pad(repeat_encoder_hidden_states, [[0, pad_size], [0, 0]]), 0
                )  # [1, max_durations, hidden_size]
                outputs = tf.concat([outputs, repeat_encoder_hidden_states], axis=0)
                encoder_masks = tf.concat([encoder_masks, masks], axis=0)
                return [
                    i + 1,
                    batch_size,
                    outputs,
                    encoder_masks,
                    encoder_hidden_states,
                    durations_gt,
                    max_durations,
                ]
            # initialize iteration i.
            i = tf.constant(0, dtype=tf.int32)
            # Shape invariants leave batch and time dims unknown because the
            # accumulators grow each iteration.
            _, _, outputs, encoder_masks, _, _, _, = tf.while_loop(
                condition,
                body,
                [
                    i,
                    batch_size,
                    outputs,
                    encoder_masks,
                    encoder_hidden_states,
                    durations_gt,
                    max_durations,
                ],
                shape_invariants=[
                    i.get_shape(),
                    batch_size.get_shape(),
                    tf.TensorShape(
                        [
                            None,
                            None,
                            self.config.encoder_self_attention_params.hidden_size,
                        ]
                    ),
                    tf.TensorShape([None, None]),
                    encoder_hidden_states.get_shape(),
                    durations_gt.get_shape(),
                    max_durations.get_shape(),
                ],
            )
        return outputs, encoder_masks
class TFFastSpeech(BaseModel):
    """TF Fastspeech module.
    Pipeline: embeddings -> encoder -> duration predictor / length regulator
    -> decoder -> mel projection (+ postnet residual refinement).
    """
    def __init__(self, config, **kwargs):
        """Init layers for fastspeech."""
        self.enable_tflite_convertible = kwargs.pop("enable_tflite_convertible", False)
        super().__init__(**kwargs)
        self.embeddings = TFFastSpeechEmbeddings(config, name="embeddings")
        self.encoder = TFFastSpeechEncoder(
            config.encoder_self_attention_params, name="encoder"
        )
        # dtype=tf.float32 pins these heads to float32 under mixed precision.
        self.duration_predictor = TFFastSpeechDurationPredictor(
            config, dtype=tf.float32, name="duration_predictor"
        )
        self.length_regulator = TFFastSpeechLengthRegulator(
            config,
            enable_tflite_convertible=self.enable_tflite_convertible,
            name="length_regulator",
        )
        self.decoder = TFFastSpeechDecoder(
            config.decoder_self_attention_params,
            is_compatible_encoder=config.encoder_self_attention_params.hidden_size
            == config.decoder_self_attention_params.hidden_size,
            name="decoder",
        )
        self.mel_dense = tf.keras.layers.Dense(
            units=config.num_mels, dtype=tf.float32, name="mel_before"
        )
        self.postnet = TFTacotronPostnet(
            config=config, dtype=tf.float32, name="postnet"
        )
        self.setup_inference_fn()
    def _build(self):
        """Dummy input for building model."""
        # fake inputs
        input_ids = tf.convert_to_tensor([[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]], tf.int32)
        speaker_ids = tf.convert_to_tensor([0], tf.int32)
        duration_gts = tf.convert_to_tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], tf.int32)
        self(input_ids, speaker_ids, duration_gts)
    def resize_positional_embeddings(self, new_size):
        # Swap in a larger position table, then re-trace with a dummy call.
        self.embeddings.resize_positional_embeddings(new_size)
        self._build()
    def call(
        self, input_ids, speaker_ids, duration_gts, training=False, **kwargs,
    ):
        """Call logic (training path: uses ground-truth durations).
        Returns:
            (mel_before, mel_after, duration_outputs)
        """
        # Token id 0 is padding; everything else is attended to.
        attention_mask = tf.math.not_equal(input_ids, 0)
        embedding_output = self.embeddings([input_ids, speaker_ids], training=training)
        encoder_output = self.encoder(
            [embedding_output, attention_mask], training=training
        )
        last_encoder_hidden_states = encoder_output[0]
        # duration predictor, here use last_encoder_hidden_states, u can use more hidden_states layers
        # rather than just use last_hidden_states of encoder for duration_predictor.
        duration_outputs = self.duration_predictor(
            [last_encoder_hidden_states, attention_mask]
        )  # [batch_size, length]
        length_regulator_outputs, encoder_masks = self.length_regulator(
            [last_encoder_hidden_states, duration_gts], training=training
        )
        # create decoder positional embedding
        decoder_pos = tf.range(
            1, tf.shape(length_regulator_outputs)[1] + 1, dtype=tf.int32
        )
        # Zero out positions past each utterance's real length.
        masked_decoder_pos = tf.expand_dims(decoder_pos, 0) * encoder_masks
        decoder_output = self.decoder(
            [length_regulator_outputs, speaker_ids, encoder_masks, masked_decoder_pos],
            training=training,
        )
        last_decoder_hidden_states = decoder_output[0]
        # here u can use sum or concat more than 1 hidden states layers from decoder.
        mel_before = self.mel_dense(last_decoder_hidden_states)
        # Postnet predicts a residual correction on top of mel_before.
        mel_after = (
            self.postnet([mel_before, encoder_masks], training=training) + mel_before
        )
        outputs = (mel_before, mel_after, duration_outputs)
        return outputs
    def _inference(self, input_ids, speaker_ids, speed_ratios, **kwargs):
        """Call logic (inference path: uses predicted durations)."""
        attention_mask = tf.math.not_equal(input_ids, 0)
        embedding_output = self.embeddings([input_ids, speaker_ids], training=False)
        encoder_output = self.encoder(
            [embedding_output, attention_mask], training=False
        )
        last_encoder_hidden_states = encoder_output[0]
        # duration predictor, here use last_encoder_hidden_states, u can use more hidden_states layers
        # rather than just use last_hidden_states of encoder for duration_predictor.
        duration_outputs = self.duration_predictor(
            [last_encoder_hidden_states, attention_mask]
        )  # [batch_size, length]
        # Predictions are in the log domain: exp(.) - 1 recovers frame counts.
        duration_outputs = tf.math.exp(duration_outputs) - 1.0
        if speed_ratios is None:
            speed_ratios = tf.convert_to_tensor(np.array([1.0]), dtype=tf.float32)
        speed_ratios = tf.expand_dims(speed_ratios, 1)
        # speed_ratios > 1 slows speech down, < 1 speeds it up.
        duration_outputs = tf.cast(
            tf.math.round(duration_outputs * speed_ratios), tf.int32
        )
        length_regulator_outputs, encoder_masks = self.length_regulator(
            [last_encoder_hidden_states, duration_outputs], training=False
        )
        # create decoder positional embedding
        decoder_pos = tf.range(
            1, tf.shape(length_regulator_outputs)[1] + 1, dtype=tf.int32
        )
        masked_decoder_pos = tf.expand_dims(decoder_pos, 0) * encoder_masks
        decoder_output = self.decoder(
            [length_regulator_outputs, speaker_ids, encoder_masks, masked_decoder_pos],
            training=False,
        )
        last_decoder_hidden_states = decoder_output[0]
        # here u can use sum or concat more than 1 hidden states layers from decoder.
        mel_before = self.mel_dense(last_decoder_hidden_states)
        mel_after = (
            self.postnet([mel_before, encoder_masks], training=False) + mel_before
        )
        outputs = (mel_before, mel_after, duration_outputs)
        return outputs
    def setup_inference_fn(self):
        # Two traced variants of _inference: a general one, and one with a
        # fixed batch dim of 1 for TFLite conversion.
        self.inference = tf.function(
            self._inference,
            experimental_relax_shapes=True,
            input_signature=[
                tf.TensorSpec(shape=[None, None], dtype=tf.int32, name="input_ids"),
                tf.TensorSpec(shape=[None,], dtype=tf.int32, name="speaker_ids"),
                tf.TensorSpec(shape=[None,], dtype=tf.float32, name="speed_ratios"),
            ],
        )
        self.inference_tflite = tf.function(
            self._inference,
            experimental_relax_shapes=True,
            input_signature=[
                tf.TensorSpec(shape=[1, None], dtype=tf.int32, name="input_ids"),
                tf.TensorSpec(shape=[1,], dtype=tf.int32, name="speaker_ids"),
                tf.TensorSpec(shape=[1,], dtype=tf.float32, name="speed_ratios"),
            ],
        )
| 33,971 | 36.372937 | 102 | py |
TensorFlowTTS | TensorFlowTTS-master/tensorflow_tts/optimizers/adamweightdecay.py | # -*- coding: utf-8 -*-
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""AdamW for training self-attention."""
import re
import tensorflow as tf
class WarmUp(tf.keras.optimizers.schedules.LearningRateSchedule):
    """Applys a warmup schedule on a given learning rate decay schedule.
    For the first `warmup_steps` steps the learning rate ramps up as
    init_lr * (step / warmup_steps) ** power; afterwards it follows
    `decay_schedule_fn(step)`.
    """
    def __init__(
        self,
        initial_learning_rate,
        decay_schedule_fn,
        warmup_steps,
        power=1.0,
        name=None,
    ):
        super(WarmUp, self).__init__()
        self.initial_learning_rate = initial_learning_rate
        self.warmup_steps = warmup_steps
        self.power = power
        self.decay_schedule_fn = decay_schedule_fn
        self.name = name
    def __call__(self, step):
        with tf.name_scope(self.name or "WarmUp") as name:
            # Implements polynomial warmup. i.e., if global_step < warmup_steps, the
            # learning rate will be `global_step/num_warmup_steps * init_lr`.
            global_step_float = tf.cast(step, tf.float32)
            warmup_steps_float = tf.cast(self.warmup_steps, tf.float32)
            warmup_percent_done = global_step_float / warmup_steps_float
            warmup_learning_rate = self.initial_learning_rate * tf.math.pow(
                warmup_percent_done, self.power
            )
            # Graph-mode branch: warmup ramp vs. post-warmup decay schedule.
            return tf.cond(
                global_step_float < warmup_steps_float,
                lambda: warmup_learning_rate,
                lambda: self.decay_schedule_fn(step),
                name=name,
            )
    def get_config(self):
        # Serialization support for tf.keras schedule (de)serialization.
        return {
            "initial_learning_rate": self.initial_learning_rate,
            "decay_schedule_fn": self.decay_schedule_fn,
            "warmup_steps": self.warmup_steps,
            "power": self.power,
            "name": self.name,
        }
class AdamWeightDecay(tf.keras.optimizers.Adam):
    """Adam enables L2 weight decay and clip_by_global_norm on gradients.
    Just adding the square of the weights to the loss function is *not* the
    correct way of using L2 regularization/weight decay with Adam, since that will
    interact with the m and v parameters in strange ways.
    Instead we want ot decay the weights in a manner that doesn't interact with
    the m/v parameters. This is equivalent to adding the square of the weights to
    the loss with plain (non-momentum) SGD.
    """
    def __init__(
        self,
        learning_rate=0.001,
        beta_1=0.9,
        beta_2=0.999,
        epsilon=1e-7,
        amsgrad=False,
        weight_decay_rate=0.0,
        include_in_weight_decay=None,
        exclude_from_weight_decay=None,
        name="AdamWeightDecay",
        **kwargs
    ):
        super(AdamWeightDecay, self).__init__(
            learning_rate, beta_1, beta_2, epsilon, amsgrad, name, **kwargs
        )
        self.weight_decay_rate = weight_decay_rate
        # Regex lists deciding which variables (by name) get decayed;
        # include wins over exclude (see _do_use_weight_decay).
        self._include_in_weight_decay = include_in_weight_decay
        self._exclude_from_weight_decay = exclude_from_weight_decay
    @classmethod
    def from_config(cls, config):
        """Creates an optimizer from its config with WarmUp custom object."""
        custom_objects = {"WarmUp": WarmUp}
        return super(AdamWeightDecay, cls).from_config(
            config, custom_objects=custom_objects
        )
    def _prepare_local(self, var_device, var_dtype, apply_state):
        super(AdamWeightDecay, self)._prepare_local(var_device, var_dtype, apply_state)
        apply_state["weight_decay_rate"] = tf.constant(
            self.weight_decay_rate, name="adam_weight_decay_rate"
        )
    def _decay_weights_op(self, var, learning_rate, apply_state):
        # Decoupled weight decay: var -= lr * decay_rate * var, applied
        # outside the Adam moment updates.
        do_decay = self._do_use_weight_decay(var.name)
        if do_decay:
            return var.assign_sub(
                learning_rate * var * apply_state["weight_decay_rate"],
                use_locking=self._use_locking,
            )
        return tf.no_op()
    def apply_gradients(self, grads_and_vars, clip_norm=0.5, **kwargs):
        # Global-norm gradient clipping before delegating to Adam.
        grads, tvars = list(zip(*grads_and_vars))
        (grads, _) = tf.clip_by_global_norm(grads, clip_norm=clip_norm)
        return super(AdamWeightDecay, self).apply_gradients(zip(grads, tvars), **kwargs)
    def _get_lr(self, var_device, var_dtype, apply_state):
        """Retrieves the learning rate with the given state."""
        if apply_state is None:
            return self._decayed_lr_t[var_dtype], {}
        apply_state = apply_state or {}
        coefficients = apply_state.get((var_device, var_dtype))
        if coefficients is None:
            coefficients = self._fallback_apply_state(var_device, var_dtype)
            apply_state[(var_device, var_dtype)] = coefficients
        return coefficients["lr_t"], dict(apply_state=apply_state)
    def _resource_apply_dense(self, grad, var, apply_state=None):
        lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state)
        # Run the decay op before Adam's own dense update.
        decay = self._decay_weights_op(var, lr_t, apply_state)
        with tf.control_dependencies([decay]):
            return super(AdamWeightDecay, self)._resource_apply_dense(
                grad, var, **kwargs
            )
    def _resource_apply_sparse(self, grad, var, indices, apply_state=None):
        lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state)
        # Run the decay op before Adam's own sparse update.
        decay = self._decay_weights_op(var, lr_t, apply_state)
        with tf.control_dependencies([decay]):
            return super(AdamWeightDecay, self)._resource_apply_sparse(
                grad, var, indices, **kwargs
            )
    def get_config(self):
        config = super(AdamWeightDecay, self).get_config()
        config.update(
            {"weight_decay_rate": self.weight_decay_rate,}
        )
        return config
    def _do_use_weight_decay(self, param_name):
        """Whether to use L2 weight decay for `param_name`."""
        if self.weight_decay_rate == 0:
            return False
        if self._include_in_weight_decay:
            for r in self._include_in_weight_decay:
                if re.search(r, param_name) is not None:
                    return True
        if self._exclude_from_weight_decay:
            for r in self._exclude_from_weight_decay:
                if re.search(r, param_name) is not None:
                    return False
        return True
| 6,854 | 37.511236 | 88 | py |
TensorFlowTTS | TensorFlowTTS-master/tensorflow_tts/utils/utils.py | # -*- coding: utf-8 -*-
# Copyright 2019 Tomoki Hayashi
# MIT License (https://opensource.org/licenses/MIT)
"""Utility functions."""
import fnmatch
import os
import re
import tempfile
from pathlib import Path
import tensorflow as tf
# Canonical artifact filenames used when packaging/downloading models.
MODEL_FILE_NAME = "model.h5"
CONFIG_FILE_NAME = "config.yml"
PROCESSOR_FILE_NAME = "processor.json"
LIBRARY_NAME = "tensorflow_tts"
# Local cache directory for downloaded artifacts (~/.cache/tensorflow_tts).
CACHE_DIRECTORY = os.path.join(Path.home(), ".cache", LIBRARY_NAME)
def find_files(root_dir, query="*.wav", include_root_dir=True):
    """Find files recursively.
    Args:
        root_dir (str): Root root_dir to find.
        query (str): Query to find (fnmatch-style glob, e.g. "*.wav").
        include_root_dir (bool): If False, the root_dir prefix is stripped
            from each returned path.
    Returns:
        list: List of found filenames.
    """
    files = []
    for root, _, filenames in os.walk(root_dir, followlinks=True):
        for filename in fnmatch.filter(filenames, query):
            files.append(os.path.join(root, filename))
    if not include_root_dir:
        # Strip only the leading root_dir prefix. The previous
        # `file_.replace(root_dir + "/", "")` removed *every* occurrence of
        # the substring, corrupting results such as "data/data/x.wav" ->
        # "x.wav" when root_dir == "data"; it also used a hard-coded "/"
        # separator and broke when root_dir already ended with a slash.
        prefix = os.path.join(root_dir, "")  # root_dir + exactly one os.sep
        files = [
            file_[len(prefix):] if file_.startswith(prefix) else file_
            for file_ in files
        ]
    return files
def _path_requires_gfile(filepath):
"""Checks if the given path requires use of GFile API.
Args:
filepath (str): Path to check.
Returns:
bool: True if the given path needs GFile API to access, such as
"s3://some/path" and "gs://some/path".
"""
# If the filepath contains a protocol (e.g. "gs://"), it should be handled
# using TensorFlow GFile API.
return bool(re.match(r"^[a-z]+://", filepath))
def save_weights(model, filepath):
    """Save model weights.

    Same as model.save_weights(filepath), but supports saving to S3 or GCS
    buckets using TensorFlow GFile API.

    Args:
        model (tf.keras.Model): Model to save.
        filepath (str): Path to save the model weights to.
    """
    if _path_requires_gfile(filepath):
        # Remote target: write to a local temp file first, then copy it over
        # with the GFile API. Overwrite to preserve model.save_weights
        # semantics of replacing an existing file.
        _, ext = os.path.splitext(filepath)
        with tempfile.NamedTemporaryFile(suffix=ext) as temp_file:
            model.save_weights(temp_file.name)
            tf.io.gfile.copy(temp_file.name, filepath, overwrite=True)
    else:
        model.save_weights(filepath)
def load_weights(model, filepath):
    """Load model weights.

    Same as model.load_weights(filepath), but supports loading from S3 or GCS
    buckets using TensorFlow GFile API.

    Args:
        model (tf.keras.Model): Model to load weights to.
        filepath (str): Path to the weights file.
    """
    if _path_requires_gfile(filepath):
        # Remote source: fetch into a local temp copy, then load from there.
        _, ext = os.path.splitext(filepath)
        with tempfile.NamedTemporaryFile(suffix=ext) as temp_file:
            # NamedTemporaryFile already created the file on disk, so the copy
            # must be allowed to overwrite it.
            tf.io.gfile.copy(filepath, temp_file.name, overwrite=True)
            model.load_weights(temp_file.name)
    else:
        model.load_weights(filepath)
| 3,053 | 30.163265 | 80 | py |
TensorFlowTTS | TensorFlowTTS-master/tensorflow_tts/utils/group_conv.py | # -*- coding: utf-8 -*-
# This code is copy from https://github.com/tensorflow/tensorflow/pull/36773.
"""Group Convolution Modules."""
from tensorflow.python.framework import tensor_shape
from tensorflow.python.keras import activations, constraints, initializers, regularizers
from tensorflow.python.keras.engine.base_layer import Layer
from tensorflow.python.keras.engine.input_spec import InputSpec
from tensorflow.python.keras.layers import Conv1D, SeparableConv1D
from tensorflow.python.keras.utils import conv_utils
from tensorflow.python.ops import array_ops, nn, nn_ops
class Convolution(object):
    """Helper class for convolution.

    Adapted copy of TensorFlow's internal convolution helper (see the file
    header: backported from tensorflow/tensorflow#36773). Unlike the stock
    helper, the channel check below only requires the filter's input depth to
    *divide* the input channel count, which is what permits grouped
    convolutions.

    Note that this class assumes that shapes of input and filter passed to
    __call__ are compatible with input_shape and filter_shape passed to the
    constructor.

    Arguments
      input_shape: static shape of input. i.e. input.get_shape().
      filter_shape: static shape of the filter. i.e. filter.get_shape().
      padding: see convolution.
      strides: see convolution.
      dilation_rate: see convolution.
      name: see convolution.
      data_format: see convolution.
    """

    def __init__(
        self,
        input_shape,
        filter_shape,
        padding,
        strides=None,
        dilation_rate=None,
        name=None,
        data_format=None,
    ):
        """Helper function for convolution."""
        # Infer the total rank from the filter shape, falling back to the
        # input shape when the filter rank is unknown.
        num_total_dims = filter_shape.ndims
        if num_total_dims is None:
            num_total_dims = input_shape.ndims
        if num_total_dims is None:
            raise ValueError("rank of input or filter must be known")
        num_spatial_dims = num_total_dims - 2
        try:
            input_shape.with_rank(num_spatial_dims + 2)
        except ValueError:
            raise ValueError("input tensor must have rank %d" % (num_spatial_dims + 2))
        try:
            filter_shape.with_rank(num_spatial_dims + 2)
        except ValueError:
            raise ValueError("filter tensor must have rank %d" % (num_spatial_dims + 2))
        # Locate the channel axis and spatial axes: channels-last ("NHWC"-like)
        # unless data_format starts with "NC".
        if data_format is None or not data_format.startswith("NC"):
            input_channels_dim = tensor_shape.dimension_at_index(
                input_shape, num_spatial_dims + 1
            )
            spatial_dims = range(1, num_spatial_dims + 1)
        else:
            input_channels_dim = tensor_shape.dimension_at_index(input_shape, 1)
            spatial_dims = range(2, num_spatial_dims + 2)
        filter_dim = tensor_shape.dimension_at_index(filter_shape, num_spatial_dims)
        # Divisibility (not equality) check: this is the relaxation that makes
        # grouped convolution possible.
        if not (input_channels_dim % filter_dim).is_compatible_with(0):
            raise ValueError(
                "number of input channels is not divisible by corresponding "
                "dimension of filter, {} % {} != 0".format(
                    input_channels_dim, filter_dim
                )
            )
        # NOTE(review): the nn_ops._-prefixed helpers below are private
        # TensorFlow APIs; they may break across TF versions.
        strides, dilation_rate = nn_ops._get_strides_and_dilation_rate(
            num_spatial_dims, strides, dilation_rate
        )
        self.input_shape = input_shape
        self.filter_shape = filter_shape
        self.data_format = data_format
        self.strides = strides
        self.padding = padding
        self.name = name
        self.dilation_rate = dilation_rate
        # _WithSpaceToBatch implements dilation via space-to-batch around a
        # non-atrous convolution built by self._build_op.
        self.conv_op = nn_ops._WithSpaceToBatch(
            input_shape,
            dilation_rate=dilation_rate,
            padding=padding,
            build_op=self._build_op,
            filter_shape=filter_shape,
            spatial_dims=spatial_dims,
            data_format=data_format,
        )

    def _build_op(self, _, padding):
        # Callback for _WithSpaceToBatch: build the underlying dilation-free
        # convolution with the (possibly adjusted) padding.
        return nn_ops._NonAtrousConvolution(
            self.input_shape,
            filter_shape=self.filter_shape,
            padding=padding,
            data_format=self.data_format,
            strides=self.strides,
            name=self.name,
        )

    def __call__(self, inp, filter):
        # Apply the pre-built convolution op to a concrete input and filter.
        return self.conv_op(inp, filter)
class Conv(Layer):
    """Abstract N-D convolution layer (private, used as implementation base).

    This layer creates a convolution kernel that is convolved
    (actually cross-correlated) with the layer input to produce a tensor of
    outputs. If `use_bias` is True (and a `bias_initializer` is provided),
    a bias vector is created and added to the outputs. Finally, if
    `activation` is not `None`, it is applied to the outputs as well.

    Note: layer attributes cannot be modified after the layer has been called
    once (except the `trainable` attribute).

    Arguments:
      rank: An integer, the rank of the convolution, e.g. "2" for 2D convolution.
      filters: Integer, the dimensionality of the output space (i.e. the number
        of filters in the convolution).
      kernel_size: An integer or tuple/list of n integers, specifying the
        length of the convolution window.
      strides: An integer or tuple/list of n integers,
        specifying the stride length of the convolution.
        Specifying any stride value != 1 is incompatible with specifying
        any `dilation_rate` value != 1.
      padding: One of `"valid"`, `"same"`, or `"causal"` (case-insensitive).
      data_format: A string, one of `channels_last` (default) or `channels_first`.
        The ordering of the dimensions in the inputs.
        `channels_last` corresponds to inputs with shape
        `(batch_size, ..., channels)` while `channels_first` corresponds to
        inputs with shape `(batch_size, channels, ...)`.
      dilation_rate: An integer or tuple/list of n integers, specifying
        the dilation rate to use for dilated convolution.
        Currently, specifying any `dilation_rate` value != 1 is
        incompatible with specifying any `strides` value != 1.
      groups: Integer, the number of channel groups controlling the connections
        between inputs and outputs. Input channels and `filters` must both be
        divisible by `groups`. For example,
        - At `groups=1`, all inputs are convolved to all outputs.
        - At `groups=2`, the operation becomes equivalent to having two
          convolutional layers side by side, each seeing half the input
          channels, and producing half the output channels, and both
          subsequently concatenated.
        - At `groups=input_channels`, each input channel is convolved with its
          own set of filters, of size `input_channels / filters`
      activation: Activation function to use.
        If you don't specify anything, no activation is applied.
      use_bias: Boolean, whether the layer uses a bias.
      kernel_initializer: An initializer for the convolution kernel.
      bias_initializer: An initializer for the bias vector. If None, the default
        initializer will be used.
      kernel_regularizer: Optional regularizer for the convolution kernel.
      bias_regularizer: Optional regularizer for the bias vector.
      activity_regularizer: Optional regularizer function for the output.
      kernel_constraint: Optional projection function to be applied to the
        kernel after being updated by an `Optimizer` (e.g. used to implement
        norm constraints or value constraints for layer weights). The function
        must take as input the unprojected variable and must return the
        projected variable (which must have the same shape). Constraints are
        not safe to use when doing asynchronous distributed training.
      bias_constraint: Optional projection function to be applied to the
        bias after being updated by an `Optimizer`.
      trainable: Boolean, if `True` the weights of this layer will be marked as
        trainable (and listed in `layer.trainable_weights`).
      name: A string, the name of the layer.
    """

    def __init__(
        self,
        rank,
        filters,
        kernel_size,
        strides=1,
        padding="valid",
        data_format=None,
        dilation_rate=1,
        groups=1,
        activation=None,
        use_bias=True,
        kernel_initializer="glorot_uniform",
        bias_initializer="zeros",
        kernel_regularizer=None,
        bias_regularizer=None,
        activity_regularizer=None,
        kernel_constraint=None,
        bias_constraint=None,
        trainable=True,
        name=None,
        **kwargs
    ):
        super(Conv, self).__init__(
            trainable=trainable,
            name=name,
            activity_regularizer=regularizers.get(activity_regularizer),
            **kwargs
        )
        self.rank = rank
        if filters is not None and not isinstance(filters, int):
            filters = int(filters)
        self.filters = filters
        # Treat groups=None/0 as the ungrouped case.
        self.groups = groups or 1
        # `filters` counts output channels across all groups, so it must split
        # evenly between them.
        if filters is not None and filters % self.groups != 0:
            raise ValueError(
                "The number of filters must be evenly divisible by the number of "
                "groups. Received: groups={}, filters={}".format(groups, filters)
            )
        self.kernel_size = conv_utils.normalize_tuple(kernel_size, rank, "kernel_size")
        if not all(self.kernel_size):
            raise ValueError(
                "The argument `kernel_size` cannot contain 0(s). "
                "Received: %s" % (kernel_size,)
            )
        self.strides = conv_utils.normalize_tuple(strides, rank, "strides")
        self.padding = conv_utils.normalize_padding(padding)
        if self.padding == "causal" and not isinstance(self, (Conv1D, SeparableConv1D)):
            raise ValueError(
                "Causal padding is only supported for `Conv1D`"
                "and ``SeparableConv1D`."
            )
        self.data_format = conv_utils.normalize_data_format(data_format)
        self.dilation_rate = conv_utils.normalize_tuple(
            dilation_rate, rank, "dilation_rate"
        )
        self.activation = activations.get(activation)
        self.use_bias = use_bias
        self.kernel_initializer = initializers.get(kernel_initializer)
        self.bias_initializer = initializers.get(bias_initializer)
        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.bias_regularizer = regularizers.get(bias_regularizer)
        self.kernel_constraint = constraints.get(kernel_constraint)
        self.bias_constraint = constraints.get(bias_constraint)
        self.input_spec = InputSpec(ndim=self.rank + 2)

    def build(self, input_shape):
        """Create the kernel/bias variables and the convolution op."""
        input_shape = tensor_shape.TensorShape(input_shape)
        input_channel = self._get_input_channel(input_shape)
        if input_channel % self.groups != 0:
            raise ValueError(
                "The number of input channels must be evenly divisible by the number "
                "of groups. Received groups={}, but the input has {} channels "
                "(full input shape is {}).".format(
                    self.groups, input_channel, input_shape
                )
            )
        # Each group only sees input_channel // groups input channels, so the
        # kernel's input-depth dimension is reduced accordingly.
        kernel_shape = self.kernel_size + (input_channel // self.groups, self.filters)
        self.kernel = self.add_weight(
            name="kernel",
            shape=kernel_shape,
            initializer=self.kernel_initializer,
            regularizer=self.kernel_regularizer,
            constraint=self.kernel_constraint,
            trainable=True,
            dtype=self.dtype,
        )
        if self.use_bias:
            self.bias = self.add_weight(
                name="bias",
                shape=(self.filters,),
                initializer=self.bias_initializer,
                regularizer=self.bias_regularizer,
                constraint=self.bias_constraint,
                trainable=True,
                dtype=self.dtype,
            )
        else:
            self.bias = None
        channel_axis = self._get_channel_axis()
        self.input_spec = InputSpec(
            ndim=self.rank + 2, axes={channel_axis: input_channel}
        )
        # Remember the build-time shape so call() can detect shape changes and
        # rebuild the op (see _recreate_conv_op).
        self._build_conv_op_input_shape = input_shape
        self._build_input_channel = input_channel
        self._padding_op = self._get_padding_op()
        self._conv_op_data_format = conv_utils.convert_data_format(
            self.data_format, self.rank + 2
        )
        # Use the local `Convolution` helper (not the stock nn_ops version): it
        # accepts a filter depth that merely divides the input channel count,
        # which is required for grouped convolution.
        self._convolution_op = Convolution(
            input_shape,
            filter_shape=self.kernel.shape,
            dilation_rate=self.dilation_rate,
            strides=self.strides,
            padding=self._padding_op,
            data_format=self._conv_op_data_format,
        )
        self.built = True

    def call(self, inputs):
        # The conv op is specialized to the build-time shape; rebuild it when
        # the runtime input shape differs to avoid stateful behavior.
        if self._recreate_conv_op(inputs):
            self._convolution_op = Convolution(
                inputs.get_shape(),
                filter_shape=self.kernel.shape,
                dilation_rate=self.dilation_rate,
                strides=self.strides,
                padding=self._padding_op,
                data_format=self._conv_op_data_format,
            )
            self._build_conv_op_input_shape = inputs.get_shape()
        # Apply causal padding to inputs for Conv1D.
        if self.padding == "causal" and self.__class__.__name__ == "Conv1D":
            inputs = array_ops.pad(inputs, self._compute_causal_padding())
        outputs = self._convolution_op(inputs, self.kernel)
        if self.use_bias:
            if self.data_format == "channels_first":
                if self.rank == 1:
                    # nn.bias_add does not accept a 1D input tensor.
                    bias = array_ops.reshape(self.bias, (1, self.filters, 1))
                    outputs += bias
                else:
                    outputs = nn.bias_add(outputs, self.bias, data_format="NCHW")
            else:
                outputs = nn.bias_add(outputs, self.bias, data_format="NHWC")
        if self.activation is not None:
            return self.activation(outputs)
        return outputs

    def compute_output_shape(self, input_shape):
        """Compute the output shape by applying stride/dilation/padding per spatial axis."""
        input_shape = tensor_shape.TensorShape(input_shape).as_list()
        if self.data_format == "channels_last":
            space = input_shape[1:-1]
            new_space = []
            for i in range(len(space)):
                new_dim = conv_utils.conv_output_length(
                    space[i],
                    self.kernel_size[i],
                    padding=self.padding,
                    stride=self.strides[i],
                    dilation=self.dilation_rate[i],
                )
                new_space.append(new_dim)
            return tensor_shape.TensorShape(
                [input_shape[0]] + new_space + [self.filters]
            )
        else:
            space = input_shape[2:]
            new_space = []
            for i in range(len(space)):
                new_dim = conv_utils.conv_output_length(
                    space[i],
                    self.kernel_size[i],
                    padding=self.padding,
                    stride=self.strides[i],
                    dilation=self.dilation_rate[i],
                )
                new_space.append(new_dim)
            return tensor_shape.TensorShape([input_shape[0], self.filters] + new_space)

    def get_config(self):
        """Serialize constructor arguments (including `groups`) for layer cloning."""
        config = {
            "filters": self.filters,
            "kernel_size": self.kernel_size,
            "strides": self.strides,
            "padding": self.padding,
            "data_format": self.data_format,
            "dilation_rate": self.dilation_rate,
            "groups": self.groups,
            "activation": activations.serialize(self.activation),
            "use_bias": self.use_bias,
            "kernel_initializer": initializers.serialize(self.kernel_initializer),
            "bias_initializer": initializers.serialize(self.bias_initializer),
            "kernel_regularizer": regularizers.serialize(self.kernel_regularizer),
            "bias_regularizer": regularizers.serialize(self.bias_regularizer),
            "activity_regularizer": regularizers.serialize(self.activity_regularizer),
            "kernel_constraint": constraints.serialize(self.kernel_constraint),
            "bias_constraint": constraints.serialize(self.bias_constraint),
        }
        base_config = super(Conv, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))

    def _compute_causal_padding(self):
        """Calculates padding for 'causal' option for 1-d conv layers."""
        # Left-pad only, so output[t] never depends on input[t+1:].
        left_pad = self.dilation_rate[0] * (self.kernel_size[0] - 1)
        if self.data_format == "channels_last":
            causal_padding = [[0, 0], [left_pad, 0], [0, 0]]
        else:
            causal_padding = [[0, 0], [0, 0], [left_pad, 0]]
        return causal_padding

    def _get_channel_axis(self):
        # Channel axis position depends on data format.
        if self.data_format == "channels_first":
            return 1
        else:
            return -1

    def _get_input_channel(self, input_shape):
        """Return the statically-known input channel count, or raise if unknown."""
        channel_axis = self._get_channel_axis()
        if input_shape.dims[channel_axis].value is None:
            raise ValueError(
                "The channel dimension of the inputs "
                "should be defined. Found `None`."
            )
        return int(input_shape[channel_axis])

    def _get_padding_op(self):
        # "causal" is implemented via explicit pre-padding in call(), so the
        # underlying op runs with "valid" padding.
        if self.padding == "causal":
            op_padding = "valid"
        else:
            op_padding = self.padding
        if not isinstance(op_padding, (list, tuple)):
            op_padding = op_padding.upper()
        return op_padding

    def _recreate_conv_op(self, inputs):
        """Recreate conv_op if necessary.

        Check if the input_shape in call() is different from that in build().
        For the values that are not None, if they are different, recreate
        the _convolution_op to avoid the stateful behavior.

        Args:
          inputs: The input data to call() method.

        Returns:
          `True` or `False` to indicate whether to recreate the conv_op.
        """
        call_input_shape = inputs.get_shape()
        # Axis 0 (batch) is skipped: a batch-size change never requires a rebuild.
        for axis in range(1, len(call_input_shape)):
            if (
                call_input_shape[axis] is not None
                and self._build_conv_op_input_shape[axis] is not None
                and call_input_shape[axis] != self._build_conv_op_input_shape[axis]
            ):
                return True
        return False
class GroupConv1D(Conv):
    """1D convolution layer (e.g. temporal convolution) with group support.

    This layer creates a convolution kernel that is convolved
    with the layer input over a single spatial (or temporal) dimension
    to produce a tensor of outputs.
    If `use_bias` is True, a bias vector is created and added to the outputs.
    Finally, if `activation` is not `None`,
    it is applied to the outputs as well.

    When using this layer as the first layer in a model,
    provide an `input_shape` argument
    (tuple of integers or `None`, e.g.
    `(10, 128)` for sequences of 10 vectors of 128-dimensional vectors,
    or `(None, 128)` for variable-length sequences of 128-dimensional vectors.

    Examples:
    >>> # The inputs are 128-length vectors with 10 timesteps, and the batch size
    >>> # is 4.
    >>> input_shape = (4, 10, 128)
    >>> x = tf.random.normal(input_shape)
    >>> y = tf.keras.layers.Conv1D(
    ... 32, 3, activation='relu',input_shape=input_shape)(x)
    >>> print(y.shape)
    (4, 8, 32)

    Arguments:
      filters: Integer, the dimensionality of the output space
        (i.e. the number of output filters in the convolution).
      kernel_size: An integer or tuple/list of a single integer,
        specifying the length of the 1D convolution window.
      strides: An integer or tuple/list of a single integer,
        specifying the stride length of the convolution.
        Specifying any stride value != 1 is incompatible with specifying
        any `dilation_rate` value != 1.
      padding: One of `"valid"`, `"causal"` or `"same"` (case-insensitive).
        `"causal"` results in causal (dilated) convolutions, e.g. `output[t]`
        does not depend on `input[t+1:]`. Useful when modeling temporal data
        where the model should not violate the temporal order.
        See [WaveNet: A Generative Model for Raw Audio, section
          2.1](https://arxiv.org/abs/1609.03499).
      data_format: A string,
        one of `channels_last` (default) or `channels_first`.
      groups: Integer, the number of channel groups controlling the connections
        between inputs and outputs. Input channels and `filters` must both be
        divisible by `groups`. For example,
        - At `groups=1`, all inputs are convolved to all outputs.
        - At `groups=2`, the operation becomes equivalent to having two
          convolutional layers side by side, each seeing half the input
          channels, and producing half the output channels, and both
          subsequently concatenated.
        - At `groups=input_channels`, each input channel is convolved with its
          own set of filters, of size `input_channels / filters`
      dilation_rate: an integer or tuple/list of a single integer, specifying
        the dilation rate to use for dilated convolution.
        Currently, specifying any `dilation_rate` value != 1 is
        incompatible with specifying any `strides` value != 1.
      activation: Activation function to use.
        If you don't specify anything, no activation is applied (
        see `keras.activations`).
      use_bias: Boolean, whether the layer uses a bias vector.
      kernel_initializer: Initializer for the `kernel` weights matrix (
        see `keras.initializers`).
      bias_initializer: Initializer for the bias vector (
        see `keras.initializers`).
      kernel_regularizer: Regularizer function applied to
        the `kernel` weights matrix (see `keras.regularizers`).
      bias_regularizer: Regularizer function applied to the bias vector (
        see `keras.regularizers`).
      activity_regularizer: Regularizer function applied to
        the output of the layer (its "activation") (
        see `keras.regularizers`).
      kernel_constraint: Constraint function applied to the kernel matrix (
        see `keras.constraints`).
      bias_constraint: Constraint function applied to the bias vector (
        see `keras.constraints`).

    Input shape:
      3D tensor with shape: `(batch_size, steps, input_dim)`

    Output shape:
      3D tensor with shape: `(batch_size, new_steps, filters)`
        `steps` value might have changed due to padding or strides.

    Returns:
      A tensor of rank 3 representing
        `activation(conv1d(inputs, kernel) + bias)`.

    Raises:
      ValueError: when both `strides` > 1 and `dilation_rate` > 1.
    """

    def __init__(
        self,
        filters,
        kernel_size,
        strides=1,
        padding="valid",
        data_format="channels_last",
        dilation_rate=1,
        groups=1,
        activation=None,
        use_bias=True,
        kernel_initializer="glorot_uniform",
        bias_initializer="zeros",
        kernel_regularizer=None,
        bias_regularizer=None,
        activity_regularizer=None,
        kernel_constraint=None,
        bias_constraint=None,
        **kwargs
    ):
        # Pure pass-through: fix rank=1 (temporal convolution) and resolve the
        # string/callable shortcuts via the keras getter functions before
        # delegating all the work to the grouped Conv base class.
        super().__init__(
            rank=1,
            filters=filters,
            kernel_size=kernel_size,
            strides=strides,
            padding=padding,
            data_format=data_format,
            dilation_rate=dilation_rate,
            groups=groups,
            activation=activations.get(activation),
            use_bias=use_bias,
            kernel_initializer=initializers.get(kernel_initializer),
            bias_initializer=initializers.get(bias_initializer),
            kernel_regularizer=regularizers.get(kernel_regularizer),
            bias_regularizer=regularizers.get(bias_regularizer),
            activity_regularizer=regularizers.get(activity_regularizer),
            kernel_constraint=constraints.get(kernel_constraint),
            bias_constraint=constraints.get(bias_constraint),
            **kwargs
        )
| 23,944 | 41.989228 | 88 | py |
TensorFlowTTS | TensorFlowTTS-master/tensorflow_tts/utils/griffin_lim.py | # -*- coding: utf-8 -*-
# Copyright 2020 Minh Nguyen (@dathudeptrai)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Griffin-Lim phase reconstruction algorithm from mel spectrogram."""
import os
import librosa
import numpy as np
import soundfile as sf
import tensorflow as tf
from sklearn.preprocessing import StandardScaler
def griffin_lim_lb(
    mel_spec, stats_path, dataset_config, n_iter=32, output_dir=None, wav_name="lb"
):
    """Generate wave from mel spectrogram with Griffin-Lim algorithm using Librosa.

    Args:
        mel_spec (ndarray): array representing the mel spectrogram.
        stats_path (str): path to the `stats.npy` file containing norm statistics.
        dataset_config (Dict): dataset configuration parameters.
        n_iter (int): number of iterations for GL.
        output_dir (str): output directory where audio file will be saved.
        wav_name (str): name of the output file.

    Returns:
        gl_lb (ndarray): generated wave.
    """
    # Undo the per-bin standardization, then invert the log10 compression.
    scaler = StandardScaler()
    scaler.mean_, scaler.scale_ = np.load(stats_path)
    denormalized = scaler.inverse_transform(mel_spec)
    mel_power = np.power(10.0, denormalized).T

    # Pseudo-inverse of the mel filterbank maps mel bins back to linear
    # frequency bins; clamp to keep magnitudes strictly positive.
    mel_basis = librosa.filters.mel(
        dataset_config["sampling_rate"],
        n_fft=dataset_config["fft_size"],
        n_mels=dataset_config["num_mels"],
        fmin=dataset_config["fmin"],
        fmax=dataset_config["fmax"],
    )
    linear_spec = np.maximum(1e-10, np.dot(np.linalg.pinv(mel_basis), mel_power))

    gl_lb = librosa.griffinlim(
        linear_spec,
        n_iter=n_iter,
        hop_length=dataset_config["hop_size"],
        win_length=dataset_config["win_length"] or dataset_config["fft_size"],
    )
    if output_dir:
        sf.write(
            os.path.join(output_dir, f"{wav_name}.wav"),
            gl_lb,
            dataset_config["sampling_rate"],
            "PCM_16",
        )
    return gl_lb
class TFGriffinLim(tf.keras.layers.Layer):
    """Griffin-Lim algorithm for phase reconstruction from mel spectrogram magnitude."""

    def __init__(self, stats_path, dataset_config, normalized: bool = True):
        """Init GL params.

        Args:
            stats_path (str): path to the `stats.npy` file containing norm statistics.
            dataset_config (Dict): dataset configuration parameters.
            normalized (bool): if True, inputs to call() are assumed standardized
                and are de-normalized with the stats loaded from `stats_path`.
        """
        super().__init__()
        self.normalized = normalized
        if normalized:
            # Load per-bin mean/scale used to standardize mels during training.
            scaler = StandardScaler()
            scaler.mean_, scaler.scale_ = np.load(stats_path)
            self.scaler = scaler
        self.ds_config = dataset_config
        self.mel_basis = librosa.filters.mel(
            self.ds_config["sampling_rate"],
            n_fft=self.ds_config["fft_size"],
            n_mels=self.ds_config["num_mels"],
            fmin=self.ds_config["fmin"],
            fmax=self.ds_config["fmax"],
        )  # [num_mels, fft_size // 2 + 1]

    def save_wav(self, gl_tf, output_dir, wav_name):
        """Generate WAV file and save it.

        Args:
            gl_tf (tf.Tensor): reconstructed signal from GL algorithm.
            output_dir (str): output directory where audio file will be saved.
            wav_name (str or list): name(s) of the output file(s); must match the
                batch size when `gl_tf` is batched.
        """
        encode_fn = lambda x: tf.audio.encode_wav(x, self.ds_config["sampling_rate"])
        # encode_wav expects a trailing channel dimension.
        gl_tf = tf.expand_dims(gl_tf, -1)
        if not isinstance(wav_name, list):
            wav_name = [wav_name]
        if len(gl_tf.shape) > 2:
            # Batched input: encode each signal and write one file per name.
            bs, *_ = gl_tf.shape
            assert bs == len(wav_name), "Batch and 'wav_name' have different size."
            tf_wav = tf.map_fn(encode_fn, gl_tf, dtype=tf.string)
            for idx in tf.range(bs):
                output_path = os.path.join(output_dir, f"{wav_name[idx]}.wav")
                tf.io.write_file(output_path, tf_wav[idx])
        else:
            # Single (unbatched) signal.
            tf_wav = encode_fn(gl_tf)
            tf.io.write_file(os.path.join(output_dir, f"{wav_name[0]}.wav"), tf_wav)

    @tf.function(
        input_signature=[
            tf.TensorSpec(shape=[None, None, None], dtype=tf.float32),
            tf.TensorSpec(shape=[], dtype=tf.int32),
        ]
    )
    def call(self, mel_spec, n_iter=32):
        """Apply GL algorithm to batched mel spectrograms.

        Args:
            mel_spec (tf.Tensor): normalized mel spectrogram.
            n_iter (int): number of iterations to run GL algorithm.

        Returns:
            (tf.Tensor): reconstructed signal from GL algorithm.
        """
        # de-normalize mel spectogram (undo standardization if enabled, then
        # invert the log10 compression in either case)
        if self.normalized:
            mel_spec = tf.math.pow(
                10.0, mel_spec * self.scaler.scale_ + self.scaler.mean_
            )
        else:
            mel_spec = tf.math.pow(
                10.0, mel_spec
            )  # TODO @dathudeptrai check if its ok without it wavs were too quiet
        # Map mel bins back to linear-frequency magnitudes via the filterbank
        # pseudo-inverse; clamp so the log/division below stay finite.
        inverse_mel = tf.linalg.pinv(self.mel_basis)
        # [:, num_mels] @ [fft_size // 2 + 1, num_mels].T
        mel_to_linear = tf.linalg.matmul(mel_spec, inverse_mel, transpose_b=True)
        mel_to_linear = tf.cast(tf.math.maximum(1e-10, mel_to_linear), tf.complex64)
        # Start from a random phase and iteratively refine it: each round
        # resynthesizes a waveform (ISTFT), re-analyzes it (STFT), and keeps
        # only the phase (magnitude normalized away).
        init_phase = tf.cast(
            tf.random.uniform(tf.shape(mel_to_linear), maxval=1), tf.complex64
        )
        phase = tf.math.exp(2j * np.pi * init_phase)
        for _ in tf.range(n_iter):
            inverse = tf.signal.inverse_stft(
                mel_to_linear * phase,
                frame_length=self.ds_config["win_length"] or self.ds_config["fft_size"],
                frame_step=self.ds_config["hop_size"],
                fft_length=self.ds_config["fft_size"],
                window_fn=tf.signal.inverse_stft_window_fn(self.ds_config["hop_size"]),
            )
            phase = tf.signal.stft(
                inverse,
                self.ds_config["win_length"] or self.ds_config["fft_size"],
                self.ds_config["hop_size"],
                self.ds_config["fft_size"],
            )
            phase /= tf.cast(tf.maximum(1e-10, tf.abs(phase)), tf.complex64)
        # Final synthesis with the refined phase.
        return tf.signal.inverse_stft(
            mel_to_linear * phase,
            frame_length=self.ds_config["win_length"] or self.ds_config["fft_size"],
            frame_step=self.ds_config["hop_size"],
            fft_length=self.ds_config["fft_size"],
            window_fn=tf.signal.inverse_stft_window_fn(self.ds_config["hop_size"]),
        )
| 6,824 | 39.868263 | 88 | py |
TensorFlowTTS | TensorFlowTTS-master/tensorflow_tts/utils/weight_norm.py | # -*- coding: utf-8 -*-
# Copyright 2019 The TensorFlow Probability Authors and Minh Nguyen (@dathudeptrai)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Weight Norm Modules."""
import warnings
import tensorflow as tf
class WeightNormalization(tf.keras.layers.Wrapper):
    """Layer wrapper to decouple magnitude and direction of the layer's weights.

    This wrapper reparameterizes a layer by decoupling the weight's
    magnitude and direction. This speeds up convergence by improving the
    conditioning of the optimization problem. It has an optional data-dependent
    initialization scheme, in which initial values of weights are set as functions
    of the first minibatch of data. Both the weight normalization and data-
    dependent initialization are described in [Salimans and Kingma (2016)][1].

    #### Example
    ```python
      net = WeightNorm(tf.keras.layers.Conv2D(2, 2, activation='relu'),
             input_shape=(32, 32, 3), data_init=True)(x)
      net = WeightNorm(tf.keras.layers.Conv2DTranspose(16, 5, activation='relu'),
                       data_init=True)
      net = WeightNorm(tf.keras.layers.Dense(120, activation='relu'),
                       data_init=True)(net)
      net = WeightNorm(tf.keras.layers.Dense(num_classes),
                       data_init=True)(net)
    ```

    #### References
    [1]: Tim Salimans and Diederik P. Kingma. Weight Normalization: A Simple
         Reparameterization to Accelerate Training of Deep Neural Networks. In
         _30th Conference on Neural Information Processing Systems_, 2016.
         https://arxiv.org/abs/1602.07868
    """

    def __init__(self, layer, data_init=True, **kwargs):
        """Initialize WeightNorm wrapper.

        Args:
          layer: A `tf.keras.layers.Layer` instance. Supported layer types are
            `Dense`, `Conv2D`, and `Conv2DTranspose`. Layers with multiple inputs
            are not supported.
          data_init: `bool`, if `True` use data dependent variable initialization.
          **kwargs: Additional keyword args passed to `tf.keras.layers.Wrapper`.

        Raises:
          ValueError: If `layer` is not a `tf.keras.layers.Layer` instance.
        """
        if not isinstance(layer, tf.keras.layers.Layer):
            raise ValueError(
                "Please initialize `WeightNorm` layer with a `tf.keras.layers.Layer` "
                "instance. You passed: {input}".format(input=layer)
            )
        layer_type = type(layer).__name__
        if layer_type not in [
            "Dense",
            "Conv2D",
            "Conv2DTranspose",
            "Conv1D",
            "GroupConv1D",
        ]:
            warnings.warn(
                "`WeightNorm` is tested only for `Dense`, `Conv2D`, `Conv1D`, `GroupConv1D`, "
                "`GroupConv2D`, and `Conv2DTranspose` layers. You passed a layer of type `{}`".format(
                    layer_type
                )
            )
        super().__init__(layer, **kwargs)
        self.data_init = data_init
        self._track_trackable(layer, name="layer")
        # Axis of the kernel that indexes output filters: Conv2DTranspose
        # stores its kernel with output channels at axis -2; every other
        # supported layer uses the last axis.
        self.filter_axis = -2 if layer_type == "Conv2DTranspose" else -1

    def _compute_weights(self):
        """Generate weights with normalization."""
        # Determine the axis along which to expand `g` so that `g` broadcasts to
        # the shape of `v`.
        new_axis = -self.filter_axis - 3
        # Reparameterization: kernel = g * v / ||v||, with the norm taken over
        # every axis except the filter axis.
        self.layer.kernel = tf.nn.l2_normalize(
            self.v, axis=self.kernel_norm_axes
        ) * tf.expand_dims(self.g, new_axis)

    def _init_norm(self):
        """Set the norm of the weight vector."""
        # g <- ||v|| so that the initial effective kernel equals v.
        kernel_norm = tf.sqrt(
            tf.reduce_sum(tf.square(self.v), axis=self.kernel_norm_axes)
        )
        self.g.assign(kernel_norm)

    def _data_dep_init(self, inputs):
        """Data dependent initialization."""
        # Normalize kernel first so that calling the layer calculates
        # `tf.dot(v, x)/tf.norm(v)` as in (5) in ([Salimans and Kingma, 2016][1]).
        self._compute_weights()
        # Temporarily strip activation and bias so the forward pass exposes the
        # raw pre-activation statistics.
        activation = self.layer.activation
        self.layer.activation = None
        use_bias = self.layer.bias is not None
        if use_bias:
            bias = self.layer.bias
            self.layer.bias = tf.zeros_like(bias)
        # Since the bias is initialized as zero, setting the activation to zero and
        # calling the initialized layer (with normalized kernel) yields the correct
        # computation ((5) in Salimans and Kingma (2016))
        x_init = self.layer(inputs)
        norm_axes_out = list(range(x_init.shape.rank - 1))
        m_init, v_init = tf.nn.moments(x_init, norm_axes_out)
        scale_init = 1.0 / tf.sqrt(v_init + 1e-10)
        # Scale g (and shift the bias) so the first batch's outputs have
        # roughly zero mean and unit variance per feature.
        self.g.assign(self.g * scale_init)
        if use_bias:
            self.layer.bias = bias
            self.layer.bias.assign(-m_init * scale_init)
        self.layer.activation = activation

    def build(self, input_shape=None):
        """Build `Layer`.

        Args:
          input_shape: The shape of the input to `self.layer`.

        Raises:
          ValueError: If `Layer` does not contain a `kernel` of weights
        """
        if not self.layer.built:
            self.layer.build(input_shape)
            if not hasattr(self.layer, "kernel"):
                raise ValueError(
                    "`WeightNorm` must wrap a layer that"
                    " contains a `kernel` for weights"
                )
            # Norm is computed over all axes except the filter (output) axis.
            self.kernel_norm_axes = list(range(self.layer.kernel.shape.ndims))
            self.kernel_norm_axes.pop(self.filter_axis)
            # `v` is the direction variable; the wrapped layer's kernel becomes
            # a derived quantity recomputed on every call.
            self.v = self.layer.kernel
            # to avoid a duplicate `kernel` variable after `build` is called
            self.layer.kernel = None
            self.g = self.add_weight(
                name="g",
                shape=(int(self.v.shape[self.filter_axis]),),
                initializer="ones",
                dtype=self.v.dtype,
                trainable=True,
            )
            # One-shot flag so (data-dependent) initialization runs only on the
            # first call.
            self.initialized = self.add_weight(
                name="initialized", dtype=tf.bool, trainable=False
            )
            self.initialized.assign(False)
        super().build()

    def call(self, inputs):
        """Call `Layer`."""
        if not self.initialized:
            if self.data_init:
                self._data_dep_init(inputs)
            else:
                # initialize `g` as the norm of the initialized kernel
                self._init_norm()
            self.initialized.assign(True)
        # Recompute the effective kernel from (g, v) before every forward pass.
        self._compute_weights()
        output = self.layer(inputs)
        return output

    def compute_output_shape(self, input_shape):
        # Wrapper is shape-transparent: defer to the wrapped layer.
        return tf.TensorShape(self.layer.compute_output_shape(input_shape).as_list())
| 7,216 | 38.010811 | 102 | py |
TensorFlowTTS | TensorFlowTTS-master/tensorflow_tts/losses/stft.py | # -*- coding: utf-8 -*-
# Copyright 2020 Minh Nguyen (@dathudeptrai)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""STFT-based loss modules."""
import tensorflow as tf
class TFSpectralConvergence(tf.keras.layers.Layer):
    """Spectral convergence loss (relative Frobenius-norm error between magnitudes)."""
    def __init__(self):
        """Initialize."""
        super().__init__()
    def call(self, y_mag, x_mag):
        """Calculate forward propagation.
        Args:
            y_mag (Tensor): Magnitude spectrogram of groundtruth signal (B, #frames, #freq_bins).
            x_mag (Tensor): Magnitude spectrogram of predicted signal (B, #frames, #freq_bins).
        Returns:
            Tensor: Spectral convergence loss value.
        """
        residual_norm = tf.norm(y_mag - x_mag, ord="fro", axis=(-2, -1))
        reference_norm = tf.norm(y_mag, ord="fro", axis=(-2, -1))
        return residual_norm / reference_norm
class TFLogSTFTMagnitude(tf.keras.layers.Layer):
    """Log STFT magnitude loss module."""
    def __init__(self):
        """Initialize."""
        super().__init__()
    def call(self, y_mag, x_mag):
        """Calculate forward propagation.
        Args:
            y_mag (Tensor): Magnitude spectrogram of groundtruth signal (B, #frames, #freq_bins).
            x_mag (Tensor): Magnitude spectrogram of predicted signal (B, #frames, #freq_bins).
        Returns:
            Tensor: Elementwise log STFT magnitude loss value (pre-reduce).
        """
        # |log(y) - log(x)|; inputs are clipped >= 1e-7 by the caller (TFSTFT),
        # so the logs are finite.
        return tf.abs(tf.math.log(y_mag) - tf.math.log(x_mag))
class TFSTFT(tf.keras.layers.Layer):
    """STFT loss module (single resolution)."""
    def __init__(self, frame_length=600, frame_step=120, fft_length=1024):
        """Initialize.
        Args:
            frame_length (int): STFT window length in samples.
            frame_step (int): STFT hop size in samples.
            fft_length (int): FFT size.
        """
        super().__init__()
        self.frame_length = frame_length
        self.frame_step = frame_step
        self.fft_length = fft_length
        self.spectral_convergenge_loss = TFSpectralConvergence()
        self.log_stft_magnitude_loss = TFLogSTFTMagnitude()
    def _magnitude(self, signal):
        """Return the clipped STFT magnitude of *signal* (B, T) -> (B, #frames, #bins)."""
        spec = tf.abs(
            tf.signal.stft(
                signals=signal,
                frame_length=self.frame_length,
                frame_step=self.frame_step,
                fft_length=self.fft_length,
            )
        )
        # add small number to prevent nan value.
        # compatible with pytorch version.
        return tf.clip_by_value(tf.math.sqrt(spec ** 2 + 1e-7), 1e-7, 1e3)
    def call(self, y, x):
        """Calculate forward propagation.
        Args:
            y (Tensor): Groundtruth signal (B, T).
            x (Tensor): Predicted signal (B, T).
        Returns:
            Tensor: Spectral convergence loss value (pre-reduce).
            Tensor: Log STFT magnitude loss value (pre-reduce).
        """
        x_mag = self._magnitude(x)
        y_mag = self._magnitude(y)
        sc_loss = self.spectral_convergenge_loss(y_mag, x_mag)
        mag_loss = self.log_stft_magnitude_loss(y_mag, x_mag)
        return sc_loss, mag_loss
class TFMultiResolutionSTFT(tf.keras.layers.Layer):
    """Multi resolution STFT loss module."""
    def __init__(
        self,
        fft_lengths=[1024, 2048, 512],
        frame_lengths=[600, 1200, 240],
        frame_steps=[120, 240, 50],
    ):
        """Initialize Multi resolution STFT loss module.
        Args:
            fft_lengths (list): List of FFT sizes.
            frame_lengths (list): List of window lengths.
            frame_steps (list): List of hop sizes.
        """
        super().__init__()
        assert len(frame_lengths) == len(frame_steps) == len(fft_lengths)
        # one single-resolution STFT loss per (window, hop, fft) triple.
        self.stft_losses = [
            TFSTFT(frame_length, frame_step, fft_length)
            for frame_length, frame_step, fft_length in zip(
                frame_lengths, frame_steps, fft_lengths
            )
        ]
    def call(self, y, x):
        """Calculate forward propagation.
        Args:
            y (Tensor): Groundtruth signal (B, T).
            x (Tensor): Predicted signal (B, T).
        Returns:
            Tensor: Multi resolution spectral convergence loss value.
            Tensor: Multi resolution log STFT magnitude loss value.
        """
        total_sc = 0.0
        total_mag = 0.0
        for stft_loss in self.stft_losses:
            sc_l, mag_l = stft_loss(y, x)
            # reduce every non-batch axis, keeping a per-example loss of shape [B].
            total_sc += tf.reduce_mean(sc_l, axis=list(range(1, len(sc_l.shape))))
            total_mag += tf.reduce_mean(mag_l, axis=list(range(1, len(mag_l.shape))))
        n_resolutions = len(self.stft_losses)
        return total_sc / n_resolutions, total_mag / n_resolutions
| 5,179 | 33.533333 | 97 | py |
TensorFlowTTS | TensorFlowTTS-master/tensorflow_tts/losses/spectrogram.py | # -*- coding: utf-8 -*-
# Copyright 2020 Minh Nguyen (@dathudeptrai)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Spectrogram-based loss modules."""
import tensorflow as tf
class TFMelSpectrogram(tf.keras.layers.Layer):
    """Mel Spectrogram loss."""
    def __init__(
        self,
        n_mels=80,
        f_min=80.0,
        f_max=7600,
        frame_length=1024,
        frame_step=256,
        fft_length=1024,
        sample_rate=16000,
        **kwargs
    ):
        """Initialize.
        Args:
            n_mels (int): Number of mel bins.
            f_min (float): Lower edge of the mel filterbank in Hz.
            f_max (float): Upper edge of the mel filterbank in Hz.
            frame_length (int): STFT window length in samples.
            frame_step (int): STFT hop size in samples.
            fft_length (int): FFT size.
            sample_rate (int): Audio sample rate in Hz.
        """
        super().__init__(**kwargs)
        self.frame_length = frame_length
        self.frame_step = frame_step
        self.fft_length = fft_length
        # fixed linear-spectrogram -> mel projection, built once at construction.
        self.linear_to_mel_weight_matrix = tf.signal.linear_to_mel_weight_matrix(
            n_mels, fft_length // 2 + 1, sample_rate, f_min, f_max
        )
    def _calculate_log_mels_spectrogram(self, signals):
        """Calculate forward propagation.
        Args:
            signals (Tensor): signal (B, T).
        Returns:
            Tensor: Log-mel spectrogram (B, T', n_mels).
        """
        complex_stft = tf.signal.stft(
            signals,
            frame_length=self.frame_length,
            frame_step=self.frame_step,
            fft_length=self.fft_length,
        )
        magnitudes = tf.abs(complex_stft)
        mels = tf.tensordot(magnitudes, self.linear_to_mel_weight_matrix, 1)
        # tensordot drops static shape info; restore it explicitly.
        mels.set_shape(
            magnitudes.shape[:-1].concatenate(
                self.linear_to_mel_weight_matrix.shape[-1:]
            )
        )
        return tf.math.log(mels + 1e-6)  # prevent nan.
    def call(self, y, x):
        """Calculate forward propagation.
        Args:
            y (Tensor): Groundtruth signal (B, T).
            x (Tensor): Predicted signal (B, T).
        Returns:
            Tensor: Mean absolute Error Spectrogram Loss.
        """
        y_mels = self._calculate_log_mels_spectrogram(y)
        x_mels = self._calculate_log_mels_spectrogram(x)
        reduce_axes = list(range(1, len(x_mels.shape)))
        return tf.reduce_mean(tf.abs(y_mels - x_mels), axis=reduce_axes)
| 2,697 | 31.902439 | 83 | py |
TensorFlowTTS | TensorFlowTTS-master/tensorflow_tts/trainers/base_trainer.py | # -*- coding: utf-8 -*-
# Copyright 2020 Minh Nguyen (@dathudeptrai)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Based Trainer."""
import abc
import logging
import os
import tensorflow as tf
from tqdm import tqdm
from tensorflow_tts.optimizers import GradientAccumulator
from tensorflow_tts.utils import utils
class BasedTrainer(metaclass=abc.ABCMeta):
    """Customized trainer module for all models."""
    def __init__(self, steps, epochs, config):
        self.steps = steps
        self.epochs = epochs
        self.config = config
        self.finish_train = False
        self.writer = tf.summary.create_file_writer(config["outdir"])
        self.train_data_loader = None
        self.eval_data_loader = None
        self.train_metrics = None
        self.eval_metrics = None
        self.list_metrics_name = None
    def init_train_eval_metrics(self, list_metrics_name):
        """Init train and eval metrics to save it to tensorboard."""
        self.train_metrics = {
            name: tf.keras.metrics.Mean(name="train_" + name, dtype=tf.float32)
            for name in list_metrics_name
        }
        self.eval_metrics = {
            name: tf.keras.metrics.Mean(name="eval_" + name, dtype=tf.float32)
            for name in list_metrics_name
        }
    def reset_states_train(self):
        """Reset train metrics after save it to tensorboard."""
        for metric in self.train_metrics.values():
            metric.reset_states()
    def reset_states_eval(self):
        """Reset eval metrics after save it to tensorboard."""
        for metric in self.eval_metrics.values():
            metric.reset_states()
    def update_train_metrics(self, dict_metrics_losses):
        """Fold one step of training losses into the running means."""
        for name, value in dict_metrics_losses.items():
            self.train_metrics[name].update_state(value)
    def update_eval_metrics(self, dict_metrics_losses):
        """Fold one step of evaluation losses into the running means."""
        for name, value in dict_metrics_losses.items():
            self.eval_metrics[name].update_state(value)
    def set_train_data_loader(self, train_dataset):
        """Set train data loader (MUST)."""
        self.train_data_loader = train_dataset
    def get_train_data_loader(self):
        """Get train data loader."""
        return self.train_data_loader
    def set_eval_data_loader(self, eval_dataset):
        """Set eval data loader (MUST)."""
        self.eval_data_loader = eval_dataset
    def get_eval_data_loader(self):
        """Get eval data loader."""
        return self.eval_data_loader
    @abc.abstractmethod
    def compile(self):
        pass
    @abc.abstractmethod
    def create_checkpoint_manager(self, saved_path=None, max_to_keep=10):
        """Create checkpoint management."""
        pass
    def run(self):
        """Run training epochs until a subclass sets ``finish_train``."""
        self.tqdm = tqdm(
            initial=self.steps, total=self.config["train_max_steps"], desc="[train]"
        )
        while True:
            self._train_epoch()
            if self.finish_train:
                break
        self.tqdm.close()
        logging.info("Finish training.")
    @abc.abstractmethod
    def save_checkpoint(self):
        """Save checkpoint."""
        pass
    @abc.abstractmethod
    def load_checkpoint(self, pretrained_path):
        """Load checkpoint."""
        pass
    def _train_epoch(self):
        """Train model one epoch."""
        for step_in_epoch, batch in enumerate(self.train_data_loader, 1):
            # train on this batch, then fire any due interval hooks.
            self._train_step(batch)
            self._check_log_interval()
            self._check_eval_interval()
            self._check_save_interval()
            # bail out mid-epoch once training is flagged as finished.
            if self.finish_train:
                return
        # bookkeeping after a full pass over the dataset.
        self.epochs += 1
        self.train_steps_per_epoch = step_in_epoch
        logging.info(
            f"(Steps: {self.steps}) Finished {self.epochs} epoch training "
            f"({self.train_steps_per_epoch} steps per epoch)."
        )
    @abc.abstractmethod
    def _eval_epoch(self):
        """One epoch evaluation."""
        pass
    @abc.abstractmethod
    def _train_step(self, batch):
        """One step training."""
        pass
    @abc.abstractmethod
    def _check_log_interval(self):
        """Save log interval."""
        pass
    @abc.abstractmethod
    def fit(self):
        pass
    def _check_eval_interval(self):
        """Run an evaluation pass every ``eval_interval_steps`` steps."""
        if self.steps % self.config["eval_interval_steps"] == 0:
            self._eval_epoch()
    def _check_save_interval(self):
        """Save a checkpoint every ``save_interval_steps`` steps."""
        if self.steps % self.config["save_interval_steps"] == 0:
            self.save_checkpoint()
            logging.info(f"Successfully saved checkpoint @ {self.steps} steps.")
    def generate_and_save_intermediate_result(self, batch):
        """Generate and save intermediate result."""
        pass
    def _write_to_tensorboard(self, list_metrics, stage="train"):
        """Write variables to tensorboard."""
        with self.writer.as_default():
            for name, metric in list_metrics.items():
                tf.summary.scalar(stage + "/" + name, metric.result(), step=self.steps)
        self.writer.flush()
class GanBasedTrainer(BasedTrainer):
    """Customized trainer module for GAN TTS training (MelGAN, GAN-TTS, ParallelWaveGAN)."""
    def __init__(
        self,
        steps,
        epochs,
        config,
        strategy,
        is_generator_mixed_precision=False,
        is_discriminator_mixed_precision=False,
    ):
        """Initialize trainer.
        Args:
            steps (int): Initial global steps.
            epochs (int): Initial global epochs.
            config (dict): Config dict loaded from yaml format configuration file.
            strategy (tf.distribute.Strategy): Strategy for distributed training.
            is_generator_mixed_precision (bool): Wrap the generator optimizer in a
                dynamic LossScaleOptimizer.
            is_discriminator_mixed_precision (bool): Same for the discriminator optimizer.
        """
        super().__init__(steps, epochs, config)
        self._is_generator_mixed_precision = is_generator_mixed_precision
        self._is_discriminator_mixed_precision = is_discriminator_mixed_precision
        self._strategy = strategy
        # becomes True once the tf.functions have been built with input signatures.
        self._already_apply_input_signature = False
        # separate accumulators so generator/discriminator gradients never mix.
        self._generator_gradient_accumulator = GradientAccumulator()
        self._discriminator_gradient_accumulator = GradientAccumulator()
        self._generator_gradient_accumulator.reset()
        self._discriminator_gradient_accumulator.reset()
    def init_train_eval_metrics(self, list_metrics_name):
        """Create train/eval metrics inside the distribution strategy scope."""
        with self._strategy.scope():
            super().init_train_eval_metrics(list_metrics_name)
    def get_n_gpus(self):
        """Number of replicas participating in training."""
        return self._strategy.num_replicas_in_sync
    def _get_train_element_signature(self):
        """Element spec of the training dataset (used as a tf.function input signature)."""
        return self.train_data_loader.element_spec
    def _get_eval_element_signature(self):
        """Element spec of the evaluation dataset."""
        return self.eval_data_loader.element_spec
    def set_gen_model(self, generator_model):
        """Set generator class model (MUST)."""
        self._generator = generator_model
    def get_gen_model(self):
        """Get generator model."""
        return self._generator
    def set_dis_model(self, discriminator_model):
        """Set discriminator class model (MUST)."""
        self._discriminator = discriminator_model
    def get_dis_model(self):
        """Get discriminator model."""
        return self._discriminator
    def set_gen_optimizer(self, generator_optimizer):
        """Set generator optimizer (MUST); wrapped for mixed precision when enabled."""
        self._gen_optimizer = generator_optimizer
        if self._is_generator_mixed_precision:
            self._gen_optimizer = tf.keras.mixed_precision.experimental.LossScaleOptimizer(
                self._gen_optimizer, "dynamic"
            )
    def get_gen_optimizer(self):
        """Get generator optimizer."""
        return self._gen_optimizer
    def set_dis_optimizer(self, discriminator_optimizer):
        """Set discriminator optimizer (MUST); wrapped for mixed precision when enabled."""
        self._dis_optimizer = discriminator_optimizer
        if self._is_discriminator_mixed_precision:
            self._dis_optimizer = tf.keras.mixed_precision.experimental.LossScaleOptimizer(
                self._dis_optimizer, "dynamic"
            )
    def get_dis_optimizer(self):
        """Get discriminator optimizer."""
        return self._dis_optimizer
    def compile(self, gen_model, dis_model, gen_optimizer, dis_optimizer):
        """Wire both models and both optimizers into the trainer."""
        self.set_gen_model(gen_model)
        self.set_dis_model(dis_model)
        self.set_gen_optimizer(gen_optimizer)
        self.set_dis_optimizer(dis_optimizer)
    def _train_step(self, batch):
        """One training step; lazily compiles the tf.functions on the first call."""
        if self._already_apply_input_signature is False:
            train_element_signature = self._get_train_element_signature()
            eval_element_signature = self._get_eval_element_signature()
            # wrap once with the dataset element specs as fixed input signatures.
            self.one_step_forward = tf.function(
                self._one_step_forward, input_signature=[train_element_signature]
            )
            self.one_step_evaluate = tf.function(
                self._one_step_evaluate, input_signature=[eval_element_signature]
            )
            self.one_step_predict = tf.function(
                self._one_step_predict, input_signature=[eval_element_signature]
            )
            self._already_apply_input_signature = True
        # run one_step_forward
        self.one_step_forward(batch)
        # update counts
        self.steps += 1
        self.tqdm.update(1)
        self._check_train_finish()
    def _one_step_forward(self, batch):
        """Run one optimization step on every replica and sum the per-replica losses."""
        per_replica_losses = self._strategy.run(
            self._one_step_forward_per_replica, args=(batch,)
        )
        return self._strategy.reduce(
            tf.distribute.ReduceOp.SUM, per_replica_losses, axis=None
        )
    @abc.abstractmethod
    def compute_per_example_generator_losses(self, batch, outputs):
        """Compute per example generator losses and return dict_metrics_losses
        Note that all elements of the loss MUST have shape [batch_size] and
        the keys of dict_metrics_losses MUST be in self.list_metrics_name.
        Args:
            batch: dictionary batch input return from dataloader
            outputs: outputs of the model
        Returns:
            per_example_losses: per example losses for each GPU, shape [B]
            dict_metrics_losses: dictionary loss.
        """
        per_example_losses = 0.0
        dict_metrics_losses = {}
        return per_example_losses, dict_metrics_losses
    @abc.abstractmethod
    def compute_per_example_discriminator_losses(self, batch, gen_outputs):
        """Compute per example discriminator losses and return dict_metrics_losses
        Note that all elements of the loss MUST have shape [batch_size] and
        the keys of dict_metrics_losses MUST be in self.list_metrics_name.
        Args:
            batch: dictionary batch input return from dataloader
            gen_outputs: outputs of the generator
        Returns:
            per_example_losses: per example losses for each GPU, shape [B]
            dict_metrics_losses: dictionary loss.
        """
        per_example_losses = 0.0
        dict_metrics_losses = {}
        return per_example_losses, dict_metrics_losses
    def _calculate_generator_gradient_per_batch(self, batch):
        """Forward/backward for the generator on one (sub-)batch.
        Returns (gradients, loss) without accumulation, or just the loss when
        gradient accumulation is active (gradients then live in the accumulator).
        """
        outputs = self._generator(**batch, training=True)
        (
            per_example_losses,
            dict_metrics_losses,
        ) = self.compute_per_example_generator_losses(batch, outputs)
        # average over the effective global batch: replicas x accumulation steps.
        per_replica_gen_losses = tf.nn.compute_average_loss(
            per_example_losses,
            global_batch_size=self.config["batch_size"]
            * self.get_n_gpus()
            * self.config["gradient_accumulation_steps"],
        )
        if self._is_generator_mixed_precision:
            scaled_per_replica_gen_losses = self._gen_optimizer.get_scaled_loss(
                per_replica_gen_losses
            )
        if self._is_generator_mixed_precision:
            scaled_gradients = tf.gradients(
                scaled_per_replica_gen_losses, self._generator.trainable_variables
            )
            gradients = self._gen_optimizer.get_unscaled_gradients(scaled_gradients)
        else:
            gradients = tf.gradients(
                per_replica_gen_losses, self._generator.trainable_variables
            )
        # gradient accumulate for generator here
        if self.config["gradient_accumulation_steps"] > 1:
            self._generator_gradient_accumulator(gradients)
        # accumulate loss into metrics
        self.update_train_metrics(dict_metrics_losses)
        if self.config["gradient_accumulation_steps"] == 1:
            return gradients, per_replica_gen_losses
        else:
            return per_replica_gen_losses
    def _calculate_discriminator_gradient_per_batch(self, batch):
        """Forward/backward for the discriminator on one (sub-)batch.
        The generator is re-run (training=True) to produce fresh fake samples.
        Return convention mirrors _calculate_generator_gradient_per_batch.
        """
        (
            per_example_losses,
            dict_metrics_losses,
        ) = self.compute_per_example_discriminator_losses(
            batch, self._generator(**batch, training=True)
        )
        # average over the effective global batch: replicas x accumulation steps.
        per_replica_dis_losses = tf.nn.compute_average_loss(
            per_example_losses,
            global_batch_size=self.config["batch_size"]
            * self.get_n_gpus()
            * self.config["gradient_accumulation_steps"],
        )
        if self._is_discriminator_mixed_precision:
            scaled_per_replica_dis_losses = self._dis_optimizer.get_scaled_loss(
                per_replica_dis_losses
            )
        if self._is_discriminator_mixed_precision:
            scaled_gradients = tf.gradients(
                scaled_per_replica_dis_losses,
                self._discriminator.trainable_variables,
            )
            gradients = self._dis_optimizer.get_unscaled_gradients(scaled_gradients)
        else:
            gradients = tf.gradients(
                per_replica_dis_losses, self._discriminator.trainable_variables
            )
        # accumulate loss into metrics
        self.update_train_metrics(dict_metrics_losses)
        # gradient accumulate for discriminator here
        if self.config["gradient_accumulation_steps"] > 1:
            self._discriminator_gradient_accumulator(gradients)
        if self.config["gradient_accumulation_steps"] == 1:
            return gradients, per_replica_dis_losses
        else:
            return per_replica_dis_losses
    def _one_step_forward_per_replica(self, batch):
        """Per-replica training step: generator update, then (optionally) discriminator."""
        per_replica_gen_losses = 0.0
        per_replica_dis_losses = 0.0
        if self.config["gradient_accumulation_steps"] == 1:
            (
                gradients,
                per_replica_gen_losses,
            ) = self._calculate_generator_gradient_per_batch(batch)
            self._gen_optimizer.apply_gradients(
                zip(gradients, self._generator.trainable_variables)
            )
        else:
            # gradient accumulation here.
            for i in tf.range(self.config["gradient_accumulation_steps"]):
                # slice sub-batch i out of the accumulated super-batch.
                reduced_batch = {
                    k: v[
                        i
                        * self.config["batch_size"] : (i + 1)
                        * self.config["batch_size"]
                    ]
                    for k, v in batch.items()
                }
                # run 1 step accumulate
                reduced_batch_losses = self._calculate_generator_gradient_per_batch(
                    reduced_batch
                )
                # sum per_replica_losses
                per_replica_gen_losses += reduced_batch_losses
            gradients = self._generator_gradient_accumulator.gradients
            self._gen_optimizer.apply_gradients(
                zip(gradients, self._generator.trainable_variables)
            )
            self._generator_gradient_accumulator.reset()
        # one step discriminator
        # recompute y_hat after 1 step generator for discriminator training.
        if self.steps >= self.config["discriminator_train_start_steps"]:
            if self.config["gradient_accumulation_steps"] == 1:
                (
                    gradients,
                    per_replica_dis_losses,
                ) = self._calculate_discriminator_gradient_per_batch(batch)
                self._dis_optimizer.apply_gradients(
                    zip(gradients, self._discriminator.trainable_variables)
                )
            else:
                # gradient accumulation here.
                for i in tf.range(self.config["gradient_accumulation_steps"]):
                    reduced_batch = {
                        k: v[
                            i
                            * self.config["batch_size"] : (i + 1)
                            * self.config["batch_size"]
                        ]
                        for k, v in batch.items()
                    }
                    # run 1 step accumulate
                    reduced_batch_losses = (
                        self._calculate_discriminator_gradient_per_batch(reduced_batch)
                    )
                    # sum per_replica_losses
                    per_replica_dis_losses += reduced_batch_losses
                gradients = self._discriminator_gradient_accumulator.gradients
                self._dis_optimizer.apply_gradients(
                    zip(gradients, self._discriminator.trainable_variables)
                )
                self._discriminator_gradient_accumulator.reset()
        return per_replica_gen_losses + per_replica_dis_losses
    def _eval_epoch(self):
        """Evaluate model one epoch."""
        logging.info(f"(Steps: {self.steps}) Start evaluation.")
        # calculate loss for each batch
        for eval_steps_per_epoch, batch in enumerate(
            tqdm(self.eval_data_loader, desc="[eval]"), 1
        ):
            # eval one step
            self.one_step_evaluate(batch)
            if eval_steps_per_epoch <= self.config["num_save_intermediate_results"]:
                # save intermediate results (first N batches only)
                self.generate_and_save_intermediate_result(batch)
        logging.info(
            f"(Steps: {self.steps}) Finished evaluation "
            f"({eval_steps_per_epoch} steps per epoch)."
        )
        # average loss
        for key in self.eval_metrics.keys():
            logging.info(
                f"(Steps: {self.steps}) eval_{key} = {self.eval_metrics[key].result():.4f}."
            )
        # record
        self._write_to_tensorboard(self.eval_metrics, stage="eval")
        # reset
        self.reset_states_eval()
    def _one_step_evaluate_per_replica(self, batch):
        """Per-replica eval step: compute losses without applying gradients."""
        ################################################
        # one step generator.
        outputs = self._generator(**batch, training=False)
        _, dict_metrics_losses = self.compute_per_example_generator_losses(
            batch, outputs
        )
        # accumulate loss into metrics
        self.update_eval_metrics(dict_metrics_losses)
        ################################################
        # one step discriminator
        if self.steps >= self.config["discriminator_train_start_steps"]:
            _, dict_metrics_losses = self.compute_per_example_discriminator_losses(
                batch, outputs
            )
            # accumulate loss into metrics
            self.update_eval_metrics(dict_metrics_losses)
        ################################################
    def _one_step_evaluate(self, batch):
        """Distributed evaluation step."""
        self._strategy.run(self._one_step_evaluate_per_replica, args=(batch,))
    def _one_step_predict_per_replica(self, batch):
        """Per-replica inference forward pass."""
        outputs = self._generator(**batch, training=False)
        return outputs
    def _one_step_predict(self, batch):
        """Distributed inference step."""
        outputs = self._strategy.run(self._one_step_predict_per_replica, args=(batch,))
        return outputs
    @abc.abstractmethod
    def generate_and_save_intermediate_result(self, batch):
        return
    def create_checkpoint_manager(self, saved_path=None, max_to_keep=10):
        """Create checkpoint management."""
        if saved_path is None:
            saved_path = self.config["outdir"] + "/checkpoints/"
        os.makedirs(saved_path, exist_ok=True)
        self.saved_path = saved_path
        # the tf Checkpoint tracks counters and optimizer state; model weights
        # are saved separately as .h5 files in save_checkpoint.
        self.ckpt = tf.train.Checkpoint(
            steps=tf.Variable(1),
            epochs=tf.Variable(1),
            gen_optimizer=self.get_gen_optimizer(),
            dis_optimizer=self.get_dis_optimizer(),
        )
        self.ckp_manager = tf.train.CheckpointManager(
            self.ckpt, saved_path, max_to_keep=max_to_keep
        )
    def save_checkpoint(self):
        """Save checkpoint."""
        self.ckpt.steps.assign(self.steps)
        self.ckpt.epochs.assign(self.epochs)
        self.ckp_manager.save(checkpoint_number=self.steps)
        utils.save_weights(
            self._generator,
            self.saved_path + "generator-{}.h5".format(self.steps)
        )
        utils.save_weights(
            self._discriminator,
            self.saved_path + "discriminator-{}.h5".format(self.steps)
        )
    def load_checkpoint(self, pretrained_path):
        """Load checkpoint: counters + optimizer state from the tf Checkpoint,
        model weights from the matching .h5 files."""
        self.ckpt.restore(pretrained_path)
        self.steps = self.ckpt.steps.numpy()
        self.epochs = self.ckpt.epochs.numpy()
        self._gen_optimizer = self.ckpt.gen_optimizer
        # re-assign iterations (global steps) for gen_optimizer.
        self._gen_optimizer.iterations.assign(tf.cast(self.steps, tf.int64))
        # re-assign iterations (global steps) for dis_optimizer.
        # the discriminator only starts after discriminator_train_start_steps,
        # so its iteration count is offset accordingly.
        try:
            discriminator_train_start_steps = self.config[
                "discriminator_train_start_steps"
            ]
            discriminator_train_start_steps = tf.math.maximum(
                0, self.steps - discriminator_train_start_steps
            )
        except Exception:
            discriminator_train_start_steps = self.steps
        self._dis_optimizer = self.ckpt.dis_optimizer
        self._dis_optimizer.iterations.assign(
            tf.cast(discriminator_train_start_steps, tf.int64)
        )
        # load weights.
        utils.load_weights(
            self._generator,
            self.saved_path + "generator-{}.h5".format(self.steps)
        )
        utils.load_weights(
            self._discriminator,
            self.saved_path + "discriminator-{}.h5".format(self.steps)
        )
    def _check_train_finish(self):
        """Check training finished."""
        if self.steps >= self.config["train_max_steps"]:
            self.finish_train = True
        # also stop at the discriminator start boundary so the run can be
        # resumed with the discriminator enabled.
        if (
            self.steps != 0
            and self.steps == self.config["discriminator_train_start_steps"]
        ):
            self.finish_train = True
            logging.info(
                f"Finished training only generator at {self.steps}steps, pls resume and continue training."
            )
    def _check_log_interval(self):
        """Log to tensorboard."""
        if self.steps % self.config["log_interval_steps"] == 0:
            for metric_name in self.list_metrics_name:
                logging.info(
                    f"(Step: {self.steps}) train_{metric_name} = {self.train_metrics[metric_name].result():.4f}."
                )
            self._write_to_tensorboard(self.train_metrics, stage="train")
            # reset
            self.reset_states_train()
    def fit(self, train_data_loader, valid_data_loader, saved_path, resume=None):
        """Distribute the datasets, optionally resume, and run the training loop."""
        self.set_train_data_loader(train_data_loader)
        self.set_eval_data_loader(valid_data_loader)
        self.train_data_loader = self._strategy.experimental_distribute_dataset(
            self.train_data_loader
        )
        self.eval_data_loader = self._strategy.experimental_distribute_dataset(
            self.eval_data_loader
        )
        with self._strategy.scope():
            self.create_checkpoint_manager(saved_path=saved_path, max_to_keep=10000)
            # NOTE(review): len(resume) raises TypeError when resume is None
            # despite the default -- callers appear to always pass a string;
            # confirm.
            if len(resume) > 1:
                self.load_checkpoint(resume)
                logging.info(f"Successfully resumed from {resume}.")
        self.run()
class Seq2SeqBasedTrainer(BasedTrainer, metaclass=abc.ABCMeta):
"""Customized trainer module for Seq2Seq TTS training (Tacotron, FastSpeech)."""
    def __init__(
        self, steps, epochs, config, strategy, is_mixed_precision=False,
    ):
        """Initialize trainer.
        Args:
            steps (int): Initial global steps.
            epochs (int): Initial global epochs.
            config (dict): Config dict loaded from yaml format configuration file.
            strategy (tf.distribute): Strategy for distributed training.
            is_mixed_precision (bool): Use mixed_precision training or not.
        """
        super().__init__(steps, epochs, config)
        self._is_mixed_precision = is_mixed_precision
        self._strategy = strategy
        # model / optimizer / variable subset are wired up later via compile().
        self._model = None
        self._optimizer = None
        self._trainable_variables = None
        # check if we already apply input_signature for train_step.
        self._already_apply_input_signature = False
        # create gradient accumulator
        self._gradient_accumulator = GradientAccumulator()
        self._gradient_accumulator.reset()
    def init_train_eval_metrics(self, list_metrics_name):
        """Create train/eval metrics inside the distribution strategy scope."""
        with self._strategy.scope():
            super().init_train_eval_metrics(list_metrics_name)
    def set_model(self, model):
        """Set generator class model (MUST)."""
        self._model = model
    def get_model(self):
        """Get generator model."""
        return self._model
def set_optimizer(self, optimizer):
"""Set optimizer (MUST)."""
self._optimizer = optimizer
if self._is_mixed_precision:
self._optimizer = tf.keras.mixed_precision.experimental.LossScaleOptimizer(
self._optimizer, "dynamic"
)
    def get_optimizer(self):
        """Get optimizer."""
        return self._optimizer
    def get_n_gpus(self):
        """Number of replicas participating in training."""
        return self._strategy.num_replicas_in_sync
    def compile(self, model, optimizer):
        """Wire model and optimizer into the trainer and select trainable vars."""
        self.set_model(model)
        self.set_optimizer(optimizer)
        self._trainable_variables = self._train_vars()
def _train_vars(self):
if self.config["var_train_expr"]:
list_train_var = self.config["var_train_expr"].split("|")
return [
v
for v in self._model.trainable_variables
if self._check_string_exist(list_train_var, v.name)
]
return self._model.trainable_variables
def _check_string_exist(self, list_string, inp_string):
for string in list_string:
if string in inp_string:
return True
return False
    def _get_train_element_signature(self):
        """Element spec of the training dataset (used as a tf.function input signature)."""
        return self.train_data_loader.element_spec
    def _get_eval_element_signature(self):
        """Element spec of the evaluation dataset."""
        return self.eval_data_loader.element_spec
    def _train_step(self, batch):
        """One training step; lazily compiles the tf.functions on the first call."""
        if self._already_apply_input_signature is False:
            train_element_signature = self._get_train_element_signature()
            eval_element_signature = self._get_eval_element_signature()
            # wrap once with the dataset element specs as fixed input signatures.
            self.one_step_forward = tf.function(
                self._one_step_forward, input_signature=[train_element_signature]
            )
            self.one_step_evaluate = tf.function(
                self._one_step_evaluate, input_signature=[eval_element_signature]
            )
            self.one_step_predict = tf.function(
                self._one_step_predict, input_signature=[eval_element_signature]
            )
            self._already_apply_input_signature = True
        # run one_step_forward
        self.one_step_forward(batch)
        # update counts
        self.steps += 1
        self.tqdm.update(1)
        self._check_train_finish()
def _one_step_forward(self, batch):
per_replica_losses = self._strategy.run(
self._one_step_forward_per_replica, args=(batch,)
)
return self._strategy.reduce(
tf.distribute.ReduceOp.SUM, per_replica_losses, axis=None
)
    def _calculate_gradient_per_batch(self, batch):
        """Forward/backward on one (sub-)batch.
        Returns (gradients, loss) when accumulation is off; with accumulation on,
        folds gradients into the accumulator and returns only the loss.
        """
        outputs = self._model(**batch, training=True)
        per_example_losses, dict_metrics_losses = self.compute_per_example_losses(
            batch, outputs
        )
        # average over the effective global batch: replicas x accumulation steps.
        per_replica_losses = tf.nn.compute_average_loss(
            per_example_losses,
            global_batch_size=self.config["batch_size"]
            * self.get_n_gpus()
            * self.config["gradient_accumulation_steps"],
        )
        if self._is_mixed_precision:
            scaled_per_replica_losses = self._optimizer.get_scaled_loss(
                per_replica_losses
            )
        if self._is_mixed_precision:
            scaled_gradients = tf.gradients(
                scaled_per_replica_losses, self._trainable_variables
            )
            gradients = self._optimizer.get_unscaled_gradients(scaled_gradients)
        else:
            gradients = tf.gradients(per_replica_losses, self._trainable_variables)
        # gradient accumulate here
        if self.config["gradient_accumulation_steps"] > 1:
            self._gradient_accumulator(gradients)
        # accumulate loss into metrics
        self.update_train_metrics(dict_metrics_losses)
        if self.config["gradient_accumulation_steps"] == 1:
            return gradients, per_replica_losses
        else:
            return per_replica_losses
    def _one_step_forward_per_replica(self, batch):
        """Per-replica training step, with optional gradient accumulation."""
        if self.config["gradient_accumulation_steps"] == 1:
            gradients, per_replica_losses = self._calculate_gradient_per_batch(batch)
            self._optimizer.apply_gradients(
                zip(gradients, self._trainable_variables), 1.0
            )
        else:
            # gradient accumulation here.
            per_replica_losses = 0.0
            for i in tf.range(self.config["gradient_accumulation_steps"]):
                # slice sub-batch i out of the accumulated super-batch.
                reduced_batch = {
                    k: v[
                        i
                        * self.config["batch_size"] : (i + 1)
                        * self.config["batch_size"]
                    ]
                    for k, v in batch.items()
                }
                # run 1 step accumulate
                reduced_batch_losses = self._calculate_gradient_per_batch(reduced_batch)
                # sum per_replica_losses
                per_replica_losses += reduced_batch_losses
            gradients = self._gradient_accumulator.gradients
            self._optimizer.apply_gradients(
                zip(gradients, self._trainable_variables), 1.0
            )
            self._gradient_accumulator.reset()
        return per_replica_losses
    @abc.abstractmethod
    def compute_per_example_losses(self, batch, outputs):
        """Compute per example losses and return dict_metrics_losses
        Note that all elements of the loss MUST have shape [batch_size] and
        the keys of dict_metrics_losses MUST be in self.list_metrics_name.
        Args:
            batch: dictionary batch input return from dataloader
            outputs: outputs of the model
        Returns:
            per_example_losses: per example losses for each GPU, shape [B]
            dict_metrics_losses: dictionary loss.
        """
        per_example_losses = 0.0
        dict_metrics_losses = {}
        return per_example_losses, dict_metrics_losses
    def _eval_epoch(self):
        """Evaluate model one epoch."""
        logging.info(f"(Steps: {self.steps}) Start evaluation.")
        # calculate loss for each batch
        for eval_steps_per_epoch, batch in enumerate(
            tqdm(self.eval_data_loader, desc="[eval]"), 1
        ):
            # eval one step
            self.one_step_evaluate(batch)
            if eval_steps_per_epoch <= self.config["num_save_intermediate_results"]:
                # save intermediate results (first N batches only)
                self.generate_and_save_intermediate_result(batch)
        logging.info(
            f"(Steps: {self.steps}) Finished evaluation "
            f"({eval_steps_per_epoch} steps per epoch)."
        )
        # average loss
        for key in self.eval_metrics.keys():
            logging.info(
                f"(Steps: {self.steps}) eval_{key} = {self.eval_metrics[key].result():.4f}."
            )
        # record
        self._write_to_tensorboard(self.eval_metrics, stage="eval")
        # reset
        self.reset_states_eval()
def _one_step_evaluate_per_replica(self, batch):
outputs = self._model(**batch, training=False)
_, dict_metrics_losses = self.compute_per_example_losses(batch, outputs)
self.update_eval_metrics(dict_metrics_losses)
    def _one_step_evaluate(self, batch):
        """Dispatch one evaluation step to every replica in the distribution strategy."""
        self._strategy.run(self._one_step_evaluate_per_replica, args=(batch,))
def _one_step_predict_per_replica(self, batch):
outputs = self._model(**batch, training=False)
return outputs
def _one_step_predict(self, batch):
outputs = self._strategy.run(self._one_step_predict_per_replica, args=(batch,))
return outputs
@abc.abstractmethod
def generate_and_save_intermediate_result(self, batch):
return
def create_checkpoint_manager(self, saved_path=None, max_to_keep=10):
"""Create checkpoint management."""
if saved_path is None:
saved_path = self.config["outdir"] + "/checkpoints/"
os.makedirs(saved_path, exist_ok=True)
self.saved_path = saved_path
self.ckpt = tf.train.Checkpoint(
steps=tf.Variable(1), epochs=tf.Variable(1), optimizer=self.get_optimizer()
)
self.ckp_manager = tf.train.CheckpointManager(
self.ckpt, saved_path, max_to_keep=max_to_keep
)
def save_checkpoint(self):
"""Save checkpoint."""
self.ckpt.steps.assign(self.steps)
self.ckpt.epochs.assign(self.epochs)
self.ckp_manager.save(checkpoint_number=self.steps)
utils.save_weights(
self._model,
self.saved_path + "model-{}.h5".format(self.steps)
)
def load_checkpoint(self, pretrained_path):
"""Load checkpoint."""
self.ckpt.restore(pretrained_path)
self.steps = self.ckpt.steps.numpy()
self.epochs = self.ckpt.epochs.numpy()
self._optimizer = self.ckpt.optimizer
# re-assign iterations (global steps) for optimizer.
self._optimizer.iterations.assign(tf.cast(self.steps, tf.int64))
# load weights.
utils.load_weights(
self._model,
self.saved_path + "model-{}.h5".format(self.steps)
)
def _check_train_finish(self):
"""Check training finished."""
if self.steps >= self.config["train_max_steps"]:
self.finish_train = True
def _check_log_interval(self):
"""Log to tensorboard."""
if self.steps % self.config["log_interval_steps"] == 0:
for metric_name in self.list_metrics_name:
logging.info(
f"(Step: {self.steps}) train_{metric_name} = {self.train_metrics[metric_name].result():.4f}."
)
self._write_to_tensorboard(self.train_metrics, stage="train")
# reset
self.reset_states_train()
def fit(self, train_data_loader, valid_data_loader, saved_path, resume=None):
self.set_train_data_loader(train_data_loader)
self.set_eval_data_loader(valid_data_loader)
self.train_data_loader = self._strategy.experimental_distribute_dataset(
self.train_data_loader
)
self.eval_data_loader = self._strategy.experimental_distribute_dataset(
self.eval_data_loader
)
with self._strategy.scope():
self.create_checkpoint_manager(saved_path=saved_path, max_to_keep=10000)
if len(resume) > 1:
self.load_checkpoint(resume)
logging.info(f"Successfully resumed from {resume}.")
self.run()
| 36,562 | 35.165183 | 113 | py |
TensorFlowTTS | TensorFlowTTS-master/test/test_fastspeech.py | # -*- coding: utf-8 -*-
# Copyright 2020 Minh Nguyen (@dathudeptrai)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import pytest
import tensorflow as tf
from tensorflow_tts.configs import FastSpeechConfig
from tensorflow_tts.models import TFFastSpeech
os.environ["CUDA_VISIBLE_DEVICES"] = ""
logging.basicConfig(
level=logging.DEBUG,
format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
)
@pytest.mark.parametrize("new_size", [100, 200, 300])
def test_fastspeech_resize_positional_embeddings(new_size):
    """Resizing positional embeddings must still allow reloading saved weights."""
    model = TFFastSpeech(FastSpeechConfig(), name="fastspeech")
    model._build()
    # Save, resize, then reload by name so mismatched shapes are skipped.
    model.save_weights("./test.h5")
    model.resize_positional_embeddings(new_size)
    model.load_weights("./test.h5", by_name=True, skip_mismatch=True)
@pytest.mark.parametrize("num_hidden_layers,n_speakers", [(2, 1), (3, 2), (4, 3)])
def test_fastspeech_trainable(num_hidden_layers, n_speakers):
    """Smoke-test that one training step of FastSpeech runs end to end."""
    import time

    config = FastSpeechConfig(
        encoder_num_hidden_layers=num_hidden_layers,
        decoder_num_hidden_layers=num_hidden_layers + 1,
        n_speakers=n_speakers,
    )
    fastspeech = TFFastSpeech(config, name="fastspeech")
    optimizer = tf.keras.optimizers.Adam(lr=0.001)

    # fake inputs (removed the original's unused `attention_mask` local)
    input_ids = tf.convert_to_tensor([[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]], tf.int32)
    speaker_ids = tf.convert_to_tensor([0], tf.int32)
    duration_gts = tf.convert_to_tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], tf.int32)
    mel_gts = tf.random.uniform(shape=[1, 10, 80], dtype=tf.float32)

    @tf.function
    def one_step_training():
        with tf.GradientTape() as tape:
            mel_outputs_before, _, duration_outputs = fastspeech(
                input_ids, speaker_ids, duration_gts, training=True
            )
            duration_loss = tf.keras.losses.MeanSquaredError()(
                duration_gts, duration_outputs
            )
            mel_loss = tf.keras.losses.MeanSquaredError()(mel_gts, mel_outputs_before)
            loss = duration_loss + mel_loss
        gradients = tape.gradient(loss, fastspeech.trainable_variables)
        optimizer.apply_gradients(zip(gradients, fastspeech.trainable_variables))
        tf.print(loss)

    # First call traces the graph; only the second (re-used) call is timed.
    for i in range(2):
        if i == 1:
            start = time.time()
        one_step_training()
    print(time.time() - start)
| 2,995 | 34.247059 | 86 | py |
TensorFlowTTS | TensorFlowTTS-master/test/test_tacotron2.py | # -*- coding: utf-8 -*-
# Copyright 2020 Minh Nguyen (@dathudeptrai)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import time
import yaml
import numpy as np
import pytest
import tensorflow as tf
from tensorflow_tts.configs import Tacotron2Config
from tensorflow_tts.models import TFTacotron2
from tensorflow_tts.utils import return_strategy
from examples.tacotron2.train_tacotron2 import Tacotron2Trainer
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
logging.basicConfig(
level=logging.WARNING,
format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
)
@pytest.mark.parametrize(
    "var_train_expr, config_path",
    [
        ("embeddings|decoder_cell", "./examples/tacotron2/conf/tacotron2.v1.yaml"),
        (None, "./examples/tacotron2/conf/tacotron2.v1.yaml"),
        (
            "embeddings|decoder_cell",
            "./examples/tacotron2/conf/tacotron2.baker.v1.yaml",
        ),
        ("embeddings|decoder_cell", "./examples/tacotron2/conf/tacotron2.kss.v1.yaml"),
    ],
)
def test_tacotron2_train_some_layers(var_train_expr, config_path):
    """Freezing layers via var_train_expr must shrink the trainable-variable set."""
    model = TFTacotron2(
        Tacotron2Config(n_speakers=5, reduction_factor=1), name="tacotron2"
    )
    model._build()
    optimizer = tf.keras.optimizers.Adam(lr=0.001)

    # Load the example training config and point its outputs at the cwd.
    with open(config_path) as f:
        train_config = yaml.load(f, Loader=yaml.Loader)
    train_config.update({"outdir": "./"})
    train_config.update({"var_train_expr": var_train_expr})

    trainer = Tacotron2Trainer(
        config=train_config,
        strategy=return_strategy(),
        steps=0,
        epochs=0,
        is_mixed_precision=False,
    )
    trainer.compile(model, optimizer)

    len_trainable_vars = len(trainer._trainable_variables)
    all_trainable_vars = len(model.trainable_variables)
    if var_train_expr is None:
        # No filter: every variable stays trainable.
        tf.debugging.assert_equal(len_trainable_vars, all_trainable_vars)
    else:
        tf.debugging.assert_less(len_trainable_vars, all_trainable_vars)
@pytest.mark.parametrize(
    "n_speakers, n_chars, max_input_length, max_mel_length, batch_size",
    [(2, 15, 25, 50, 2),],
)
def test_tacotron2_trainable(
    n_speakers, n_chars, max_input_length, max_mel_length, batch_size
):
    """Smoke-test one training step of Tacotron2 with random fake inputs."""
    config = Tacotron2Config(n_speakers=n_speakers, reduction_factor=1)
    model = TFTacotron2(config, name="tacotron2")
    model._build()
    # fake input: random char ids, zero speaker ids, random mel targets
    input_ids = tf.random.uniform(
        [batch_size, max_input_length], maxval=n_chars, dtype=tf.int32
    )
    speaker_ids = tf.convert_to_tensor([0] * batch_size, tf.int32)
    mel_gts = tf.random.uniform(shape=[batch_size, max_mel_length, 80])
    mel_lengths = np.random.randint(
        max_mel_length, high=max_mel_length + 1, size=[batch_size]
    )
    # Ensure at least one sample spans the full mel length.
    mel_lengths[-1] = max_mel_length
    mel_lengths = tf.convert_to_tensor(mel_lengths, dtype=tf.int32)
    stop_tokens = np.zeros((batch_size, max_mel_length), np.float32)
    stop_tokens = tf.convert_to_tensor(stop_tokens)
    optimizer = tf.keras.optimizers.Adam(lr=0.001)
    binary_crossentropy = tf.keras.losses.BinaryCrossentropy(from_logits=True)
    @tf.function(experimental_relax_shapes=True)
    def one_step_training(input_ids, speaker_ids, mel_gts, mel_lengths):
        with tf.GradientTape() as tape:
            mel_preds, post_mel_preds, stop_preds, alignment_history = model(
                input_ids,
                tf.constant([max_input_length, max_input_length]),
                speaker_ids,
                mel_gts,
                mel_lengths,
                training=True,
            )
            loss_before = tf.keras.losses.MeanSquaredError()(mel_gts, mel_preds)
            loss_after = tf.keras.losses.MeanSquaredError()(mel_gts, post_mel_preds)
            # Build stop-token targets: 1 from (mel_length - 1) onward, else 0.
            stop_gts = tf.expand_dims(
                tf.range(tf.reduce_max(mel_lengths), dtype=tf.int32), 0
            )  # [1, max_len]
            stop_gts = tf.tile(stop_gts, [tf.shape(mel_lengths)[0], 1])  # [B, max_len]
            stop_gts = tf.cast(
                tf.math.greater_equal(stop_gts, tf.expand_dims(mel_lengths, 1) - 1),
                tf.float32,
            )
            # calculate stop_token loss
            stop_token_loss = binary_crossentropy(stop_gts, stop_preds)
            loss = stop_token_loss + loss_before + loss_after
        gradients = tape.gradient(loss, model.trainable_variables)
        optimizer.apply_gradients(zip(gradients, model.trainable_variables))
        return loss, alignment_history
    # First call traces the tf.function; only the second call is timed.
    for i in range(2):
        if i == 1:
            start = time.time()
        loss, alignment_history = one_step_training(
            input_ids, speaker_ids, mel_gts, mel_lengths
        )
        print(f" > loss: {loss}")
    total_runtime = time.time() - start
    print(f" > Total run-time: {total_runtime}")
    # NOTE(review): only one iteration is timed, so dividing by 10 looks
    # like a leftover from a 10-iteration loop -- confirm intent.
    print(f" > Avg run-time: {total_runtime/10}")
| 5,329 | 34.533333 | 87 | py |
TensorFlowTTS | TensorFlowTTS-master/test/test_melgan_layers.py | # -*- coding: utf-8 -*-
# Copyright 2020 Minh Nguyen (@dathudeptrai)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import numpy as np
import pytest
import tensorflow as tf
from tensorflow_tts.models.melgan import (
TFConvTranspose1d,
TFReflectionPad1d,
TFResidualStack,
)
os.environ["CUDA_VISIBLE_DEVICES"] = ""
logging.basicConfig(
level=logging.DEBUG,
format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
)
@pytest.mark.parametrize("padding_size", [(3), (5)])
def test_padding(padding_size):
    """Reflection padding must grow the time axis by 2 * padding_size."""
    dummy = tf.random.normal(shape=[4, 8000, 256], dtype=tf.float32)
    padded = TFReflectionPad1d(padding_size=padding_size)(dummy)
    expected_shape = [4, 8000 + 2 * padding_size, 256]
    assert np.array_equal(tf.keras.backend.int_shape(padded), expected_shape)
@pytest.mark.parametrize(
    "filters,kernel_size,strides,padding,is_weight_norm",
    [(512, 40, 8, "same", False), (768, 15, 8, "same", True)],
)
def test_convtranpose1d(filters, kernel_size, strides, padding, is_weight_norm):
    """Transposed 1-D conv must upsample the time axis by its stride."""
    dummy = tf.random.normal(shape=[4, 8000, 256], dtype=tf.float32)
    layer = TFConvTranspose1d(
        filters=filters,
        kernel_size=kernel_size,
        strides=strides,
        padding=padding,
        is_weight_norm=is_weight_norm,
        initializer_seed=42,
    )
    upsampled = layer(dummy)
    assert np.array_equal(
        tf.keras.backend.int_shape(upsampled), [4, 8000 * strides, filters]
    )
@pytest.mark.parametrize(
    "kernel_size,filters,dilation_rate,use_bias,nonlinear_activation,nonlinear_activation_params,is_weight_norm",
    [
        (3, 256, 1, True, "LeakyReLU", {"alpha": 0.3}, True),
        (3, 256, 3, True, "ReLU", {}, False),
    ],
)
def test_residualblock(
    kernel_size,
    filters,
    dilation_rate,
    use_bias,
    nonlinear_activation,
    nonlinear_activation_params,
    is_weight_norm,
):
    """A residual stack must preserve time length and emit `filters` channels."""
    dummy = tf.random.normal(shape=[4, 8000, 256], dtype=tf.float32)
    block = TFResidualStack(
        kernel_size=kernel_size,
        filters=filters,
        dilation_rate=dilation_rate,
        use_bias=use_bias,
        nonlinear_activation=nonlinear_activation,
        nonlinear_activation_params=nonlinear_activation_params,
        is_weight_norm=is_weight_norm,
        initializer_seed=42,
    )
    result = block(dummy)
    assert np.array_equal(tf.keras.backend.int_shape(result), [4, 8000, filters])
| 2,965 | 30.892473 | 113 | py |
TensorFlowTTS | TensorFlowTTS-master/test/test_fastspeech2.py | # -*- coding: utf-8 -*-
# Copyright 2020 Minh Nguyen (@dathudeptrai)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import yaml
import pytest
import tensorflow as tf
from tensorflow_tts.configs import FastSpeech2Config
from tensorflow_tts.models import TFFastSpeech2
from tensorflow_tts.utils import return_strategy
from examples.fastspeech2.train_fastspeech2 import FastSpeech2Trainer
os.environ["CUDA_VISIBLE_DEVICES"] = ""
logging.basicConfig(
level=logging.DEBUG,
format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
)
@pytest.mark.parametrize("new_size", [100, 200, 300])
def test_fastspeech_resize_positional_embeddings(new_size):
    """Resizing positional embeddings must still allow reloading saved weights."""
    model = TFFastSpeech2(FastSpeech2Config(), name="fastspeech")
    model._build()
    # Save, resize, then reload by name so mismatched shapes are skipped.
    model.save_weights("./test.h5")
    model.resize_positional_embeddings(new_size)
    model.load_weights("./test.h5", by_name=True, skip_mismatch=True)
@pytest.mark.parametrize(
    "var_train_expr, config_path",
    [
        (None, "./examples/fastspeech2/conf/fastspeech2.v1.yaml"),
        ("embeddings|encoder", "./examples/fastspeech2/conf/fastspeech2.v1.yaml"),
        ("embeddings|encoder", "./examples/fastspeech2/conf/fastspeech2.v2.yaml"),
        ("embeddings|encoder", "./examples/fastspeech2/conf/fastspeech2.baker.v2.yaml"),
        ("embeddings|encoder", "./examples/fastspeech2/conf/fastspeech2.kss.v1.yaml"),
        ("embeddings|encoder", "./examples/fastspeech2/conf/fastspeech2.kss.v2.yaml"),
    ],
)
def test_fastspeech2_train_some_layers(var_train_expr, config_path):
    """Freezing layers via var_train_expr must shrink the trainable-variable set."""
    model = TFFastSpeech2(FastSpeech2Config(n_speakers=5))
    model._build()
    optimizer = tf.keras.optimizers.Adam(lr=0.001)

    # Load the example training config and point its outputs at the cwd.
    with open(config_path) as f:
        train_config = yaml.load(f, Loader=yaml.Loader)
    train_config.update({"outdir": "./"})
    train_config.update({"var_train_expr": var_train_expr})

    trainer = FastSpeech2Trainer(
        config=train_config,
        strategy=return_strategy(),
        steps=0,
        epochs=0,
        is_mixed_precision=False,
    )
    trainer.compile(model, optimizer)

    len_trainable_vars = len(trainer._trainable_variables)
    all_trainable_vars = len(model.trainable_variables)
    if var_train_expr is None:
        # No filter: every variable stays trainable.
        tf.debugging.assert_equal(len_trainable_vars, all_trainable_vars)
    else:
        tf.debugging.assert_less(len_trainable_vars, all_trainable_vars)
@pytest.mark.parametrize("num_hidden_layers,n_speakers", [(2, 1), (3, 2), (4, 3)])
def test_fastspeech_trainable(num_hidden_layers, n_speakers):
    """Smoke-test that one training step of FastSpeech2 runs end to end."""
    import time

    config = FastSpeech2Config(
        encoder_num_hidden_layers=num_hidden_layers,
        decoder_num_hidden_layers=num_hidden_layers + 1,
        n_speakers=n_speakers,
    )
    fastspeech2 = TFFastSpeech2(config, name="fastspeech")
    optimizer = tf.keras.optimizers.Adam(lr=0.001)

    # fake inputs (removed the original's unused `attention_mask` local)
    input_ids = tf.convert_to_tensor([[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]], tf.int32)
    speaker_ids = tf.convert_to_tensor([0], tf.int32)
    duration_gts = tf.convert_to_tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], tf.int32)
    f0_gts = tf.convert_to_tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], tf.float32)
    energy_gts = tf.convert_to_tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], tf.float32)
    mel_gts = tf.random.uniform(shape=[1, 10, 80], dtype=tf.float32)

    @tf.function
    def one_step_training():
        with tf.GradientTape() as tape:
            mel_outputs_before, _, duration_outputs, _, _ = fastspeech2(
                input_ids, speaker_ids, duration_gts, f0_gts, energy_gts, training=True,
            )
            duration_loss = tf.keras.losses.MeanSquaredError()(
                duration_gts, duration_outputs
            )
            mel_loss = tf.keras.losses.MeanSquaredError()(mel_gts, mel_outputs_before)
            loss = duration_loss + mel_loss
        gradients = tape.gradient(loss, fastspeech2.trainable_variables)
        optimizer.apply_gradients(zip(gradients, fastspeech2.trainable_variables))
        tf.print(loss)

    # BUG FIX: the original printed `time.time() - start` inside the loop,
    # which raises NameError on the first iteration (start unset until i==1).
    # First call traces the graph; only the second call is timed.
    for i in range(2):
        if i == 1:
            start = time.time()
        one_step_training()
    print(time.time() - start)
| 4,805 | 35.969231 | 88 | py |
sgmcmc_ssm_code | sgmcmc_ssm_code-master/sgmcmc_ssm/sgmcmc_sampler.py | import numpy as np
import pandas as pd
import time
from datetime import timedelta
import logging
from .evaluator import BaseEvaluator
logger = logging.getLogger(name=__name__)
NOISE_NUGGET=1e-9
# SGMCMCSampler
class SGMCMCSampler(object):
""" Base Class for SGMCMC with Time Series """
def __init__(self, **kwargs):
raise NotImplementedError()
## Init Functions
def setup(self, **kwargs):
# Depreciated
raise NotImplementedError()
def prior_init(self):
self.parameters = self.prior.sample_prior()
return self.parameters
## Loglikelihood Functions
def exact_loglikelihood(self, tqdm=None):
""" Return the exact loglikelihood given the current parameters """
loglikelihood = self.message_helper.marginal_loglikelihood(
observations=self.observations,
parameters=self.parameters,
forward_message=self.forward_message,
backward_message=self.backward_message,
tqdm=tqdm,
)
return loglikelihood
def exact_logjoint(self, return_loglike=False, tqdm=None):
""" Return the loglikelihood + logprior given the current parameters """
loglikelihood = self.exact_loglikelihood(tqdm=tqdm)
logprior = self.prior.logprior(self.parameters)
if return_loglike:
return dict(
logjoint=loglikelihood + logprior,
loglikelihood=loglikelihood,
)
else:
return loglikelihood + logprior
    def predictive_loglikelihood(self, kind='marginal', num_steps_ahead=10,
            subsequence_length=-1, minibatch_size=1, buffer_length=10,
            num_samples=1000, parameters=None, observations=None,
            **kwargs):
        """Estimate the num_steps_ahead predictive loglikelihood.

        Args:
            kind (string): 'marginal' (message passing) or 'pf' (particle
                filter) estimator.
            num_steps_ahead (int): prediction horizon.
            subsequence_length (int): subsequence length (-1 = full sequence).
            minibatch_size (int): number of subsequences averaged over.
            buffer_length (int): buffer size on each side of a subsequence.
            num_samples (int): particle count for kind='pf'.
            parameters: optional parameters override (defaults to current).
            observations: optional observations override.

        Returns:
            scalar for kind='marginal'; array of length num_steps_ahead+1
            for kind='pf'.

        Raises:
            ValueError: on unrecognized ``kind``.
        """
        if parameters is None:
            parameters = self.parameters
        observations = self._get_observations(observations)
        T = observations.shape[0]
        if kind == 'marginal':
            pred_loglikelihood = 0.0
            for s in range(0, minibatch_size):
                # Draw a random subsequence plus its left/right buffers.
                out = self._random_subsequence_and_buffers(buffer_length,
                    subsequence_length=subsequence_length,
                    T=T)
                # Approximate Pr(x | y < start) by filtering over the left buffer.
                forward_message = self.message_helper.forward_message(
                    observations[
                        out['left_buffer_start']:out['subsequence_start']
                    ],
                    self.parameters,
                    forward_message=self.forward_message,
                    tqdm=kwargs.get('tqdm', None),
                )
                # Noisy Loglikelihood should use only forward pass
                # E.g. log Pr(y) \approx \sum_s log Pr(y_s | y<min(s))
                pred_loglikelihood_S = (
                    self.message_helper.predictive_loglikelihood(
                        observations=observations,
                        parameters=parameters,
                        forward_message=forward_message,
                        backward_message=self.backward_message,
                        lag=num_steps_ahead,
                        tqdm=kwargs.get('tqdm', None),
                    ))
                # Rescale the subsequence estimate to the full sequence length.
                pred_loglikelihood += (
                    pred_loglikelihood_S * (T-num_steps_ahead)/(
                        out['subsequence_end'] - out['subsequence_start'] - \
                        num_steps_ahead
                    ))
            pred_loglikelihood *= 1.0/minibatch_size
            return pred_loglikelihood
        elif kind == 'pf':
            if kwargs.get("N", None) is None:
                kwargs['N'] = num_samples
            # One estimate per horizon 0..num_steps_ahead.
            pred_loglikelihood = np.zeros(num_steps_ahead+1)
            for s in range(0, minibatch_size):
                out = self._random_subsequence_and_buffers(
                    buffer_length=buffer_length,
                    subsequence_length=subsequence_length,
                    T=T)
                # Subsequence bounds relative to the buffered slice.
                relative_start = (out['subsequence_start'] -
                    out['left_buffer_start'])
                relative_end = (out['subsequence_end'] -
                    out['left_buffer_start'])
                buffer_ = observations[
                    out['left_buffer_start']:
                    out['right_buffer_end']
                ]
                pred_loglike_add = (
                    self.message_helper
                    .pf_predictive_loglikelihood_estimate(
                        observations=buffer_,
                        parameters=self.parameters,
                        num_steps_ahead=num_steps_ahead,
                        subsequence_start=relative_start,
                        subsequence_end=relative_end,
                        **kwargs)
                )
                # Rescale each horizon's estimate to the full sequence length.
                for ll in range(num_steps_ahead+1):
                    pred_loglikelihood[ll] += pred_loglike_add[ll] * (T-ll)/(
                        out['subsequence_end'] - out['subsequence_start']-ll
                    )
            pred_loglikelihood *= 1.0/minibatch_size
            return pred_loglikelihood
        else:
            raise ValueError("Unrecognized kind = {0}".format(kind))
    def noisy_loglikelihood(self, kind='marginal',
            subsequence_length=-1, minibatch_size=1, buffer_length=10,
            num_samples=None, observations=None,
            **kwargs):
        """ Subsequence Approximation to loglikelihood
        Args:
            kind (string): how to estimate the loglikelihood
                ('marginal', 'complete', or 'pf')
            subsequence_length (int): length of subsequence used in evaluation
                (-1 uses the full sequence)
            minibatch_size (int): number of subsequences
            buffer_length (int): length of each subsequence buffer
            num_samples (int): latent samples ('complete') / particles ('pf')
            observations (ndarray): optional observations override
        Returns:
            float: minibatch-averaged loglikelihood estimate
        Raises:
            ValueError: on unrecognized ``kind`` or NaN estimate
        """
        observations = self._get_observations(observations)
        T = observations.shape[0]
        noisy_loglikelihood = 0.0
        if kind == 'marginal':
            for s in range(0, minibatch_size):
                out = self._random_subsequence_and_buffers(buffer_length,
                    subsequence_length=subsequence_length,
                    T=T)
                # Filter over the left buffer to approximate the prefix message.
                forward_message = self.message_helper.forward_message(
                    observations[
                        out['left_buffer_start']:out['subsequence_start']
                    ],
                    self.parameters,
                    forward_message=self.forward_message,
                    tqdm=kwargs.get('tqdm', None),
                )
                # Noisy Loglikelihood should use only forward pass
                # E.g. log Pr(y) \approx \sum_s log Pr(y_s | y<min(s))
                noisy_loglikelihood += (
                    self.message_helper.marginal_loglikelihood(
                        observations=observations[
                            out['subsequence_start']:out['subsequence_end']
                        ],
                        parameters=self.parameters,
                        weights=out['weights'],
                        forward_message=forward_message,
                        backward_message=self.backward_message,
                        tqdm=kwargs.get('tqdm', None),
                    ) - forward_message['log_constant']
                )
        elif kind == 'complete':
            for s in range(0, minibatch_size):
                out = self._random_subsequence_and_buffers(
                    buffer_length=buffer_length,
                    subsequence_length=subsequence_length,
                    T=T)
                buffer_ = observations[
                    out['left_buffer_start']:out['right_buffer_end']
                ]
                # Draw Samples:
                latent_buffer = self.sample_x(
                    parameters=self.parameters,
                    observations=buffer_,
                    num_samples=num_samples,
                )
                # Subsequence bounds relative to the buffered slice.
                relative_start = out['subsequence_start']-out['left_buffer_start']
                relative_end = out['subsequence_end']-out['left_buffer_start']
                forward_message = {}
                if relative_start > 0:
                    # Condition on the sampled latent just before the subsequence.
                    forward_message = dict(
                        x_prev = latent_buffer[relative_start-1]
                    )
                noisy_loglikelihood += \
                    self.message_helper.complete_data_loglikelihood(
                        observations=observations[
                            out['subsequence_start']:out['subsequence_end']
                        ],
                        latent_vars=latent_buffer[relative_start:relative_end],
                        weights=out['weights'],
                        parameters=self.parameters,
                        forward_message=forward_message,
                    )
        elif kind == 'pf':
            if kwargs.get("N", None) is None:
                kwargs['N'] = num_samples
            noisy_loglikelihood = 0.0
            for s in range(0, minibatch_size):
                out = self._random_subsequence_and_buffers(
                    buffer_length=buffer_length,
                    subsequence_length=subsequence_length,
                    T=T)
                relative_start = (out['subsequence_start'] -
                    out['left_buffer_start'])
                relative_end = (out['subsequence_end'] -
                    out['left_buffer_start'])
                buffer_ = observations[
                    out['left_buffer_start']:
                    out['right_buffer_end']
                ]
                # Particle-filter estimate over the buffered slice.
                noisy_loglikelihood += (
                    self.message_helper
                    .pf_loglikelihood_estimate(
                        observations=buffer_,
                        parameters=self.parameters,
                        weights=out['weights'],
                        subsequence_start=relative_start,
                        subsequence_end=relative_end,
                        **kwargs)
                )
        else:
            raise ValueError("Unrecognized kind = {0}".format(kind))
        noisy_loglikelihood *= 1.0/minibatch_size
        if np.isnan(noisy_loglikelihood):
            raise ValueError("NaNs in loglikelihood")
        return noisy_loglikelihood
def noisy_logjoint(self, return_loglike=False, **kwargs):
""" Return the loglikelihood + logprior given the current parameters """
loglikelihood = self.noisy_loglikelihood(**kwargs)
logprior = self.prior.logprior(self.parameters)
if return_loglike:
return dict(
logjoint=loglikelihood + logprior,
loglikelihood=loglikelihood,
)
else:
return loglikelihood + logprior
## Gradient Functions
def _random_subsequence_and_buffers(self, buffer_length,
subsequence_length, T=None):
""" Get a subsequence and the forward and backward message approx"""
if T is None:
T = self._get_T()
if buffer_length == -1:
buffer_length = T
if (subsequence_length == -1) or (T-subsequence_length <= 0):
subsequence_start = 0
subsequence_end = T
weights = None
else:
subsequence_start, subsequence_end, weights = \
random_subsequence_and_weights(
S=subsequence_length,
T=T,
partition_style=self.options.get('partition_style'),
)
left_buffer_start = max(0, subsequence_start - buffer_length)
right_buffer_end = min(T, subsequence_end + buffer_length)
out = dict(
subsequence_start = subsequence_start,
subsequence_end = subsequence_end,
left_buffer_start = left_buffer_start,
right_buffer_end = right_buffer_end,
weights = weights,
)
return out
    def _single_noisy_grad_loglikelihood(self, buffer_dict, kind='marginal',
            num_samples=None, observations=None, parameters=None, **kwargs):
        """Gradient of the loglikelihood over one buffered subsequence.

        Args:
            buffer_dict (dict): output of ``_random_subsequence_and_buffers``.
            kind (string): 'marginal', 'complete', or 'pf' estimator.
            num_samples (int): latent samples ('complete') / particles ('pf').
            observations (ndarray): optional observations override.
            parameters: optional parameters override (defaults to current).

        Returns:
            dict: per-variable gradient estimates.

        Raises:
            ValueError: on unrecognized ``kind``.
        """
        # buffer_dict is the output of _random_subsequence_and_buffers
        observations = self._get_observations(observations, check_shape=False)
        if parameters is None:
            parameters = self.parameters
        T = observations.shape[0]
        if kind == 'marginal':
            # Approximate boundary messages by filtering/smoothing the buffers.
            forward_message = self.message_helper.forward_message(
                observations[
                    buffer_dict['left_buffer_start']:
                    buffer_dict['subsequence_start']
                ],
                parameters,
                forward_message=self.forward_message)
            backward_message = self.message_helper.backward_message(
                observations[
                    buffer_dict['subsequence_end']:
                    buffer_dict['right_buffer_end']
                ],
                parameters,
                backward_message=self.backward_message,
            )
            noisy_grad = (
                self.message_helper
                .gradient_marginal_loglikelihood(
                    observations=observations[
                        buffer_dict['subsequence_start']:
                        buffer_dict['subsequence_end']
                    ],
                    parameters=parameters,
                    weights=buffer_dict['weights'],
                    forward_message=forward_message,
                    backward_message=backward_message,
                    **kwargs
                )
            )
        elif kind == 'complete':
            buffer_ = observations[
                buffer_dict['left_buffer_start']:
                buffer_dict['right_buffer_end']
            ]
            # Draw Samples:
            latent_buffer = self.sample_x(
                parameters=parameters,
                observations=buffer_,
                num_samples=num_samples,
            )
            # Subsequence bounds relative to the buffered slice.
            relative_start = (buffer_dict['subsequence_start'] -
                buffer_dict['left_buffer_start'])
            relative_end = (buffer_dict['subsequence_end'] -
                buffer_dict['left_buffer_start'])
            forward_message = {}
            if relative_start > 0:
                # Condition on the sampled latent just before the subsequence.
                forward_message = dict(
                    x_prev = latent_buffer[relative_start-1]
                )
            noisy_grad = (
                self.message_helper
                .gradient_complete_data_loglikelihood(
                    observations=observations[
                        buffer_dict['subsequence_start']:
                        buffer_dict['subsequence_end']
                    ],
                    latent_vars=latent_buffer[relative_start:relative_end],
                    parameters=parameters,
                    weights=buffer_dict['weights'],
                    forward_message=forward_message,
                    **kwargs)
            )
        elif kind == 'pf':
            if kwargs.get("N", None) is None:
                kwargs['N'] = num_samples
            relative_start = (buffer_dict['subsequence_start'] -
                buffer_dict['left_buffer_start'])
            relative_end = (buffer_dict['subsequence_end'] -
                buffer_dict['left_buffer_start'])
            buffer_ = observations[
                buffer_dict['left_buffer_start']:
                buffer_dict['right_buffer_end']
            ]
            # Particle-filter gradient estimate over the buffered slice.
            noisy_grad = (
                self.message_helper
                .pf_gradient_estimate(
                    observations=buffer_,
                    parameters=self.parameters,
                    subsequence_start=relative_start,
                    subsequence_end=relative_end,
                    weights=buffer_dict['weights'],
                    **kwargs)
            )
        else:
            raise ValueError("Unrecognized kind = {0}".format(kind))
        return noisy_grad
def _noisy_grad_loglikelihood(self,
subsequence_length=-1, minibatch_size=1, buffer_length=0,
observations=None, buffer_dicts=None, **kwargs):
observations = self._get_observations(observations, check_shape=False)
T = observations.shape[0]
if buffer_dicts is None:
buffer_dicts = [
self._random_subsequence_and_buffers(
buffer_length=buffer_length,
subsequence_length=subsequence_length,
T=T)
for _ in range(minibatch_size)
]
elif len(buffer_dicts) != minibatch_size:
raise ValueError("len(buffer_dicts != minibatch_size")
noisy_grad = {var: np.zeros_like(value)
for var, value in self.parameters.as_dict().items()}
for s in range(0, minibatch_size):
noisy_grad_add = self._single_noisy_grad_loglikelihood(
buffer_dict=buffer_dicts[s],
observations=observations,
**kwargs,
)
for var in noisy_grad:
noisy_grad[var] += noisy_grad_add[var] * 1.0/minibatch_size
if np.any(np.isnan(noisy_grad[var])):
raise ValueError("NaNs in gradient of {0}".format(var))
if np.linalg.norm(noisy_grad[var]) > 1e16:
logger.warning("Norm of noisy_grad_loglike[{1} > 1e16: {0}".format(
noisy_grad[var], var))
return noisy_grad
    def noisy_gradient(self, preconditioner=None, is_scaled=True, **kwargs):
        """ Noisy Gradient Estimate
        noisy_gradient = -grad tilde{U}(theta)
            = grad marginal loglike + grad logprior
        Monte Carlo Estimate of gradient (using buffering)
        Args:
            preconditioner (object): preconditioner for gradients
            is_scaled (boolean): scale gradient by 1/T
            **kwargs: arguments for `self._noisy_grad_loglikelihood()`
                For example: minibatch_size, buffer_length, use_analytic
        Returns:
            noisy_gradient (dict): dict of gradient vectors
        """
        noisy_grad_loglike = \
            self._noisy_grad_loglikelihood(**kwargs)
        # Prior gradient is evaluated at the same parameters as the likelihood.
        noisy_grad_prior = self.prior.grad_logprior(
            parameters=kwargs.get('parameters',self.parameters),
            **kwargs
        )
        # Sum prior and likelihood contributions per variable.
        noisy_gradient = {var: noisy_grad_prior[var] + noisy_grad_loglike[var]
                for var in noisy_grad_prior}
        if preconditioner is None:
            if is_scaled:
                # Scale by 1/T so step sizes are comparable across sequence lengths.
                for var in noisy_gradient:
                    noisy_gradient[var] /= self._get_T(**kwargs)
        else:
            # Preconditioner applies both the metric and the 1/T scaling.
            scale = 1.0/self._get_T(**kwargs) if is_scaled else 1.0
            noisy_gradient = preconditioner.precondition(noisy_gradient,
                parameters=kwargs.get('parameters',self.parameters),
                scale=scale)
        return noisy_gradient
## Sampler/Optimizer Step Functions
def step_sgd(self, epsilon, **kwargs):
""" One step of Stochastic Gradient Descent
(Learns the MAP, not a sample from the posterior)
Args:
epsilon (double): step size
**kwargs (kwargs): to pass to self.noisy_gradient
minibatch_size (int): number of subsequences to sample from
buffer_length (int): length of buffer to use
Returns:
parameters (Parameters): sampled parameters after one step
"""
delta = self.noisy_gradient(**kwargs)
for var in self.parameters.var_dict:
self.parameters.var_dict[var] += epsilon * delta[var]
return self.parameters
def step_precondition_sgd(self, epsilon, preconditioner, **kwargs):
""" One Step of Preconditioned Stochastic Gradient Descent
Args:
epsilon (double): step size
preconditioner (object): preconditioner
**kwargs (kwargs): to pass to self.noisy_gradient
minibatch_size (int): number of subsequences to sample from
buffer_length (int): length of buffer to use
Returns:
parameters (Parameters): sampled parameters after one step
"""
delta = self.noisy_gradient(preconditioner=preconditioner, **kwargs)
for var in self.parameters.var_dict:
self.parameters.var_dict[var] += epsilon * delta[var]
return self.parameters
def step_adagrad(self, epsilon, **kwargs):
""" One step of adagrad
(Learns the MAP, not a sample from the posterior)
Args:
epsilon (double): step size
**kwargs (kwargs): to pass to self.noisy_gradient
"""
if not hasattr(self, "_adagrad_moments"):
self._adagrad_moments = dict(t=0, G=0.0)
g = self.parameters.from_dict_to_vector(self.noisy_gradient(**kwargs))
t = self._adagrad_moments['t'] + 1
G = self._adagrad_moments['G'] + g**2
delta_vec = g/np.sqrt(G + NOISE_NUGGET)
delta = self.parameters.from_vector_to_dict(delta_vec,
**self.parameters.dim)
for var in self.parameters.var_dict:
self.parameters.var_dict[var] += epsilon * delta[var]
self._adagrad_moments['t'] = t
self._adagrad_moments['G'] = G
return self.parameters
def _get_sgmcmc_noise(self, is_scaled=True, preconditioner=None,
**kwargs):
if is_scaled:
scale = 1.0 / self._get_T(**kwargs)
else:
scale = 1.0
if preconditioner is not None:
white_noise = preconditioner.precondition_noise(
parameters=self.parameters,
scale=scale,
)
else:
white_noise = {var: np.random.normal(
loc=0,
scale=np.sqrt(scale),
size=value.shape
) for var, value in self.parameters.as_dict().items()}
return white_noise
def sample_sgld(self, epsilon, **kwargs):
""" One Step of Stochastic Gradient Langevin Dynamics
Args:
epsilon (double): step size
**kwargs (kwargs): to pass to self.noisy_gradient
Returns:
parameters (Parameters): sampled parameters after one step
"""
if "preconditioner" in kwargs:
raise ValueError("Use SGRLD instead")
delta = self.noisy_gradient(**kwargs)
white_noise = self._get_sgmcmc_noise(**kwargs)
for var in self.parameters.var_dict:
self.parameters.var_dict[var] += \
epsilon * delta[var] + np.sqrt(2.0*epsilon) * white_noise[var]
return self.parameters
def sample_sgld_cv(self, epsilon, centering_parameters, centering_gradient,
**kwargs):
""" One Step of Stochastic Gradient Langevin Dynamics with Control Variates
grad = full_gradient(centering_parameters) + \
sub_gradient(parameters) - sub_gradient(centering_gradient)
Args:
epsilon (double): step size
centering_parameters (Parameters): centering parameters
centering_gradient (dict): full data grad of centering_parameters
**kwargs (kwargs): to pass to self.noisy_gradient
Returns:
parameters (Parameters): sampled parameters after one step
"""
if "preconditioner" in kwargs:
raise ValueError("Use SGRLD instead")
buffer_dicts = [
self._random_subsequence_and_buffers(
buffer_length=kwargs.get('buffer_length', 0),
subsequence_length=kwargs.get('subsequence_length', -1),
T=self._get_T(**kwargs),
)
for _ in range(kwargs.get('minibatch_size', 1))
]
cur_subseq_grad = self.noisy_gradient(
buffer_dicts=buffer_dicts, **kwargs)
centering_subseq_grad = self.noisy_gradient(
parameters=centering_parameters,
buffer_dicts=buffer_dicts, **kwargs)
delta = {}
for var in cur_subseq_grad.keys():
delta[var] = centering_gradient[var] + \
cur_subseq_grad[var] - centering_subseq_grad[var]
white_noise = self._get_sgmcmc_noise(**kwargs)
for var in self.parameters.var_dict:
self.parameters.var_dict[var] += \
epsilon * delta[var] + np.sqrt(2.0*epsilon) * white_noise[var]
return self.parameters
def sample_sgrld(self, epsilon, preconditioner, **kwargs):
""" One Step of Stochastic Gradient Riemannian Langevin Dynamics
theta += epsilon * (D(theta) * grad_logjoint + correction_term) + \
N(0, 2 epsilon D(theta))
Args:
epsilon (double): step size
preconditioner (object): preconditioner
Returns:
parameters (Parameters): sampled parameters after one step
"""
if kwargs.get("is_scaled", True):
scale = 1.0 / self._get_T(**kwargs)
else:
scale = 1.0
delta = self.noisy_gradient(preconditioner=preconditioner, **kwargs)
white_noise = self._get_sgmcmc_noise(
preconditioner=preconditioner, **kwargs)
correction = preconditioner.correction_term(
self.parameters, scale=scale)
for var in self.parameters.var_dict:
self.parameters.var_dict[var] += \
epsilon * (delta[var] + correction[var]) + \
np.sqrt(2.0*epsilon) * white_noise[var]
return self.parameters
def sample_gibbs(self):
""" One Step of Blocked Gibbs Sampler
Returns:
parameters (Parameters): sampled parameters after one step
"""
raise NotImplementedError()
def project_parameters(self, **kwargs):
""" Project parameters to valid values + fix constants
See **kwargs in __init__ for more details
"""
self.parameters.project_parameters(**self.options, **kwargs)
return self.parameters
## Fit Functions
    def fit(self, iter_type, num_iters, output_all=False, observations=None,
            init_parameters=None, tqdm=None, catch_interrupt=False, **kwargs):
        """ Run multiple learning / inference steps
        Args:
            iter_type (string):
                'SGD', 'ADAGRAD', 'SGLD', 'SGRLD', 'Gibbs', etc.
            num_iters (int): number of steps
            output_all (bool): whether to output each iteration's parameters
            observations (ndarray): observations to fit on, optional
            init_parameters (Parameters): initial parameters, optional
            tqdm (tqdm): progress bar wrapper
            catch_interrupt (bool): terminate early on Ctrl-C
            **kwargs: for each iter
                e.g. steps_per_iter, epsilon, minibatch_size,
                    subsequence_length, buffer_length,
                    preconditioner, pf_kwargs, etc.
                see documentation for get_iter_step()
        Returns: (depends on output_all arg)
            parameters (Parameters):
            parameters_list (list of Parameters): length num_iters+1
        """
        # Optionally override the stored observations / starting parameters
        if observations is not None:
            self.observations = observations
        if init_parameters is not None:
            self.parameters = init_parameters.copy()
        # Resolve iter_type into a list of (method name, kwargs) pairs
        iter_func_names, iter_func_kwargs = \
                self.get_iter_step(iter_type, tqdm=tqdm, **kwargs)
        if output_all:
            # Slot 0 holds the initial parameters; slot it holds iter it's
            parameters_list = [None]*(num_iters+1)
            parameters_list[0] = self.parameters.copy()
        # Fit Loop
        pbar = range(1, num_iters+1)
        if tqdm is not None:
            pbar = tqdm(pbar)
            pbar.set_description("fit using {0} iters".format(iter_type))
        for it in pbar:
            # Run iter funcs
            try:
                # Dispatch each step method by name on self
                for func_name, func_kwargs in zip(iter_func_names,
                        iter_func_kwargs):
                    getattr(self, func_name)(**func_kwargs)
                if output_all:
                    parameters_list[it] = self.parameters.copy()
            except KeyboardInterrupt as e:
                if catch_interrupt:
                    # `logger` is the module-level logger defined elsewhere
                    logger.warning("Interrupt in fit:\n{0}\n".format(e) + \
                            "Stopping early after {0} iters".format(it))
                    if output_all:
                        # Return only the iterations completed before Ctrl-C
                        return parameters_list[:it]
                    else:
                        return self.parameters.copy()
                else:
                    raise e
        if output_all:
            return parameters_list
        else:
            return self.parameters.copy()
def fit_timed(self, iter_type, max_time=60, min_save_time=1,
observations=None, init_parameters=None, tqdm=None, tqdm_iter=False,
catch_interrupt=False,
**kwargs):
""" Run multiple learning / inference steps
Args:
iter_type (string):
'SGD', 'ADAGRAD', 'SGLD', 'SGRLD', 'Gibbs', etc.
max_time (float): maxium time in seconds to run fit
min_save_time (float): min time between saved parameters
observations (ndarray): observations to fit on, optional
init_parameters (Parameters): initial parameters, optional
tqdm (tqdm): progress bar wrapper
catch_interrupt (bool): terminate early on Ctrl-C
**kwargs: for each iter
e.g. steps_per_iter, epsilon, minibatch_size,
subsequence_length, buffer_length,
preconditioner, pf_kwargs, etc.
see documentation for get_iter_step()
Returns:
parameters_list (list of Parameters):
times (list of float): fit time for each parameter
"""
parameters_list, times, _ = self.fit_evaluate(
iter_type=iter_type,
max_time=max_time, min_save_time=min_save_time,
observations=observations, init_parameters=init_parameters,
tqdm=tqdm, tqdm_iter=tqdm_iter,
catch_interrupt=catch_interrupt,
**kwargs)
return parameters_list['parameters'].tolist(), times['time'].tolist()
def fit_evaluate(self, iter_type, metric_functions=None,
max_num_iters=None, max_time=60, min_save_time=1,
observations=None, init_parameters=None, tqdm=None, tqdm_iter=False,
catch_interrupt=False, total_max_time=None,
**kwargs):
""" Run multiple learning / inference steps with evaluator
Args:
iter_type (string):
'SGD', 'ADAGRAD', 'SGLD', 'SGRLD', 'Gibbs', etc.
metric_functions (func or list of funcs): evaluation functions
Each function takes a sampler and returns a dict or list of dict
dict(metric=string, variable=string, value=double) for each
See metric_functions.py for examples
max_num_iters (int): maximum number of iterations to save
max_time (float): maxium time in seconds to run sampler
does *not* include time used by evaluator
min_save_time (float): min time between saved parameters
observations (ndarray): observations to fit on, optional
init_parameters (Parameters): initial parameters, optional
tqdm (tqdm): progress bar wrapper
tqdm_iter (bool): progress bar for each iteration
catch_interrupt (bool): terminate early on Ctrl-C
total_max_time (float): maximum time in seconds to run fit_evaluate
**kwargs: for each iter
e.g. steps_per_iter, epsilon, minibatch_size,
subsequence_length, buffer_length,
preconditioner, pf_kwargs, etc.
see documentation for get_iter_step()
Returns:
parameters_list (pd.DataFrame): parameters saved
columns:
iteration: number of iter_func_kwargs steps called
parameters: Parameters
times (pd.DataFrame): fit time for each saved parameter
columns:
iteration: number of iter_func_kwargs steps called
time: time used by iter_func_kwargs
metrics (pd.DataFrame): metric for each saved parameter
columns:
iteration: number of iter_func_kwargs steps called
metric: name of metric
variable: name of variable
value: value of metric for variable
"""
if observations is not None:
self.observations = observations
if init_parameters is not None:
self.parameters = init_parameters.copy()
evaluator = BaseEvaluator(
sampler=self,
metric_functions=metric_functions,
)
iter_func_names, iter_func_kwargs = \
self.get_iter_step(iter_type, tqdm=tqdm, **kwargs)
if tqdm_iter:
iter_func_kwargs[0]['tqdm'] = tqdm
num_iters = max_time//min_save_time
if max_num_iters is not None:
num_iters = min(num_iters, max_num_iters)
iteration = 0
total_time = 0
parameters_list = [None]*(num_iters+1)
times = np.zeros(num_iters+1)*np.nan
iterations = np.zeros(num_iters+1, dtype=int)
fit_start_time = time.time()
last_save_time = time.time()
parameters_list[0] = self.parameters.copy()
times[0] = total_time
iterations[0] = iteration
evaluator.eval_metric_functions(iteration=iteration)
# Fit Loop
pbar = range(1, num_iters+1)
if tqdm is not None:
pbar = tqdm(pbar)
for it in pbar:
if tqdm is not None:
pbar.set_description("fit using {0}".format(iter_type) + \
" on iter {0}".format(iteration)
)
try:
for step in range(1000):
# Run iter funcs
for func_name, func_kwargs in zip(iter_func_names,
iter_func_kwargs):
getattr(self, func_name)(**func_kwargs)
if time.time() - last_save_time > min_save_time:
parameters_list[it] = self.parameters.copy()
total_time += time.time() - last_save_time
times[it] = total_time
iteration += step + 1
iterations[it] = iteration
evaluator.eval_metric_functions(iteration=iteration)
last_save_time = time.time()
break
except KeyboardInterrupt as e:
if catch_interrupt:
logger.warning("Interrupt in fit_timed:\n{0}\n".format(e) + \
"Stopping early after {0} iters".format(it))
break
else:
raise e
if total_time > max_time:
# Break it total time on iter_funcs exceeds max time
break
if total_max_time is not None:
if fit_start_time - time.time() > total_max_time:
# Break it total time on fit_evalute exceeds total max time
break
valid = np.sum(~np.isnan(times))
parameters_list = pd.DataFrame(dict(
iteration = iterations[0:valid],
parameters = parameters_list[0:valid],
))
times = pd.DataFrame(dict(
iteration = iterations[0:valid],
time = times[0:valid],
))
metric = evaluator.get_metrics()
return parameters_list, times, metric
def get_iter_step(self, iter_type, steps_per_iteration=1, **kwargs):
# Returns iter_func_names, iter_func_kwargs
project_kwargs = kwargs.get("project_kwargs",{})
if iter_type == 'Gibbs':
iter_func_names = ["sample_gibbs", "project_parameters"]
iter_func_kwargs = [{}, project_kwargs]
elif iter_type == 'custom':
iter_func_names = kwargs.get("iter_func_names")
iter_func_kwargs = kwargs.get("iter_func_kwargs")
elif iter_type in ['SGD', 'ADAGRAD', 'SGLD', 'SGRD', 'SGRLD']:
grad_kwargs = dict(
epsilon = kwargs['epsilon'],
subsequence_length = kwargs['subsequence_length'],
buffer_length = kwargs['buffer_length'],
minibatch_size = kwargs.get('minibatch_size', 1),
kind = kwargs.get("kind", "marginal"),
num_samples = kwargs.get("num_samples", None),
**kwargs.get("pf_kwargs", {})
)
if 'num_sequences' in kwargs:
grad_kwargs['num_sequences'] = kwargs['num_sequences']
if 'use_scir' in kwargs:
grad_kwargs['use_scir'] = kwargs['use_scir']
if iter_type == 'SGD':
iter_func_names = ['step_sgd', 'project_parameters']
iter_func_kwargs = [grad_kwargs, project_kwargs]
elif iter_type == 'ADAGRAD':
iter_func_names = ['step_adagrad', 'project_parameters']
iter_func_kwargs = [grad_kwargs, project_kwargs]
elif iter_type == 'SGLD':
iter_func_names = ['sample_sgld', 'project_parameters']
iter_func_kwargs = [grad_kwargs, project_kwargs]
elif iter_type == 'SGRD':
grad_kwargs['preconditioner'] = self._get_preconditioner(
kwargs.get('preconditioner')
)
iter_func_names = ['step_precondition_sgd', 'project_parameters']
iter_func_kwargs = [grad_kwargs, project_kwargs]
elif iter_type == 'SGRLD':
grad_kwargs['preconditioner'] = self._get_preconditioner(
kwargs.get('preconditioner')
)
iter_func_names = ['sample_sgrld', 'project_parameters']
iter_func_kwargs = [grad_kwargs, project_kwargs]
else:
raise ValueError("Unrecognized iter_type {0}".format(iter_type))
iter_func_names = iter_func_names * steps_per_iteration
iter_func_kwargs = iter_func_kwargs * steps_per_iteration
return iter_func_names, iter_func_kwargs
def _get_preconditioner(self, preconditioner=None):
if preconditioner is None:
raise NotImplementedError("No Default Preconditioner for {}".format(
self.name))
return preconditioner
## Predict Functions
    def predict(self, target='latent', distr=None, lag=None,
            return_distr=None, num_samples=None,
            kind='analytic', observations=None, parameters=None,
            **kwargs):
        """ Make predictions based on fit
        Args:
            target (string): variable to predict
                'latent' - latent variables
                'y' - observation variables
            distr (string): what distribution to sample/target
                'marginal' - marginal (default for return_distr)
                'joint' - joint (default for sampling)
            lag (int): distribution is p(U_t | Y_{1:t+lag})
                default/None -> use all observations
            return_distr (bool): return distribution
                (default is True if num_samples is None otherwise True)
            num_samples (int): number of samples return
            kind (string): how to calculate distribution
                'analytic' - use message passing
                'pf' - use particle filter/smoother
            observations (ndarray): observations to use
            parameters (Parameters): parameters
            kwargs: key word arguments
                tqdm (tqdm): progress bar
                see message_helper.latent_var_distr,
                    message_helper.y_distr,
                    message_helper.latent_var_sample,
                    message_helper.y_sample,
                    message_helper.pf_latent_var_distr,
                    message_helper.pf_y_distr,
                for more details
        Returns:
            Depends on target, return_distr, num_samples
        """
        # Fall back to stored observations / parameters when not supplied
        observations = self._get_observations(observations)
        if parameters is None:
            parameters = self.parameters
        if return_distr is None:
            # pf only supports distributions; analytic defaults to a
            # distribution unless a sample count was requested
            if kind == 'pf':
                return_distr = True
            else:
                return_distr = (num_samples is None)
        if kind == 'analytic':
            if return_distr:
                # Analytic distribution via message passing
                if distr is None:
                    distr = 'marginal'
                if target == 'latent':
                    return self.message_helper.latent_var_distr(
                            distr=distr,
                            lag=lag,
                            observations=observations,
                            parameters=parameters,
                            **kwargs,
                            )
                elif target == 'y':
                    return self.message_helper.y_distr(
                            distr=distr,
                            lag=lag,
                            observations=observations,
                            parameters=parameters,
                            **kwargs,
                            )
                else:
                    raise ValueError("Unrecognized target '{0}'".format(target))
            else:
                # Analytic sampling; joint distribution by default
                if distr is None:
                    distr = 'joint'
                if target == 'latent':
                    return self.message_helper.latent_var_sample(
                            distr=distr,
                            lag=lag,
                            num_samples=num_samples,
                            observations=observations,
                            parameters=parameters,
                            **kwargs,
                            )
                elif target == 'y':
                    return self.message_helper.y_sample(
                            distr=distr,
                            lag=lag,
                            num_samples=num_samples,
                            observations=observations,
                            parameters=parameters,
                            **kwargs,
                            )
                else:
                    raise ValueError("Unrecognized target '{0}'".format(target))
        elif kind == 'pf':
            if return_distr:
                # Particle filter/smoother approximation to the distribution
                if target == 'latent':
                    return self.message_helper.pf_latent_var_distr(
                            lag=lag,
                            observations=observations,
                            parameters=parameters,
                            **kwargs,
                            )
                elif target == 'y':
                    return self.message_helper.pf_y_distr(
                            distr=distr,
                            lag=lag,
                            observations=observations,
                            parameters=parameters,
                            **kwargs,
                            )
                else:
                    raise ValueError("Unrecognized target '{0}'".format(target))
            else:
                raise ValueError("return_distr must be True for kind = pf")
        else:
            raise ValueError("Unrecognized kind == '{0}'".format(kind))
def simulate(self, T, init_message=None,
return_distr=False, num_samples=None,
kind='analytic', observations=None, parameters=None,
**kwargs):
""" Simulate dynamics
Args:
T (int): length of simulated data
init_message (dict): initial forward message
return_distr (bool): return distribution (default is False)
num_samples (int): number of samples return
kind (string): how to calculate distribution
'analytic' - use message passing
'pf' - use particle filter/smoother
observations (ndarray): observations
parameters (Parameters): parameters
Returns:
dict with key values depending on return_distr and num_samples
latent_vars (ndarray): simulated latent vars
observations (ndarray): simulated observations
latent_mean/latent_prob/latent_cov
observation_mean/observation_prob/observations_cov
"""
observations = self._get_observations(observations)
if parameters is None:
parameters = self.parameters
if kind == 'analytic':
if init_message is None:
init_message = self.message_helper.forward_message(
observations=observations,
parameters=parameters,
)
if return_distr:
return self.message_helper.simulate_distr(
T=T,
parameters=parameters,
init_message=init_message,
**kwargs
)
else:
return self.message_helper.simulate(
T=T,
parameters=parameters,
init_message=init_message,
num_samples=num_samples,
**kwargs
)
elif kind == 'pf':
raise NotImplementedError()
else:
raise ValueError("Unrecognized kind == '{0}'".format(kind))
## Attributes + Misc Helper Functions
    @property
    def observations(self):
        """ Observations the sampler is fit on (validated on assignment). """
        return self._observations
    @observations.setter
    def observations(self, observations):
        # Validate before storing; subclasses customize the check by
        # overriding _check_observation_shape
        self._check_observation_shape(observations)
        self._observations = observations
        return
    def _check_observation_shape(self, observations):
        """ Validate the shape of observations.
        Base implementation is a no-op hook; subclasses override it and
        raise ValueError on malformed input.
        """
        return
def _get_observations(self, observations, check_shape=True):
if observations is None:
observations = self.observations
if observations is None:
raise ValueError("observations not specified")
elif check_shape:
self._check_observation_shape(observations)
return observations
def _get_T(self, **kwargs):
T = kwargs.get('T')
if T is None:
observations = kwargs.get('observations')
observations = self._get_observations(observations)
T = observations.shape[0]
return T
# SeqSGMCMCSampler
class SeqSGMCMCSampler(object):
    """ Mixin for handling a list of sequences.

    Overrides the single-sequence sampler methods to aggregate
    loglikelihoods / gradients / predictions over a list of observation
    sequences (each an ndarray with time along axis 0).
    """
    def _get_T(self, **kwargs):
        """ Total number of timesteps summed over all sequences """
        T = kwargs.get('T')
        if T is None:
            observations = kwargs.get('observations')
            observations = self._get_observations(observations)
            # BUGFIX: use the builtin sum; np.sum over a generator relied on
            # deprecated numpy special-casing and fails on newer numpy
            T = sum(np.shape(observation)[0] for observation in observations)
        return T
    def _check_observation_shape(self, observations):
        """ Validate each sequence, reporting the index of a bad sequence """
        if observations is not None:
            for ii, observation in enumerate(observations):
                try:
                    super()._check_observation_shape(observations=observation)
                except ValueError as e:
                    raise ValueError("Error in observations[{0}] :\n{1}".format(
                        ii, e))
    def exact_loglikelihood(self, observations=None, tqdm=None):
        """ Return exact loglikelihood over all observation sequences """
        observations = self._get_observations(observations)
        loglikelihood = 0
        pbar = observations
        if tqdm is not None:
            pbar = tqdm(pbar)
            pbar.set_description("Seq Loglikelihood")
        # Sequences are independent: total loglikelihood is the sum
        for observation in pbar:
            loglikelihood += self.message_helper.marginal_loglikelihood(
                    observations=observation,
                    parameters=self.parameters,
                    forward_message=self.forward_message,
                    backward_message=self.backward_message,
                    tqdm=tqdm,
                    )
        return loglikelihood
    def noisy_loglikelihood(self, num_sequences=-1, observations=None,
            tqdm=None, **kwargs):
        """ Subsequence Approximation to loglikelihood
        Args:
            num_sequences (int): how many observation sequences to use
                (default = -1) is to use all observation sequences
        """
        observations = self._get_observations(observations)
        loglikelihood = 0
        S = 0.0
        sequence_indices = np.arange(len(observations))
        if num_sequences != -1:
            # Uniform subsample of sequences without replacement
            sequence_indices = np.random.choice(
                    sequence_indices, num_sequences, replace=False,
                    )
        pbar = sequence_indices
        if tqdm is not None:
            pbar = tqdm(pbar)
            pbar.set_description("Seq Loglikelihood")
        for sequence_index in pbar:
            S += observations[sequence_index].shape[0]
            loglikelihood += super().noisy_loglikelihood(
                    observations=observations[sequence_index],
                    tqdm=tqdm,
                    **kwargs)
        if num_sequences != -1:
            # Rescale the subsample estimate by total_T / sampled_T
            loglikelihood *= self._get_T(**kwargs) / S
        return loglikelihood
    def predictive_loglikelihood(self, num_sequences=-1, observations=None,
            tqdm=None, **kwargs):
        """ Return the predictive loglikelihood given the parameters
        Args:
            num_sequences (int): how many observation sequences to use
                (default = -1) is to use all observation sequences
        """
        observations = self._get_observations(observations)
        predictive_loglikelihood = 0
        S = 0.0
        sequence_indices = np.arange(len(observations))
        if num_sequences != -1:
            sequence_indices = np.random.choice(
                    sequence_indices, num_sequences, replace=False,
                    )
        pbar = sequence_indices
        if tqdm is not None:
            pbar = tqdm(pbar)
            pbar.set_description("Seq Pred Loglikelihood")
        for sequence_index in pbar:
            S += observations[sequence_index].shape[0]
            predictive_loglikelihood += super().predictive_loglikelihood(
                    observations=observations[sequence_index],
                    tqdm=tqdm,
                    **kwargs)
        if num_sequences != -1:
            # Rescale the subsample estimate by total_T / sampled_T
            predictive_loglikelihood *= self._get_T(**kwargs) / S
        return predictive_loglikelihood
    def _noisy_grad_loglikelihood(self, num_sequences=-1, **kwargs):
        """ Subsequence approximation to gradient of loglikelihood
        Args:
            num_sequences (int): how many observation sequences to use
                (default = -1) is to use all observation sequences
        """
        noisy_grad_loglike = None
        S = 0.0
        sequence_indices = np.arange(len(self.observations))
        if num_sequences != -1:
            sequence_indices = np.random.choice(
                    sequence_indices, num_sequences, replace=False,
                    )
        for sequence_index in sequence_indices:
            noisy_grad_index = super()._noisy_grad_loglikelihood(
                    observations=self.observations[sequence_index],
                    **kwargs)
            S += self.observations[sequence_index].shape[0]
            # Accumulate per-variable gradients across sequences
            if noisy_grad_loglike is None:
                noisy_grad_loglike = {var: noisy_grad_index[var]
                        for var in noisy_grad_index.keys()
                        }
            else:
                noisy_grad_loglike = {
                        var: noisy_grad_loglike[var] + noisy_grad_index[var]
                        for var in noisy_grad_index.keys()
                        }
        if num_sequences != -1:
            # Rescale the subsample estimate by total_T / sampled_T
            noisy_grad_loglike = {
                    var: noisy_grad_loglike[var] * self._get_T(**kwargs) / S
                    for var in noisy_grad_index.keys()
                    }
        return noisy_grad_loglike
    def predict(self, target='latent', distr=None, lag=None,
            return_distr=None, num_samples=None,
            kind='analytic', observations=None, parameters=None,
            tqdm=None,
            **kwargs):
        """ Make predictions for each observation sequence
        Args:
            target (string): variable to predict
                'latent' - latent variables
                'y' - observation variables
            distr (string): what distribution to sample/target
                'marginal' - marginal (default for return_distr)
                'joint' - joint (default for sampling)
            lag (int): distribution is p(U_t | Y_{1:t+lag})
                default/None -> use all observations
            return_distr (bool): return distribution
                (default is True if num_samples is None otherwise True)
            num_samples (int): number of samples return
            kind (string): how to calculate distribution
                'analytic' - use message passing
                'pf' - use particle filter/smoother
            observations (list of ndarray): observations to use
            parameters (Parameters): parameters
            kwargs: key word arguments
                tqdm (tqdm): progress bar
                see message_helper.latent_var_distr,
                    message_helper.y_distr,
                    message_helper.latent_var_sample,
                    message_helper.y_sample,
                    message_helper.pf_latent_var_distr,
                    message_helper.pf_y_distr,
                for more details
        Returns:
            list of per-sequence results
            (each depends on target, return_distr, num_samples)
        """
        observations = self._get_observations(observations)
        if parameters is None:
            parameters = self.parameters
        if return_distr is None:
            if kind == 'pf':
                return_distr = True
            else:
                return_distr = (num_samples is None)
        output = []
        if tqdm is not None:
            kwargs['tqdm'] = tqdm
            observations = tqdm(observations, desc='sequence #')
        if kind == 'analytic':
            if return_distr:
                if distr is None:
                    distr = 'marginal'
                if target == 'latent':
                    for observation in observations:
                        output.append(
                            self.message_helper.latent_var_distr(
                                distr=distr,
                                lag=lag,
                                observations=observation,
                                parameters=parameters,
                                **kwargs,
                                )
                            )
                elif target == 'y':
                    for observation in observations:
                        output.append(
                            self.message_helper.y_distr(
                                distr=distr,
                                lag=lag,
                                observations=observation,
                                parameters=parameters,
                                **kwargs,
                                )
                            )
                else:
                    raise ValueError("Unrecognized target '{0}'".format(target))
            else:
                if distr is None:
                    distr = 'joint'
                if target == 'latent':
                    for observation in observations:
                        output.append(
                            self.message_helper.latent_var_sample(
                                distr=distr,
                                lag=lag,
                                num_samples=num_samples,
                                observations=observation,
                                parameters=parameters,
                                **kwargs,
                                )
                            )
                elif target == 'y':
                    for observation in observations:
                        output.append(
                            self.message_helper.y_sample(
                                distr=distr,
                                lag=lag,
                                num_samples=num_samples,
                                observations=observation,
                                parameters=parameters,
                                **kwargs,
                                )
                            )
                else:
                    raise ValueError("Unrecognized target '{0}'".format(target))
        elif kind == 'pf':
            if return_distr:
                if target == 'latent':
                    for observation in observations:
                        output.append(
                            self.message_helper.pf_latent_var_distr(
                                lag=lag,
                                observations=observation,
                                parameters=parameters,
                                **kwargs,
                                )
                            )
                elif target == 'y':
                    for observation in observations:
                        output.append(
                            self.message_helper.pf_y_distr(
                                distr=distr,
                                lag=lag,
                                observations=observation,
                                parameters=parameters,
                                **kwargs,
                                )
                            )
                else:
                    raise ValueError("Unrecognized target '{0}'".format(target))
            else:
                raise ValueError("return_distr must be True for kind = pf")
        else:
            raise ValueError("Unrecognized kind == '{0}'".format(kind))
        return output
# SGMCMC Helper
class SGMCMCHelper(object):
""" Base Class for SGMCMC Helper """
def __init__(self, **kwargs):
raise NotImplementedError()
## Message Passing Functions
def forward_message(self, observations, parameters, forward_message=None,
**kwargs):
""" Calculate forward messages over the observations
Pr(u_t | y_{<=t}) for y_t in observations
Args:
observations (ndarray): observations
parameters (parameters): parameters
forward_message (dict): latent state prior Pr(u_{-1} | y_{<=-1})
Returns:
forward_message (dict): same format as forward_message
"""
if forward_message is None:
forward_message = self.default_forward_message
if np.shape(observations)[0] == 0: return forward_message
forward_message = self._forward_messages(
observations=observations,
parameters=parameters,
forward_message=forward_message,
only_return_last=True,
**kwargs
)
return forward_message
def backward_message(self, observations, parameters, backward_message=None,
**kwargs):
""" Calculate backward messages over the observations
Pr(y_{>t} | u_t) for y_t in observations
Args:
observations (ndarray): observations
parameters (parameters): parameters
backward_message (dict): backward message Pr(y_{>T-1} | u_{T-1})
Returns:
backward_message (dict): same format as forward_message
"""
if backward_message is None:
backward_message = self.default_backward_message
if np.shape(observations)[0] == 0: return backward_message
backward_message = self._backward_messages(
observations=observations,
parameters=parameters,
backward_message=backward_message,
only_return_last=True,
**kwargs
)
return backward_message
def forward_pass(self, observations, parameters,
forward_message=None, include_init_message=False, **kwargs):
""" Calculate forward messages over the observations
Pr(u_t | y_{<=t}) for y_t in observations
Args:
observations (ndarray): observations
parameters (parameters): parameters
forward_message (dict): latent state prior Pr(u_{-1} | y_{<=-1})
include_init_message (boolean) whether to include t = -1
Returns:
forward_messages (list of dict): same format as forward_message
"""
if forward_message is None:
forward_message = self.default_forward_message
if np.shape(observations)[0] == 0:
if include_init_message:
return [forward_message]
else:
return []
forward_messages = self._forward_messages(
observations=observations,
parameters=parameters,
forward_message=forward_message,
**kwargs
)
if include_init_message:
return forward_messages
else:
return forward_messages[1:]
def backward_pass(self, observations, parameters,
backward_message=None, include_init_message=False, **kwargs):
""" Calculate backward message over the observations
Pr(y_{>t} | u_t) for y_t in observations
Args:
observations (ndarray): observations
parameters (parameters): parameters
backward_message (dict): backward message Pr(y_{>T-1} | u_{T-1})
include_init_message (boolean) whether to include t = -1
Returns:
backward_messages (list of dict): same format as backward_message
"""
if backward_message is None:
backward_message = self.default_backward_message
if np.shape(observations)[0] == 0:
if include_init_message:
return [backward_message]
else:
return []
backward_messages = self._backward_messages(
observations=observations,
parameters=parameters,
backward_message=backward_message,
**kwargs
)
if include_init_message:
return backward_messages
else:
return backward_messages[1:]
    def _forward_messages(self, observations, parameters, forward_message,
            weights=None, only_return_last=False, **kwargs):
        """ Model-specific forward message recursion; implemented by subclasses.
        Returns the full list of messages, or only the final one when
        only_return_last is True.
        """
        raise NotImplementedError()
    def _backward_messages(self, observations, parameters, backward_message,
            weights=None, only_return_last=False, **kwargs):
        """ Model-specific backward message recursion; implemented by subclasses.
        Returns the full list of messages, or only the final one when
        only_return_last is True.
        """
        raise NotImplementedError()
    def _forward_message(self, observations, parameters, forward_message,
            **kwargs):
        # Convenience wrapper: run the forward recursion, keep only the
        # final message
        return self._forward_messages(observations, parameters, forward_message,
                only_return_last=True, **kwargs)
    def _backward_message(self, observations, parameters, backward_message,
            **kwargs):
        # Convenience wrapper: run the backward recursion, keep only the
        # final message
        return self._backward_messages(observations, parameters,
                backward_message, only_return_last=True, **kwargs)
## Loglikelihood Functions
def marginal_loglikelihood(self, observations, parameters,
forward_message=None, backward_message=None, weights=None,
tqdm=None):
""" Calculate the marginal loglikelihood Pr(y | theta)
Args:
observations (ndarray): observations
parameters (Parameters): parameters
forward_message (dict): latent state forward message
backward_message (dict): latent state backward message
weights (ndarray): optional, weights for loglikelihood calculation
Returns:
marginal_loglikelihood (float): marginal loglikelihood
"""
raise NotImplementedError()
def predictive_loglikelihood(self, observations, parameters,
forward_message=None, backward_message=None, lag=1):
""" Calculate the predictive loglikelihood
pred_loglikelihood = sum_t Pr(y_{t+lag} | y_{<t} theta)
Args:
observations (ndarray): observations
parameters (Parameters): parameters
forward_message (dict): latent state forward message
backward_message (dict): latent state backward message
lag (int): how many steps ahead to predict
Returns:
pred_loglikelihood (float): predictive loglikelihood
"""
raise NotImplementedError()
def complete_data_loglikelihood(self, observations, latent_vars, parameters,
forward_message=None, weights=None, **kwargs):
""" Calculate the complete data loglikelihood Pr(y, u | theta)
Args:
observations (ndarray): observations
latent_vars (ndarray): latent vars
parameters (Parameters): parameters
forward_message (dict): latent state forward message
weights (ndarray): optional, weights for loglikelihood calculation
Returns:
complete_data_loglikelihood (float): complete data loglikelihood
"""
raise NotImplementedError()
## Gradient Functions
def gradient_marginal_loglikelihood(self, observations, parameters,
        forward_message=None, backward_message=None, weights=None, **kwargs):
    """ Gradient Calculation
    Gradient of log Pr(y_[0:T) | y_<0, y_>=T, parameters)
    Args:
        observations (ndarray): num_obs observations
        parameters (Parameters): parameters
        forward_message (dict): Pr(u_-1, y_<0 | parameters)
        backward_message (dict): Pr(y_>T | u_T, parameters)
        weights (ndarray): how to weight terms
    Returns
        grad (dict): grad of variables in parameters
    Raises:
        NotImplementedError: abstract hook; subclasses must override.
    """
    raise NotImplementedError()
def gradient_complete_data_loglikelihood(self, observations, latent_vars,
        parameters, forward_message=None, weights=None, **kwargs):
    """ Gradient Calculation
    Gradient of log Pr(y_[0:T), u_[0:T) | y_<0, parameters)
    Args:
        observations (ndarray): num_obs observations
        latent_vars (ndarray): num_obs latent vars
        parameters (Parameters): parameters
        forward_message (dict): Pr(u_-1, y_<0 | parameters)
        weights (ndarray): how to weight terms
    Returns
        grad (dict): grad of variables in parameters
    Raises:
        NotImplementedError: abstract hook; subclasses must override.
    """
    raise NotImplementedError()
## Gibbs Functions
def parameters_gibbs_sample(self, observations, latent_vars, prior,
        **kwargs):
    """ Draw a Gibbs sample of the parameters given the data.

    Samples theta ~ Pr(theta | y, u) by first reducing (y, u) to the
    model's sufficient statistics, then drawing from the prior's
    posterior conditional on those statistics.

    Args:
        observations (ndarray): num_obs observations
        latent_vars (ndarray): num_obs latent variables
        prior (prior): prior
    Returns
        sample_parameters (parameters): sampled parameters
    """
    # Data -> sufficient statistics -> posterior draw from the prior.
    stats = self.calc_gibbs_sufficient_statistic(
            observations, latent_vars, **kwargs)
    return prior.sample_posterior(stats, **kwargs)
def calc_gibbs_sufficient_statistic(self, observations, latent_vars,
        **kwargs):
    """ Gibbs Sample Sufficient Statistics
    Args:
        observations (ndarray): num_obs observations
        latent_vars (ndarray): latent vars
    Returns:
        sufficient_stat (dict of dict)
            keys are parameter
            values are dict for parameter's sufficient statistics
    Raises:
        NotImplementedError: abstract hook; subclasses must override.
    """
    raise NotImplementedError()
## Predict Functions
def latent_var_distr(self, observations, parameters,
        distr='marginal', lag=None,
        forward_message=None, backward_message=None,
        tqdm=None, **kwargs):
    """ Sample latent vars distribution conditional on observations
        Returns distribution for (u_t | y_{<= t+lag}, theta)
    Args:
        observations (ndarray): observations
        parameters (LGSSMParameters): parameters
        lag (int): what observations to condition on, None = all
        forward_message (dict): forward message
        backward_message (dict): backward message
    Returns:
        Depends on latent var type, Gaussian -> mean, cov; Discrete -> prob
    Raises:
        NotImplementedError: abstract hook; subclasses must override.
    """
    raise NotImplementedError()
def latent_var_sample(self, observations, parameters,
        distr='joint', lag=None, num_samples=None,
        forward_message=None, backward_message=None,
        include_init=False, tqdm=None, **kwargs):
    """ Sample latent vars conditional on observations
        Samples u_t ~ u_t | y_{<= t+lag}, theta
    Args:
        observations (ndarray): observations
        parameters (LGSSMParameters): parameters
        lag (int): what observations to condition on, None = all
        num_samples (int, optional) number of samples
        forward_message (dict): forward message
        backward_message (dict): backward message
        include_init (bool, optional): whether to sample u_{-1} | y
    Returns:
        sampled_latent_vars : shape depends on num_samples parameters
            last dimension is num_samples
    Raises:
        NotImplementedError: abstract hook; subclasses must override.
    """
    raise NotImplementedError()
def y_distr(self, observations, parameters,
        distr='marginal', lag=None,
        forward_message=None, backward_message=None,
        latent_var=None, tqdm=None, **kwargs):
    """ Sample observation distribution conditional on observations
        Returns distribution for (y_t* | y_{<= t+lag}, theta)
    Args:
        observations (ndarray): observations
        parameters (LGSSMParameters): parameters
        lag (int): what observations to condition on, None = all
        forward_message (dict): forward message
        backward_message (dict): backward message
        latent_var (ndarray): latent vars
            if provided, will return (y_t* | u_t, theta) instead
    Returns:
        Depends on observation type, Gaussian -> mean, cov; Discrete -> prob
    Raises:
        NotImplementedError: abstract hook; subclasses must override.
    """
    raise NotImplementedError()
def y_sample(self, observations, parameters,
        distr='joint', lag=None, num_samples=None,
        forward_message=None, backward_message=None,
        latent_var=None, tqdm=None, **kwargs):
    """ Sample new observations conditional on observations
        Samples y_t* ~ y_t* | y_{<= t+lag}, theta
    Args:
        observations (ndarray): observations
        parameters (LGSSMParameters): parameters
        lag (int): what observations to condition on, None = all
        num_samples (int, optional) number of samples
        forward_message (dict): forward message
        backward_message (dict): backward message
        latent_var (ndarray): latent vars
            if provided, will sample from (y_t* | u_t, theta) instead
            must match num_samples parameters
    Returns:
        sampled_observations : shape depends on num_samples parameters
            last dimension is num_samples
    Raises:
        NotImplementedError: abstract hook; subclasses must override.
    """
    raise NotImplementedError()
def simulate_distr(self, T, parameters, init_message=None, tqdm=None):
    # Abstract hook: subclasses return the distribution of a length-T
    # simulated sequence.
    # NOTE(review): semantics inferred from the name -- confirm in subclasses.
    raise NotImplementedError()
def simulate(self, T, parameters, init_message=None, num_samples=None, tqdm=None):
    # Abstract hook: subclasses simulate T steps from the model.
    # NOTE(review): semantics inferred from the name -- confirm in subclasses.
    raise NotImplementedError()
## PF Functions
def pf_loglikelihood_estimate(self, observations, parameters,
        subsequence_start=0, subsequence_end=None, weights=None,
        pf="poyiadjis_N", N=1000, kernel='prior', forward_message=None,
        **kwargs):
    """ Particle Filter Marginal Log-Likelihood Estimate
    Args:
        observations (ndarray): num_obs buffered observations
        parameters (Parameters): parameters
        weights (ndarray): weights (to correct stochastic approx)
        subsequence_start (int): relative start of subsequence
            (0:subsequence_start are left buffer)
        subsequence_end (int): relative end of subsequence
            (subsequence_end: is right buffer)
        pf (string): particle filter name
            "nemeth" - use Nemeth et al. O(N)
            "poyiadjis_N" - use Poyiadjis et al. O(N)
            "poyiadjis_N2" - use Poyiadjis et al. O(N^2)
            "paris" - use PaRIS Olsson + Westborn O(N log N)
        N (int): number of particles used by particle filter
        kernel (string): kernel to use
            "prior" - bootstrap filter P(u_t | u_{t-1})
            "optimal" - bootstrap filter P(u_t | u_{t-1}, Y_t)
        forward_message (dict): prior for buffered subsequence
        **kwargs - additional keyword args for individual filters
    Return:
        loglikelihood (double): marginal log likelihood estimate
    Raises:
        NotImplementedError: abstract hook; subclasses must override.
    """
    raise NotImplementedError()
def pf_predictive_loglikelihood_estimate(self, observations, parameters,
        num_steps_ahead=1,
        subsequence_start=0, subsequence_end=None, weights=None,
        pf="filter", N=1000, kernel=None, forward_message=None,
        **kwargs):
    """ Particle Filter Predictive Log-Likelihood Estimate
    Returns predictive log-likelihood for k = [0,1, ...,num_steps_ahead]
    Args:
        observations (ndarray): num_obs buffered observations
        parameters (Parameters): parameters
        num_steps_ahead (int): number of steps
        subsequence_start (int): relative start of subsequence
            (0:subsequence_start are left buffer)
        subsequence_end (int): relative end of subsequence
            (subsequence_end: is right buffer)
        N (int): number of particles used by particle filter
        kernel (string): kernel to use
        forward_message (dict): prior for buffered subsequence
        **kwargs - additional keyword args for individual filters
    Return:
        predictive_loglikelihood (num_steps_ahead + 1 ndarray)
    Raises:
        NotImplementedError: abstract hook; subclasses must override.
    """
    raise NotImplementedError()
def pf_gradient_estimate(self, observations, parameters,
        subsequence_start=0, subsequence_end=None, weights=None,
        pf="poyiadjis_N", N=1000, kernel=None, forward_message=None,
        **kwargs):
    """ Particle Smoother Gradient Estimate
    Args:
        observations (ndarray): num_obs buffered observations
        parameters (Parameters): parameters
        subsequence_start (int): relative start of subsequence
            (0:subsequence_start are left buffer)
        subsequence_end (int): relative end of subsequence
            (subsequence_end: is right buffer)
        weights (ndarray): weights (to correct stochastic approx)
        pf (string): particle filter name
            "nemeth" - use Nemeth et al. O(N)
            "poyiadjis_N" - use Poyiadjis et al. O(N)
            "poyiadjis_N2" - use Poyiadjis et al. O(N^2)
            "paris" - use PaRIS Olsson + Westborn O(N log N)
        N (int): number of particles used by particle filter
        kernel (string): kernel to use
            "prior" - bootstrap filter P(u_t | u_{t-1})
            "optimal" - bootstrap filter P(u_t | u_{t-1}, Y_t)
        forward_message (dict): prior for buffered subsequence
        **kwargs - additional keyword args for individual filters
    Return:
        grad (dict): grad of variables in parameters
    Raises:
        NotImplementedError: abstract hook; subclasses must override.
    """
    raise NotImplementedError()
def pf_latent_var_distr(self, observations, parameters, lag=None,
        subsequence_start=0, subsequence_end=None,
        pf="poyiadjis_N", N=1000, kernel=None, forward_message=None,
        **kwargs):
    """ Sample latent vars distribution conditional on observations
        Returns distribution for (u_t | y_{<= t+lag}, theta)
        Estimated using particle filter/smoother
    Args:
        observations (ndarray): observations
        parameters (LGSSMParameters): parameters
        lag (int): what observations to condition on, None = all
        subsequence_start (int): relative start of subsequence
            (0:subsequence_start are left buffer)
        subsequence_end (int): relative end of subsequence
            (subsequence_end: is right buffer)
        pf (string): particle filter name
            "nemeth" - use Nemeth et al. O(N)
            "poyiadjis_N" - use Poyiadjis et al. O(N)
            "poyiadjis_N2" - use Poyiadjis et al. O(N^2)
            "paris" - use PaRIS Olsson + Westborn O(N log N)
        N (int): number of particles used by particle filter
        kernel (string): kernel to use
            "prior" - bootstrap filter P(u_t | u_{t-1})
            "optimal" - bootstrap filter P(u_t | u_{t-1}, Y_t)
        forward_message (dict): prior for buffered subsequence
        **kwargs - additional keyword args for individual filters
    Returns:
        Depends on latent var type, Gaussian -> mean, cov; Discrete -> prob
    Raises:
        NotImplementedError: abstract hook; subclasses must override.
    """
    raise NotImplementedError()
def pf_y_distr(self, observations, parameters,
        distr='marginal', lag=None,
        subsequence_start=0, subsequence_end=None,
        pf="poyiadjis_N", N=1000, kernel=None, forward_message=None,
        **kwargs):
    """ Sample observation distribution conditional on observations
        Returns distribution for (u_t | y_{<= t+lag}, theta)
        Estimated using particle filter/smoother
    Args:
        observations (ndarray): observations
        parameters (LGSSMParameters): parameters
        lag (int): what observations to condition on, None = all
        subsequence_start (int): relative start of subsequence
            (0:subsequence_start are left buffer)
        subsequence_end (int): relative end of subsequence
            (subsequence_end: is right buffer)
        pf (string): particle filter name
            "nemeth" - use Nemeth et al. O(N)
            "poyiadjis_N" - use Poyiadjis et al. O(N)
            "poyiadjis_N2" - use Poyiadjis et al. O(N^2)
            "paris" - use PaRIS Olsson + Westborn O(N log N)
        N (int): number of particles used by particle filter
        kernel (string): kernel to use
            "prior" - bootstrap filter P(u_t | u_{t-1})
            "optimal" - bootstrap filter P(u_t | u_{t-1}, Y_t)
        forward_message (dict): prior for buffered subsequence
        **kwargs - additional keyword args for individual filters
    Returns:
        Depends on observation type, Gaussian -> mean, cov; Discrete -> prob
    Raises:
        NotImplementedError: abstract hook; subclasses must override.
    """
    raise NotImplementedError()
# Helper Function for Sampling Subsequences
def random_subsequence_and_weights(S, T, partition_style=None):
    """ Sample a random length-S subsequence of [0, T) plus unbiasing weights.
    Args:
        S (int): length of subsequence
        T (int): length of full sequence
        partition_style (string): what type of partition
            'strict' - strict partition, with weights
            'uniform' - uniformly, with weights (default)
            'naive' - uniformly, with incorrect weights (not recommended)
    Returns:
        subsequence_start (int): start of subsequence (inclusive)
        subsequence_end (int): end of subsequence (exclusive)
        weights (ndarray): weights for [start,end)
    Raises:
        ValueError: if 'strict' is used and S does not evenly divide T,
            or if partition_style is unrecognized.
    """
    if partition_style is None:
        partition_style = 'uniform'
    if partition_style == 'strict':
        if T % S != 0:
            raise ValueError("S {0} does not evenly divide T {1}".format(S, T))
        subsequence_start = np.random.choice(np.arange(0, T//S)) * S
        subsequence_end = subsequence_start + S
        weights = np.ones(S, dtype=float)*T/S
    elif partition_style == 'uniform':
        subsequence_start = np.random.randint(0, T-S+1)
        subsequence_end = subsequence_start + S
        t = np.arange(subsequence_start, subsequence_end)
        # The number of length-S windows (start in [0, T-S]) containing index
        # t is min(t+1, T-t, S, T-S+1).  The original code applied the
        # left-edge and right-edge corrections in mutually exclusive branches,
        # which over-weighted indices near the opposite edge whenever T was
        # small relative to S (e.g. T=5, S=3); the elementwise minimum of all
        # bounds is exact for every S, T.
        num_sequences = np.minimum.reduce([
            t + 1,
            T - t,
            np.full_like(t, min(S, T-S+1)),
        ])
        weights = np.ones(S, dtype=float)*(T-S+1)/num_sequences
    elif partition_style == 'naive':
        # Not recommended because the weights are incorrect
        subsequence_start = np.random.randint(0, T-S+1)
        subsequence_end = subsequence_start + S
        weights = np.ones(S, dtype=float)*T/S
    else:
        raise ValueError("Unrecognized partition_style = '{0}'".format(
            partition_style))
    return int(subsequence_start), int(subsequence_end), weights
| 82,434 | 39.789213 | 86 | py |
CCasGNN | CCasGNN-main/layers.py | #encoding: utf-8
import torch
from torch_geometric.nn import GCNConv, GATConv
from math import sqrt
class Positional_GAT(torch.nn.Module):
    """Two-layer GAT encoder that re-injects positional embeddings.

    The location (positional) embedding is concatenated onto the node
    features before each attention layer, so both layers see position
    information explicitly.
    """
    def __init__(self, in_channels, out_channels, n_heads, location_embedding_dim, filters_1, filters_2, dropout):
        super(Positional_GAT, self).__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.n_heads = n_heads
        self.filters_1 = filters_1
        self.filters_2 = filters_2
        self.dropout = dropout
        self.location_embedding_dim = location_embedding_dim
        self.setup_layers()
    def setup_layers(self):
        # Layer 1 concatenates the per-head outputs; layer 2 averages the
        # heads (concat=False) so its output width is exactly out_channels.
        self.GAT_1 = GATConv(in_channels=self.in_channels,
                             out_channels=self.filters_1,
                             heads=self.n_heads, dropout=0.1)
        self.GAT_2 = GATConv(in_channels=self.filters_1 * self.n_heads + self.location_embedding_dim,
                             out_channels=self.out_channels,
                             heads=self.n_heads, dropout=0.1, concat=False)
    def forward(self, edge_indices, features, location_embedding):
        """Run both attention layers, re-concatenating location embeddings."""
        hidden = torch.cat((features, location_embedding), dim=-1)
        hidden = self.GAT_1(hidden, edge_indices)
        hidden = torch.nn.functional.dropout(torch.nn.functional.relu(hidden),
                                             p=self.dropout,
                                             training=self.training)
        hidden = torch.cat((hidden, location_embedding), dim=-1)
        return self.GAT_2(hidden, edge_indices)
class Positional_GCN(torch.nn.Module):
    """Two-layer GCN encoder that re-injects positional embeddings.

    The location (positional) embedding is concatenated onto the node
    features before each convolution, mirroring Positional_GAT.
    """
    def __init__(self, in_channels, out_channels, location_embedding_dim, filters_1, filters_2, dropout):
        """
        GCN function
        :param in_channels: nodes' input feature dimension
        :param out_channels: nodes' embedding dimension
        """
        super(Positional_GCN, self).__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.filters_1 = filters_1
        self.filters_2 = filters_2
        self.dropout = dropout
        self.location_embedding_dim = location_embedding_dim
        self.setup_layers()
    def setup_layers(self):
        # Second layer consumes layer-1 output plus the re-injected
        # location embedding.
        self.convolution_1 = GCNConv(self.in_channels, self.filters_1)
        self.convolution_2 = GCNConv(self.filters_1 + self.location_embedding_dim, self.out_channels)
    def forward(self, edge_indices, features, location_embedding):
        """
        Apply both graph convolutions.
        :param edge_indices: 2 * edge_number
        :param features: N * feature_size
        :return: N * out_channels node embeddings
        """
        hidden = torch.cat((features, location_embedding), dim=-1)
        hidden = self.convolution_1(hidden, edge_indices)
        hidden = torch.nn.functional.dropout(torch.nn.functional.relu(hidden),
                                             p=self.dropout,
                                             training=self.training)
        hidden = torch.cat((hidden, location_embedding), dim=-1)
        return self.convolution_2(hidden, edge_indices)
class MultiHeadGraphAttention(torch.nn.Module):
    """Multi-head scaled dot-product attention over node embeddings.

    Unlike standard attention, the scaled scores are passed through a
    LeakyReLU instead of a softmax before being applied to the values.
    """
    def __init__(self, num_heads, dim_in, dim_k, dim_v):
        super(MultiHeadGraphAttention, self).__init__()
        # Both projection widths must split evenly across the heads.
        assert dim_k % num_heads == 0 and dim_v % num_heads == 0
        self.num_heads = num_heads
        self.dim_in = dim_in
        self.dim_k = dim_k
        self.dim_v = dim_v
        self.linear_q = torch.nn.Linear(dim_in, dim_k, bias=False)
        self.linear_k = torch.nn.Linear(dim_in, dim_k, bias=False)
        self.linear_v = torch.nn.Linear(dim_in, dim_v, bias=False)
        self.leaky_relu = torch.nn.LeakyReLU(negative_slope=0.2)
        # Scores are scaled by 1/sqrt(per-head key width).
        self._nor_fact = 1 / sqrt(dim_k // num_heads)
    def forward(self, x):
        """Map x of shape (batch, n, dim_in) to (batch, n, dim_v)."""
        batch, n, dim_in = x.shape
        assert dim_in == self.dim_in
        heads = self.num_heads
        head_k = self.dim_k // heads
        head_v = self.dim_v // heads
        # Project and split into heads: (batch, heads, n, head_*).
        q = self.linear_q(x).reshape(batch, n, heads, head_k).transpose(1, 2)
        k = self.linear_k(x).reshape(batch, n, heads, head_k).transpose(1, 2)
        v = self.linear_v(x).reshape(batch, n, heads, head_v).transpose(1, 2)
        scores = self.leaky_relu(
            torch.matmul(q, k.transpose(2, 3)) * self._nor_fact)
        attended = torch.matmul(scores, v).transpose(1, 2)
        return attended.reshape(batch, n, self.dim_v)
class dens_Net(torch.nn.Module):
    """Shared three-layer MLP applied to two inputs in parallel.

    The same network (shared weights) maps both inputs; each output is
    passed through a final ReLU.
    """
    def __init__(self, dens_hiddensize, dens_dropout, dens_inputsize, dens_outputsize):
        super(dens_Net, self).__init__()
        self.inputsize = dens_inputsize
        self.dens_hiddensize = dens_hiddensize
        self.dens_dropout = dens_dropout
        self.outputsize = dens_outputsize
        self.setup_layers()
    def setup_layers(self):
        # Linear -> Dropout -> Linear -> Dropout -> Linear.
        stack = [
            torch.nn.Linear(self.inputsize, self.dens_hiddensize),
            torch.nn.Dropout(p=self.dens_dropout),
            torch.nn.Linear(self.dens_hiddensize, self.dens_hiddensize),
            torch.nn.Dropout(p=self.dens_dropout),
            torch.nn.Linear(self.dens_hiddensize, self.outputsize),
        ]
        self.dens_net = torch.nn.Sequential(*stack)
    def forward(self, x1, x2):
        """Return (ReLU(net(x1)), ReLU(net(x2))) with shared weights."""
        relu = torch.nn.functional.relu
        return relu(self.dens_net(x1)), relu(self.dens_net(x2))
class fuse_gate(torch.nn.Module):
    """Learnable gated combination of two branch predictions.

    Holds a (2, 1) weight vector omega, initialised to [0.5, 0.5], and
    returns omega^T @ x together with the two individual weights.
    """
    def __init__(self, batch_size, in_dim):
        super(fuse_gate, self).__init__()
        self.indim = in_dim
        self.batch_size = batch_size
        self.setup_layers()
    def setup_layers(self):
        # omega has shape (2, 1); its transpose mixes the two predictions.
        self.omega = torch.nn.Parameter(torch.tensor([[0.5], [0.5]]))
    def forward(self, x):
        mixing = self.omega.transpose(1, 0)
        fused = torch.matmul(mixing, x)
        return fused, self.omega[0], self.omega[1]
| 6,309 | 41.635135 | 180 | py |
CCasGNN | CCasGNN-main/CCasGNN.py | #encoding: utf-8
import torch
import json
import numpy as np
import copy
import time
import sys
import math
from layers import Positional_GCN, MultiHeadGraphAttention, dens_Net, Positional_GAT, fuse_gate
import scipy.stats as sci
class CCasGNN(torch.nn.Module):
    """Dual-branch cascade-size model.

    A GAT branch (directed edges) and a GCN + self-attention branch
    (undirected edges) each produce a prediction; a learnable fuse gate
    combines the two.
    """
    def __init__(self, args):
        super(CCasGNN, self).__init__()
        self.args = args
        self.number_of_features = self.args.number_of_features
        self.number_of_nodes = self.args.number_of_nodes
        self._setup_layers()
    def _setup_GCN_layers(self):
        # GCN branch consumes user + positional embeddings.
        self.GCN_layers = Positional_GCN(
            in_channels=self.args.user_embedding_dim + self.args.location_embedding_dim,
            out_channels=self.args.gcn_out_channel,
            location_embedding_dim=self.args.location_embedding_dim,
            filters_1=self.args.gcn_filters_1,
            filters_2=self.args.gcn_filters_2,
            dropout=self.args.gcn_dropout)
    def _setup_GAT_layers(self):
        self.GAT_layers = Positional_GAT(
            in_channels=self.args.user_embedding_dim + self.args.location_embedding_dim,
            out_channels=self.args.gcn_out_channel,
            n_heads=self.args.gat_n_heads,
            location_embedding_dim=self.args.location_embedding_dim,
            filters_1=self.args.gcn_filters_1,
            filters_2=self.args.gcn_filters_2,
            dropout=self.args.gcn_dropout)
    def _setup_MultiHead_att_layers(self):
        self.MultiHead_att_layers = MultiHeadGraphAttention(
            num_heads=self.args.att_num_heads,
            dim_in=self.args.gcn_out_channel,
            dim_k=self.args.att_dim_k,
            dim_v=self.args.att_dim_v)
    def _setup_dens_layers(self):
        self.dens_layers = dens_Net(
            dens_inputsize=self.args.gcn_out_channel,
            dens_hiddensize=self.args.dens_hiddensize,
            dens_dropout=self.args.dens_dropout,
            dens_outputsize=self.args.dens_outsize)
    def _setup_fuse_layers(self):
        self.fuse_layers = fuse_gate(batch_size=1, in_dim=2)
    def _setup_layers(self):
        # Keep this creation order: it fixes the RNG order of weight init.
        self._setup_GCN_layers()
        self._setup_MultiHead_att_layers()
        self._setup_dens_layers()
        self._setup_GAT_layers()
        self._setup_fuse_layers()
    def forward(self, data):
        """Return (fused prediction, omega1, omega2, GAT_pred, GCN_pred)."""
        n_nodes = data["true_nodes_num"]
        node_feats = data['features'][:n_nodes]
        loc_emb = data['location_embedding'][:n_nodes]
        # GAT branch: directed edges, then mean-pool the node embeddings.
        gat_nodes = torch.nn.functional.relu(
            self.GAT_layers(data['edges'], node_feats, loc_emb))[:n_nodes]
        gat_graph = torch.mean(gat_nodes, dim=0, keepdim=False)
        # GCN branch: undirected edges, self-attention, then mean-pool.
        gcn_nodes = torch.nn.functional.relu(
            self.GCN_layers(data['undirected_edges'], node_feats, loc_emb))[:n_nodes]
        gcn_batch = gcn_nodes.unsqueeze(dim=0)            # (1, nodes, feat)
        gcn_att = self.MultiHead_att_layers(gcn_batch)    # (1, nodes, feat)
        gcn_graph = torch.mean(gcn_att, dim=1, keepdim=False).squeeze(dim=0)
        # Per-branch predictions, then gated fusion.
        GAT_pred, GCN_pred = self.dens_layers(gat_graph, gcn_graph)
        prediction, omega1, omega2 = self.fuse_layers(
            torch.cat((GAT_pred, GCN_pred), dim=0))
        return prediction, omega1, omega2, GAT_pred, GCN_pred
class CCasGNN_Trainer(torch.nn.Module):
    """Training harness for CCasGNN.

    Loads the cascade graphs from JSON, converts them to batched model
    inputs, and runs the train / validation / test loops.
    """
    def __init__(self, args):
        super(CCasGNN_Trainer, self).__init__()
        self.args = args
        self.setup_model()
    def setup_model(self):
        """Load the dataset splits and instantiate the CCasGNN model."""
        self.load_graph_data()
        self.model = CCasGNN(self.args)
    def load_graph_data(self):
        """Read the graph JSON file and split it into train/valid/test batches."""
        self.number_of_nodes = self.args.number_of_nodes
        self.number_of_features = self.args.number_of_features
        # Fix: close the file handle deterministically (was a bare open()).
        with open(self.args.graph_file_path, 'r') as f:
            self.graph_data = json.load(f)
        N = len(self.graph_data)  # the number of graphs
        train_start, valid_start, test_start = \
            0, int(N * self.args.train_ratio), int(N * (self.args.train_ratio + self.args.valid_ratio))
        train_graph_data = self.graph_data[0:valid_start]  # list type [dict,dict,...]
        valid_graph_data = self.graph_data[valid_start:test_start]
        test_graph_data = self.graph_data[test_start:N]
        self.train_batches, self.valid_batches, self.test_batches = [], [], []
        for i in range(0, len(train_graph_data), self.args.batch_size):
            self.train_batches.append(train_graph_data[i:i + self.args.batch_size])
        for j in range(0, len(valid_graph_data), self.args.batch_size):
            self.valid_batches.append(valid_graph_data[j:j + self.args.batch_size])
        for k in range(0, len(test_graph_data), self.args.batch_size):
            self.test_batches.append(test_graph_data[k:k + self.args.batch_size])
    def create_edges(self, data):
        """
        Create edge-index matrices for one cascade graph.
        Side effect: sets self.nodes_map / self.true_nodes_num for the
        subsequent create_features call on the same graph.
        :param data: graph dict with 'nodes' and 'edges'
        :return: (directed edges, undirected edges) as 2 x E LongTensors
        """
        self.nodes_map = [str(nodes_id) for nodes_id in data['nodes']]
        self.true_nodes_num = len(data['nodes'])
        edges = [[self.nodes_map.index(str(edge[0])), self.nodes_map.index(str(edge[1]))] for edge in data['edges']]
        undirected_edges = edges + [[self.nodes_map.index(str(edge[1])), self.nodes_map.index(str(edge[0]))] for edge in data['edges']]
        return torch.t(torch.LongTensor(edges)), torch.t(torch.LongTensor(undirected_edges))
    def create_location_embedding(self, omega=0.001):
        """Sinusoidal positional embedding, one row per (padded) node slot."""
        location_dim = self.args.location_embedding_dim
        location_emb = torch.zeros(self.number_of_nodes, location_dim)
        for i in range(self.number_of_nodes):
            for j in range(location_dim):
                # Even dims use sin, odd dims use cos, at geometrically
                # spaced frequencies (transformer-style encoding).
                if j % 2 == 0:
                    location_emb[i][j] = math.sin(i * math.pow(omega, j / location_dim))
                else:
                    location_emb[i][j] = math.cos(i * math.pow(omega, (j - 1) / location_dim))
        return location_emb
    def create_target(self, data):
        """Regression target: the cascade's final activated size."""
        return torch.tensor([data['activated_size']])
    def create_features(self, data):
        """Zero-padded per-node feature matrix filled from 'nodes_embedding'.

        Relies on self.nodes_map set by create_edges for the same graph.
        """
        features = np.zeros((self.number_of_nodes, self.args.user_embedding_dim))
        for nodes_id in data['nodes']:
            features[self.nodes_map.index(str(nodes_id))][:self.args.user_embedding_dim] = data['nodes_embedding'][str(nodes_id)]
        features = torch.FloatTensor(features)
        return features
    def create_input_data(self, data):
        """
        :param data: one data in the train/valid/test graph data
        :return: (to_pass_forward dict, activated_size target)
        """
        to_pass_forward = dict()
        activated_size = self.create_target(data)
        edges, undirected_edges = self.create_edges(data)
        features = self.create_features(data)
        # Bug fix: the original also called self.create_user_embedding(data),
        # but no such method exists anywhere in this class (the call raised
        # AttributeError) and its result was never used, so it is removed.
        location_embedding = self.create_location_embedding(omega=0.001)
        to_pass_forward["edges"] = edges
        to_pass_forward["undirected_edges"] = undirected_edges
        to_pass_forward["features"] = features
        to_pass_forward["true_nodes_num"] = self.true_nodes_num
        to_pass_forward['location_embedding'] = location_embedding
        return to_pass_forward, activated_size
    def create_forward_data(self, data_batches):
        """Convert raw graph batches into (inputs, targets) batch lists."""
        data_x, data_y = [], []
        for data_batch in data_batches:
            data_x_tmp, data_y_tmp = [], []
            for each_data in data_batch:
                input_data, target = self.create_input_data(each_data)
                data_x_tmp.append(input_data)
                data_y_tmp.append(target)
            data_x.append(copy.deepcopy(data_x_tmp))
            data_y.append(copy.deepcopy(data_y_tmp))
        return data_x, data_y
    def fit(self):
        """Train the model, periodically evaluating on valid/test splits."""
        print('\nLoading data.\n')
        self.model.train()
        train_data_x, train_data_y = self.create_forward_data(self.train_batches)
        valid_data_x, valid_data_y = self.create_forward_data(self.valid_batches)
        test_data_x, test_data_y = self.create_forward_data(self.test_batches)
        optimizer = torch.optim.Adam(self.model.parameters(),
                                     lr=self.args.learning_rate,
                                     weight_decay=self.args.weight_decay)
        time_start = time.time()
        print('\nTraining started.\n')
        for epoch in range(self.args.epochs):
            # Bug fix: evaluation()/test() put the model into eval mode at
            # checkpoints and the original never switched back, so later
            # epochs trained with dropout disabled.  Re-enable train mode
            # at the top of every epoch.
            self.model.train()
            losses = 0.
            average_loss = 0.
            for step, (train_x_batch, train_y_batch) in enumerate(zip(train_data_x, train_data_y)):
                optimizer.zero_grad()
                GAT_prediction_tensor = torch.tensor([])
                GCN_prediction_tensor = torch.tensor([])
                target_tensor = torch.tensor([])
                for k, (train_x, train_y) in enumerate(zip(train_x_batch, train_y_batch)):
                    prediction = self.model(train_x)
                    GAT_prediction_tensor = torch.cat((GAT_prediction_tensor, prediction[3].float()), 0)
                    GCN_prediction_tensor = torch.cat((GCN_prediction_tensor, prediction[4].float()), 0)
                    # Targets are log2(1 + activated_size) (MSLE-style loss).
                    target_tensor = torch.cat((target_tensor, torch.log2(train_y.float() + 1)), 0)
                    omega1 = prediction[1].data.float()
                    omega2 = prediction[2].data.float()
                # Weight each branch's loss by its (detached) fuse-gate weight.
                GAT_loss = torch.nn.functional.mse_loss(target_tensor, GAT_prediction_tensor)
                GCN_loss = torch.nn.functional.mse_loss(target_tensor, GCN_prediction_tensor)
                loss = omega1 * GAT_loss + omega2 * GCN_loss
                loss.backward()
                optimizer.step()
                losses = losses + loss.item()
                average_loss = losses / (step + 1)
            print('CCasGNN train MSLE loss in ', epoch + 1, ' epoch = ', average_loss)
            time_now = time.time()
            print('the rest of running time about:', (((time_now - time_start) / (epoch + 1)) * (self.args.epochs - epoch)) / 60, ' minutes')
            print('\n')
            if (epoch + 1) % self.args.check_point == 0:
                print('epoch ', epoch + 1, ' evaluating.')
                self.evaluation(valid_data_x, valid_data_y)
                self.test(test_data_x, test_data_y)
    def evaluation(self, valid_x_batches, valid_y_batches):
        """Report MSLE loss of the fused prediction on the validation split."""
        self.model.eval()
        losses = 0.
        average_loss = 0.
        for step, (valid_x_batch, valid_y_batch) in enumerate(zip(valid_x_batches, valid_y_batches)):
            prediction_tensor = torch.tensor([])
            target_tensor = torch.tensor([])
            for (valid_x, valid_y) in zip(valid_x_batch, valid_y_batch):
                prediction = self.model(valid_x)
                prediction_tensor = torch.cat((prediction_tensor, prediction[0].float()), 0)
                target_tensor = torch.cat((target_tensor, torch.log2(valid_y.float() + 1)), 0)
            loss = torch.nn.functional.mse_loss(target_tensor, prediction_tensor)
            losses = losses + loss.item()
            average_loss = losses / (step + 1)
        print('#####CCasGNN valid MSLE loss in this epoch = ', average_loss)
        print('\n')
    def test(self, test_x_batches, test_y_batches):
        """Report MSLE loss and residual/target correlation on the test split."""
        print("\n\nScoring.\n")
        self.model.eval()
        losses = 0.
        average_loss = 0.
        all_test_tensor = torch.tensor([])
        all_true_tensor = torch.tensor([])
        for step, (test_x_batch, test_y_batch) in enumerate(zip(test_x_batches, test_y_batches)):
            prediction_tensor = torch.tensor([])
            target_tensor = torch.tensor([])
            for (test_x, test_y) in zip(test_x_batch, test_y_batch):
                prediction = self.model(test_x)
                prediction_tensor = torch.cat((prediction_tensor, prediction[0].float()), 0)
                all_test_tensor = torch.cat((all_test_tensor, prediction[0].float()), dim=0)
                target_tensor = torch.cat((target_tensor, torch.log2(test_y.float() + 1)), 0)
                all_true_tensor = torch.cat((all_true_tensor, torch.log2(test_y.float() + 1)), dim=0)
            loss = torch.nn.functional.mse_loss(target_tensor, prediction_tensor)
            losses = losses + loss.item()
            average_loss = losses / (step + 1)
        all_test_np = all_test_tensor.detach().numpy()
        all_true_np = all_true_tensor.detach().numpy()
        sub_np = all_test_np - all_true_np
        print('correlation: ', sci.pearsonr(sub_np, all_true_np))
        print('#####CCasGNN test MSLE loss = ', average_loss)
        print('\n')
| 13,767 | 50.373134 | 136 | py |
pyEPR | pyEPR-master/docs/source/conf.py | # Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# http://www.sphinx-doc.org/en/master/config

# -- Project information -----------------------------------------------------

project = 'pyEPR'
copyright = '2020, Zlatko Minev, Zaki Leghtas, and the pyEPR Team'
author = 'Zlatko Minev, Zaki Leghtas, and the pyEPR Team'

# -- Path setup --------------------------------------------------------------
# Make the package importable so autodoc and the version lookup below work.

import sys
import os

sys.path.insert(0, os.path.abspath("../../pyEPR"))
print(sys.path)

# The full version, including alpha/beta/rc tags.
import pyEPR

version = pyEPR.__version__
release = version

import sphinx_rtd_theme

# -- General configuration ---------------------------------------------------

# Sphinx extension module names: builtin ('sphinx.ext.*') plus third-party.
extensions = [
    "sphinx.ext.intersphinx",
    "sphinx.ext.autodoc",
    "sphinx.ext.autosummary",
    'sphinx.ext.coverage',
    'sphinx.ext.napoleon',  # parse both NumPy and Google style docstrings
    'sphinx.ext.viewcode',
    'sphinx.ext.githubpages',
    "sphinx.ext.mathjax",
    "sphinx_rtd_theme",
    #'sphinx_automodapi.automodapi',
    "IPython.sphinxext.ipython_directive",
    "IPython.sphinxext.ipython_console_highlighting",
    "matplotlib.sphinxext.plot_directive",
    #'numpydoc'
]

# https://github.com/readthedocs/readthedocs.org/issues/2569
master_doc = 'index'

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# Patterns, relative to source directory, to ignore when looking for sources.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ["**.ipynb_checkpoints"]

# Do not prepend the module name to all description unit titles.
add_module_names = False

# Show sectionauthor and moduleauthor directives in the output.
show_authors = True

# The name of the Pygments (syntax highlighting) style to use.
# (The original assigned this twice with the same value; once suffices.)
pygments_style = 'sphinx'

numpydoc_show_class_members = True
napoleon_numpy_docstring = True
napoleon_use_admonition_for_notes = True

# -- Options for HTML output -------------------------------------------------

# NOTE(review): the original set html_theme three separate times (including a
# dead `if 0:` default/nature switch); only the final 'sphinx_rtd_theme'
# assignment ever took effect, so it is kept exactly once here.
html_theme = 'sphinx_rtd_theme'
full_logo = True

# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
    'canonical_url': '',
    #'logo_only': False,
    'display_version': True,
    'prev_next_buttons_location': 'bottom',
    'style_external_links': False,
    #'style_nav_header_background': 'white',
    # Toc options
    'collapse_navigation': False,
    'sticky_navigation': True,
    'navigation_depth': 4,
    'includehidden': True,
    'titles_only': False
}

# Custom static files (such as style sheets), copied after the builtin static
# files, so a file named "default.css" will overwrite the builtin one.
html_static_path = ['_static']

# If false, no module index is generated.
html_use_modindex = True
html_show_sourcelink = True

# -----------------------------------------------------------------------------
# Autosummary
# -----------------------------------------------------------------------------
autosummary_generate = True

# -----------------------------------------------------------------------------
# Autodoc
# -----------------------------------------------------------------------------
# Supported options include 'members', 'member-order', 'undoc-members',
# 'private-members', 'special-members', 'inherited-members',
# 'show-inheritance', 'ignore-module-all', 'imported-members',
# 'exclude-members'.
autodoc_default_options = {
    'inherited-members': None,
    # Sort automatically documented members alphabetically (alternatives:
    # 'groupwise' by member type, 'bysource' in source order).
    'member-order': 'alphabetical',
    'undoc-members': True,  # also document members without docstrings
    'exclude-members': '__weakref__',
    # Insert a list of base classes just below each class signature.
    'show-inheritance': True,
}

# Automatically number figures, tables and code-blocks that have a caption.
numfig = True

# Format strings for figure numbers; %s is replaced by the number.
numfig_format = {
    'table': 'Table %s'
}

# Language for content autogenerated by Sphinx (also used for gettext
# content translation); None falls back to Sphinx's default (English).
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'colorful'
# A boolean that decides whether module names are prepended to all object names
# (for object types where a “module” of some kind is defined), e.g. for
# py:function directives.
add_module_names = True
# A list of prefixes that are ignored for sorting the Python module index
# (e.g., if this is set to ['foo.'], then foo.bar is shown under B, not F).
# This can be handy if you document a project that consists of a single
# package. Works only for the HTML builder currently.
#modindex_common_prefix = ['pyEPR.'] | 7,220 | 33.716346 | 242 | py |
FUNIT | FUNIT-master/test_k_shot.py | """
Copyright (C) 2019 NVIDIA Corporation. All rights reserved.
Licensed under the CC BY-NC-SA 4.0 license
(https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode).
"""
# k-shot inference script: averages the class (style) code over every image
# in --class_image_folder, then translates --input into that class.
import os
import numpy as np
from PIL import Image
import torch
import torch.backends.cudnn as cudnn
from torchvision import transforms
from utils import get_config
from trainer import Trainer
import argparse
# Pin the process to GPU 1 before CUDA is initialized.
os.environ['CUDA_VISIBLE_DEVICES']='1'
parser = argparse.ArgumentParser()
parser.add_argument('--config',
                    type=str,
                    default='configs/funit_animals.yaml')
parser.add_argument('--ckpt',
                    type=str,
                    default='pretrained/animal119_gen_00200000.pt')
parser.add_argument('--class_image_folder',
                    type=str,
                    default='images/n02138411')
parser.add_argument('--input',
                    type=str,
                    default='images/input_content.jpg')
parser.add_argument('--output',
                    type=str,
                    default='images/output.jpg')
opts = parser.parse_args()
cudnn.benchmark = True
opts.vis = True
# Single-image, single-GPU inference configuration.
config = get_config(opts.config)
config['batch_size'] = 1
config['gpus'] = 1
trainer = Trainer(config)
trainer.cuda()
trainer.load_ckpt(opts.ckpt)
trainer.eval()
# Deterministic eval transform: resize to 128x128, then normalize to [-1, 1].
transform_list = [transforms.ToTensor(),
                  transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]
transform_list = [transforms.Resize((128, 128))] + transform_list
transform = transforms.Compose(transform_list)
print('Compute average class codes for images in %s' % opts.class_image_folder)
# NOTE(review): an empty class_image_folder leaves new_class_code undefined
# and raises NameError below — consider failing early with a clear message.
images = os.listdir(opts.class_image_folder)
for i, f in enumerate(images):
    fn = os.path.join(opts.class_image_folder, f)
    img = Image.open(fn).convert('RGB')
    img_tensor = transform(img).unsqueeze(0).cuda()
    with torch.no_grad():
        # k=1: one style code per image; codes are summed then averaged.
        class_code = trainer.model.compute_k_style(img_tensor, 1)
        if i == 0:
            new_class_code = class_code
        else:
            new_class_code += class_code
final_class_code = new_class_code / len(images)
image = Image.open(opts.input)
image = image.convert('RGB')
content_img = transform(image).unsqueeze(0)
print('Compute translation for %s' % opts.input)
with torch.no_grad():
    output_image = trainer.model.translate_simple(content_img, final_class_code)
    image = output_image.detach().cpu().squeeze().numpy()
    # CHW -> HWC, then de-normalize from [-1, 1] back to [0, 255].
    image = np.transpose(image, (1, 2, 0))
    image = ((image + 1) * 0.5 * 255.0)
    output_img = Image.fromarray(np.uint8(image))
    output_img.save(opts.output, 'JPEG', quality=99)
    print('Save output to %s' % opts.output)
| 2,618 | 31.7375 | 80 | py |
FUNIT | FUNIT-master/utils.py | """
Copyright (C) 2019 NVIDIA Corporation. All rights reserved.
Licensed under the CC BY-NC-SA 4.0 license
(https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode).
"""
import os
import yaml
import time
import torch
from torch.utils.data import DataLoader
from torchvision import transforms
import torchvision.utils as vutils
from data import ImageLabelFilelist
def update_average(model_tgt, model_src, beta=0.999):
    """Exponential moving average update: tgt <- beta*tgt + (1-beta)*src.

    Parameters are matched by name, so both models must expose an identical
    set of named parameters. The update is done in place on ``model_tgt``.
    """
    with torch.no_grad():
        source_params = dict(model_src.named_parameters())
        for name, target_param in model_tgt.named_parameters():
            source_param = source_params[name]
            # The two models must not alias the same parameter tensors.
            assert source_param is not target_param
            blended = beta * target_param + (1. - beta) * source_param
            target_param.copy_(blended)
def loader_from_list(
        root,
        file_list,
        batch_size,
        new_size=None,
        height=128,
        width=128,
        crop=True,
        num_workers=4,
        shuffle=True,
        center_crop=False,
        return_paths=False,
        drop_last=True):
    """Build a DataLoader over an ImageLabelFilelist with the standard pipeline.

    Transform order (outermost first): optional horizontal flip (skipped for
    the deterministic center-crop mode), optional resize, optional crop
    (center or random), ToTensor, then normalization to [-1, 1].
    """
    ops = [transforms.ToTensor(),
           transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]
    if crop:
        crop_op = (transforms.CenterCrop((height, width)) if center_crop
                   else transforms.RandomCrop((height, width)))
        ops.insert(0, crop_op)
    if new_size is not None:
        ops.insert(0, transforms.Resize(new_size))
    if not center_crop:
        # Random flip augmentation only in the non-deterministic pipeline.
        ops.insert(0, transforms.RandomHorizontalFlip())
    dataset = ImageLabelFilelist(root,
                                 file_list,
                                 transforms.Compose(ops),
                                 return_paths=return_paths)
    return DataLoader(dataset,
                      batch_size,
                      shuffle=shuffle,
                      drop_last=drop_last,
                      num_workers=num_workers)
def get_evaluation_loaders(conf, shuffle_content=False):
    """Return (content_loader, class_loader) for evaluation.

    Both loaders use deterministic center crops, keep the last partial batch,
    and return image paths. The class loader serves ``k_shot`` style images
    per content image and is never shuffled.
    """
    shared = dict(new_size=conf['new_size'],
                  height=conf['crop_image_height'],
                  width=conf['crop_image_width'],
                  crop=True,
                  center_crop=True,
                  return_paths=True,
                  drop_last=False)
    content_loader = loader_from_list(
        root=conf['data_folder_train'],
        file_list=conf['data_list_train'],
        batch_size=conf['batch_size'],
        num_workers=conf['num_workers'],
        shuffle=shuffle_content,
        **shared)
    class_loader = loader_from_list(
        root=conf['data_folder_test'],
        file_list=conf['data_list_test'],
        batch_size=conf['batch_size'] * conf['k_shot'],
        num_workers=1,
        shuffle=False,
        **shared)
    return content_loader, class_loader
def get_train_loaders(conf):
    """Return (train_content, train_class, test_content, test_class) loaders.

    The content and class loaders of each split are built identically but
    iterated independently, so every batch pairs an independently shuffled
    content image with a class image. The four near-identical call sites of
    the original are collapsed into one helper.
    """
    def _make_loader(split, num_workers):
        # One augmented (random crop/flip) loader over the split's file list.
        return loader_from_list(
            root=conf['data_folder_%s' % split],
            file_list=conf['data_list_%s' % split],
            batch_size=conf['batch_size'],
            new_size=conf['new_size'],
            height=conf['crop_image_height'],
            width=conf['crop_image_width'],
            crop=True,
            num_workers=num_workers)
    train_content_loader = _make_loader('train', conf['num_workers'])
    train_class_loader = _make_loader('train', conf['num_workers'])
    test_content_loader = _make_loader('test', 1)
    test_class_loader = _make_loader('test', 1)
    return (train_content_loader, train_class_loader, test_content_loader,
            test_class_loader)
def get_config(config):
    """Parse the YAML experiment configuration file at path ``config``.

    Returns the deserialized document (a dict for these configs).
    FullLoader resolves standard YAML tags; it is PyYAML's recommended
    replacement for the unsafe legacy default loader.
    """
    with open(config, 'r') as stream:
        return yaml.load(stream, Loader=yaml.FullLoader)
def make_result_folders(output_directory):
    """Create (if needed) the image and checkpoint output directories.

    Args:
        output_directory: Root folder of the run's outputs.

    Returns:
        Tuple ``(checkpoint_directory, image_directory)``.
    """
    def _ensure_dir(name):
        # exist_ok=True closes the race between the exists() check and the
        # creation (the original makedirs() crashed if another process won).
        path = os.path.join(output_directory, name)
        if not os.path.exists(path):
            print("Creating directory: {}".format(path))
            os.makedirs(path, exist_ok=True)
        return path
    image_directory = _ensure_dir('images')
    checkpoint_directory = _ensure_dir('checkpoints')
    return checkpoint_directory, image_directory
def __write_images(im_outs, dis_img_n, file_name):
    """Save a grid image: one row per tensor in im_outs, dis_img_n columns.

    Single-channel batches are expanded to 3 channels so every row shares
    the RGB layout expected by make_grid; values are re-normalized for
    display.
    """
    rgb_batches = [batch.expand(-1, 3, -1, -1) for batch in im_outs]
    stacked = torch.cat([batch[:dis_img_n] for batch in rgb_batches], 0)
    grid = vutils.make_grid(stacked.data,
                            nrow=dis_img_n, padding=0, normalize=True)
    vutils.save_image(grid, file_name, nrow=1)
def write_1images(image_outputs, image_directory, postfix):
    """Write one visualization grid to image_directory/gen_<postfix>.jpg."""
    n_cols = image_outputs[0].size(0)
    out_path = '%s/gen_%s.jpg' % (image_directory, postfix)
    __write_images(image_outputs, n_cols, out_path)
def _write_row(html_file, it, fn, all_size):
html_file.write("<h3>iteration [%d] (%s)</h3>" % (it, fn.split('/')[-1]))
html_file.write("""
<p><a href="%s">
<img src="%s" style="width:%dpx">
</a><br>
<p>
""" % (fn, fn, all_size))
return
def write_html(filename, it, img_save_it, img_dir, all_size=1536):
    """(Re)write the auto-refreshing HTML index of training snapshot images.

    The page shows the 'current' image first, then every saved snapshot from
    iteration ``it`` down to ``img_save_it``.

    Args:
        filename: Output HTML path; its basename becomes the page title.
        it: Latest iteration number.
        img_save_it: Oldest snapshot iteration to include.
        img_dir: Directory (relative to the HTML file) containing the images.
        all_size: Display width of each image in pixels.
    """
    # 'with' guarantees the handle is closed even if a write raises; the
    # original open()/close() pair leaked the file object on error.
    with open(filename, "w") as html_file:
        html_file.write('''
    <!DOCTYPE html>
    <html>
    <head>
      <title>Experiment name = %s</title>
      <meta http-equiv="refresh" content="30">
    </head>
    <body>
    ''' % os.path.basename(filename))
        html_file.write("<h3>current</h3>")
        _write_row(html_file, it, '%s/gen_train_current.jpg' % img_dir, all_size)
        for j in range(it, img_save_it - 1, -1):
            _write_row(html_file, j, '%s/gen_train_%08d.jpg' % (img_dir, j),
                       all_size)
        html_file.write("</body></html>")
def write_loss(iterations, trainer, train_writer):
    """Log every scalar metric attribute of ``trainer`` to the summary writer.

    An attribute qualifies when it is non-callable, not a dunder, and its
    name mentions loss/grad/nwd/accuracy. Values are logged at step
    ``iterations + 1``.
    """
    keywords = ('loss', 'grad', 'nwd', 'accuracy')
    for attr in dir(trainer):
        if attr.startswith("__"):
            continue
        value = getattr(trainer, attr)
        if callable(value):
            continue
        if any(key in attr for key in keywords):
            train_writer.add_scalar(attr, value, iterations + 1)
class Timer:
    """Context manager that prints the elapsed wall-clock time on exit.

    ``msg`` must contain a single ``%f`` placeholder for the seconds.
    """

    def __init__(self, msg):
        self.msg = msg
        self.start_time = None

    def __enter__(self):
        self.start_time = time.time()

    def __exit__(self, exc_type, exc_value, exc_tb):
        elapsed = time.time() - self.start_time
        print(self.msg % elapsed)
| 7,743 | 32.37931 | 77 | py |
FUNIT | FUNIT-master/data.py | """
Copyright (C) 2019 NVIDIA Corporation. All rights reserved.
Licensed under the CC BY-NC-SA 4.0 license
(https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode).
"""
import os.path
from PIL import Image
import torch.utils.data as data
def default_loader(path):
    """Load the image at ``path`` as a 3-channel RGB PIL image."""
    return Image.open(path).convert('RGB')
def default_filelist_reader(filelist):
    """Read a text file and return a list of its whitespace-stripped lines.

    Each line is expected to hold one image path relative to the data root.
    """
    with open(filelist, 'r') as rf:
        return [line.strip() for line in rf]
class ImageLabelFilelist(data.Dataset):
    """Dataset over images listed in a text file, labeled by top directory.

    Each line of ``filelist`` is an image path relative to ``root``. The
    label of an image is the index of its first path component (e.g.
    'n02138411/img1.jpg' belongs to class 'n02138411'); class indices are
    assigned in sorted order of the class names, so the mapping is
    deterministic across runs.
    """
    def __init__(self,
                 root,
                 filelist,
                 transform=None,
                 filelist_reader=default_filelist_reader,
                 loader=default_loader,
                 return_paths=False):
        self.root = root
        self.im_list = filelist_reader(os.path.join(filelist))
        self.transform = transform
        self.loader = loader
        # Unique first path components, sorted -> stable class indexing.
        self.classes = sorted(
            list(set([path.split('/')[0] for path in self.im_list])))
        self.class_to_idx = {self.classes[i]: i for i in
                             range(len(self.classes))}
        self.imgs = [(im_path, self.class_to_idx[im_path.split('/')[0]]) for
                     im_path in self.im_list]
        self.return_paths = return_paths
        print('Data loader')
        print("\tRoot: %s" % root)
        print("\tList: %s" % filelist)
        print("\tNumber of classes: %d" % (len(self.classes)))
    def __getitem__(self, index):
        """Return (image, label) or (image, label, path) if return_paths."""
        im_path, label = self.imgs[index]
        path = os.path.join(self.root, im_path)
        img = self.loader(path)
        if self.transform is not None:
            img = self.transform(img)
        if self.return_paths:
            return img, label, path
        else:
            return img, label
    def __len__(self):
        return len(self.imgs)
| 1,913 | 29.870968 | 76 | py |
FUNIT | FUNIT-master/networks.py | """
Copyright (C) 2019 NVIDIA Corporation. All rights reserved.
Licensed under the CC BY-NC-SA 4.0 license
(https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode).
"""
import numpy as np
import torch
from torch import nn
from torch import autograd
from blocks import LinearBlock, Conv2dBlock, ResBlocks, ActFirstResBlock
def assign_adain_params(adain_params, model):
    """Distribute a flat (batch, n) tensor of AdaIN parameters over ``model``.

    Each AdaptiveInstanceNorm2d layer consumes 2 * num_features columns:
    first the per-channel means (assigned to ``bias``), then the stds
    (assigned to ``weight``). Layers are visited in module order and the
    remaining columns are carried forward to the next layer.
    """
    remaining = adain_params
    for module in model.modules():
        if module.__class__.__name__ != "AdaptiveInstanceNorm2d":
            continue
        n = module.num_features
        mean = remaining[:, :n]
        std = remaining[:, n:2 * n]
        module.bias = mean.contiguous().view(-1)
        module.weight = std.contiguous().view(-1)
        if remaining.size(1) > 2 * n:
            remaining = remaining[:, 2 * n:]
def get_num_adain_params(model):
    """Count the AdaIN parameters ``model`` expects.

    Each AdaptiveInstanceNorm2d layer needs 2 * num_features values
    (per-channel mean and std).
    """
    return sum(2 * m.num_features for m in model.modules()
               if m.__class__.__name__ == "AdaptiveInstanceNorm2d")
class GPPatchMcResDis(nn.Module):
    """Multi-class patch discriminator with hinge losses and a gradient penalty.

    ``cnn_f`` extracts shared features; ``cnn_c`` maps them to one patch
    response map per class, and ``y`` selects each sample's own class map.
    """
    def __init__(self, hp):
        super(GPPatchMcResDis, self).__init__()
        assert hp['n_res_blks'] % 2 == 0, 'n_res_blk must be multiples of 2'
        self.n_layers = hp['n_res_blks'] // 2
        nf = hp['nf']
        # Feature trunk: a 7x7 stem, then per stage two activation-first
        # residual blocks followed by a 3x3/stride-2 average pool; channel
        # width doubles each stage, capped at 1024.
        cnn_f = [Conv2dBlock(3, nf, 7, 1, 3,
                             pad_type='reflect',
                             norm='none',
                             activation='none')]
        for i in range(self.n_layers - 1):
            nf_out = np.min([nf * 2, 1024])
            cnn_f += [ActFirstResBlock(nf, nf, None, 'lrelu', 'none')]
            cnn_f += [ActFirstResBlock(nf, nf_out, None, 'lrelu', 'none')]
            cnn_f += [nn.ReflectionPad2d(1)]
            cnn_f += [nn.AvgPool2d(kernel_size=3, stride=2)]
            nf = np.min([nf * 2, 1024])
        nf_out = np.min([nf * 2, 1024])
        cnn_f += [ActFirstResBlock(nf, nf, None, 'lrelu', 'none')]
        cnn_f += [ActFirstResBlock(nf, nf_out, None, 'lrelu', 'none')]
        # Classifier head: 1x1 conv producing num_classes response maps.
        cnn_c = [Conv2dBlock(nf_out, hp['num_classes'], 1, 1,
                             norm='none',
                             activation='lrelu',
                             activation_first=True)]
        self.cnn_f = nn.Sequential(*cnn_f)
        self.cnn_c = nn.Sequential(*cnn_c)
    def forward(self, x, y):
        """Return (patch responses for each sample's class y, trunk features)."""
        assert(x.size(0) == y.size(0))
        feat = self.cnn_f(x)
        out = self.cnn_c(feat)
        index = torch.LongTensor(range(out.size(0))).cuda()
        # Pick, for each sample in the batch, the map of its own class.
        out = out[index, y, :, :]
        return out, feat
    def calc_dis_fake_loss(self, input_fake, input_label):
        """Hinge loss on fakes: mean(relu(1 + D(x))); accuracy = frac(D < 0)."""
        resp_fake, gan_feat = self.forward(input_fake, input_label)
        total_count = torch.tensor(np.prod(resp_fake.size()),
                                   dtype=torch.float).cuda()
        fake_loss = torch.nn.ReLU()(1.0 + resp_fake).mean()
        correct_count = (resp_fake < 0).sum()
        fake_accuracy = correct_count.type_as(fake_loss) / total_count
        return fake_loss, fake_accuracy, resp_fake
    def calc_dis_real_loss(self, input_real, input_label):
        """Hinge loss on reals: mean(relu(1 - D(x))); accuracy = frac(D >= 0)."""
        resp_real, gan_feat = self.forward(input_real, input_label)
        total_count = torch.tensor(np.prod(resp_real.size()),
                                   dtype=torch.float).cuda()
        real_loss = torch.nn.ReLU()(1.0 - resp_real).mean()
        correct_count = (resp_real >= 0).sum()
        real_accuracy = correct_count.type_as(real_loss) / total_count
        return real_loss, real_accuracy, resp_real
    def calc_gen_loss(self, input_fake, input_fake_label):
        """Generator loss -mean(D(fake)); also returns the trunk features."""
        resp_fake, gan_feat = self.forward(input_fake, input_fake_label)
        total_count = torch.tensor(np.prod(resp_fake.size()),
                                   dtype=torch.float).cuda()
        loss = -resp_fake.mean()
        correct_count = (resp_fake >= 0).sum()
        accuracy = correct_count.type_as(loss) / total_count
        return loss, accuracy, gan_feat
    def calc_grad2(self, d_out, x_in):
        """Gradient penalty: batch-mean of the squared gradient of mean(d_out)
        with respect to x_in."""
        batch_size = x_in.size(0)
        grad_dout = autograd.grad(outputs=d_out.mean(),
                                  inputs=x_in,
                                  create_graph=True,
                                  retain_graph=True,
                                  only_inputs=True)[0]
        grad_dout2 = grad_dout.pow(2)
        assert (grad_dout2.size() == x_in.size())
        reg = grad_dout2.sum()/batch_size
        return reg
class FewShotGen(nn.Module):
    """FUNIT few-shot generator: content encoder + class encoder + AdaIN decoder.

    The class (style) code is mapped by an MLP to the AdaIN parameters of
    the decoder's residual blocks before decoding.
    """
    def __init__(self, hp):
        super(FewShotGen, self).__init__()
        nf = hp['nf']
        nf_mlp = hp['nf_mlp']
        down_class = hp['n_downs_class']
        down_content = hp['n_downs_content']
        n_mlp_blks = hp['n_mlp_blks']
        n_res_blks = hp['n_res_blks']
        latent_dim = hp['latent_dim']
        self.enc_class_model = ClassModelEncoder(down_class,
                                                 3,
                                                 nf,
                                                 latent_dim,
                                                 norm='none',
                                                 activ='relu',
                                                 pad_type='reflect')
        self.enc_content = ContentEncoder(down_content,
                                          n_res_blks,
                                          3,
                                          nf,
                                          'in',
                                          activ='relu',
                                          pad_type='reflect')
        self.dec = Decoder(down_content,
                           n_res_blks,
                           self.enc_content.output_dim,
                           3,
                           res_norm='adain',
                           activ='relu',
                           pad_type='reflect')
        self.mlp = MLP(latent_dim,
                       get_num_adain_params(self.dec),
                       nf_mlp,
                       n_mlp_blks,
                       norm='none',
                       activ='relu')
    def forward(self, one_image, model_set):
        """Reconstruct one_image using the class code averaged over model_set."""
        # reconstruct an image
        content, model_codes = self.encode(one_image, model_set)
        # NOTE(review): encode() already returns the averaged class code;
        # this second mean is over a singleton dim and is effectively a no-op.
        model_code = torch.mean(model_codes, dim=0).unsqueeze(0)
        images_trans = self.decode(content, model_code)
        return images_trans
    def encode(self, one_image, model_set):
        """Return (content code of one_image, mean class code of model_set)."""
        # extract content code from the input image
        content = self.enc_content(one_image)
        # extract model code from the images in the model set
        class_codes = self.enc_class_model(model_set)
        class_code = torch.mean(class_codes, dim=0).unsqueeze(0)
        return content, class_code
    def decode(self, content, model_code):
        """Decode the content code into an image, styled by model_code."""
        # decode content and style codes to an image
        adain_params = self.mlp(model_code)
        assign_adain_params(adain_params, self.dec)
        images = self.dec(content)
        return images
class ClassModelEncoder(nn.Module):
    """Class (style) encoder: strided conv downsampling, global average
    pooling, then a 1x1 projection to a latent_dim-channel class code."""
    def __init__(self, downs, ind_im, dim, latent_dim, norm, activ, pad_type):
        super(ClassModelEncoder, self).__init__()
        self.model = []
        self.model += [Conv2dBlock(ind_im, dim, 7, 1, 3,
                                   norm=norm,
                                   activation=activ,
                                   pad_type=pad_type)]
        # Two stride-2 convs that also double the channel width...
        for i in range(2):
            self.model += [Conv2dBlock(dim, 2 * dim, 4, 2, 1,
                                       norm=norm,
                                       activation=activ,
                                       pad_type=pad_type)]
            dim *= 2
        # ...then (downs - 2) stride-2 convs at constant width.
        for i in range(downs - 2):
            self.model += [Conv2dBlock(dim, dim, 4, 2, 1,
                                       norm=norm,
                                       activation=activ,
                                       pad_type=pad_type)]
        self.model += [nn.AdaptiveAvgPool2d(1)]
        self.model += [nn.Conv2d(dim, latent_dim, 1, 1, 0)]
        self.model = nn.Sequential(*self.model)
        # NOTE(review): output_dim records the width BEFORE the final 1x1
        # projection; the module's actual output has latent_dim channels.
        self.output_dim = dim
    def forward(self, x):
        return self.model(x)
class ContentEncoder(nn.Module):
    """Content encoder: 7x7 stem, ``downs`` stride-2 convs (width doubles
    each time), then ``n_res`` residual blocks. The final channel width is
    exposed as ``output_dim`` for the decoder."""
    def __init__(self, downs, n_res, input_dim, dim, norm, activ, pad_type):
        super(ContentEncoder, self).__init__()
        self.model = []
        self.model += [Conv2dBlock(input_dim, dim, 7, 1, 3,
                                   norm=norm,
                                   activation=activ,
                                   pad_type=pad_type)]
        for i in range(downs):
            self.model += [Conv2dBlock(dim, 2 * dim, 4, 2, 1,
                                       norm=norm,
                                       activation=activ,
                                       pad_type=pad_type)]
            dim *= 2
        self.model += [ResBlocks(n_res, dim,
                                 norm=norm,
                                 activation=activ,
                                 pad_type=pad_type)]
        self.model = nn.Sequential(*self.model)
        self.output_dim = dim
    def forward(self, x):
        return self.model(x)
class Decoder(nn.Module):
    """Decoder: ``n_res`` residual blocks (AdaIN-normalized when
    res_norm='adain'), ``ups`` x2 upsampling stages each halving the channel
    width, then a 7x7 conv with tanh producing the output image."""
    def __init__(self, ups, n_res, dim, out_dim, res_norm, activ, pad_type):
        super(Decoder, self).__init__()
        self.model = []
        self.model += [ResBlocks(n_res, dim, res_norm,
                                 activ, pad_type=pad_type)]
        for i in range(ups):
            self.model += [nn.Upsample(scale_factor=2),
                           Conv2dBlock(dim, dim // 2, 5, 1, 2,
                                       norm='in',
                                       activation=activ,
                                       pad_type=pad_type)]
            dim //= 2
        self.model += [Conv2dBlock(dim, out_dim, 7, 1, 3,
                                   norm='none',
                                   activation='tanh',
                                   pad_type=pad_type)]
        self.model = nn.Sequential(*self.model)
    def forward(self, x):
        return self.model(x)
class MLP(nn.Module):
    """Fully connected network mapping the class code to AdaIN parameters.

    Input is flattened to (batch, in_dim). The net has ``n_blk`` layers in
    total: in_dim->dim, (n_blk - 2) dim->dim layers, then dim->out_dim with
    no normalization or activation.
    """
    def __init__(self, in_dim, out_dim, dim, n_blk, norm, activ):
        super(MLP, self).__init__()
        self.model = []
        self.model += [LinearBlock(in_dim, dim, norm=norm, activation=activ)]
        for i in range(n_blk - 2):
            self.model += [LinearBlock(dim, dim, norm=norm, activation=activ)]
        self.model += [LinearBlock(dim, out_dim,
                                   norm='none', activation='none')]
        self.model = nn.Sequential(*self.model)
    def forward(self, x):
        return self.model(x.view(x.size(0), -1))
| 10,860 | 39.830827 | 78 | py |
FUNIT | FUNIT-master/funit_model.py | """
Copyright (C) 2019 NVIDIA Corporation. All rights reserved.
Licensed under the CC BY-NC-SA 4.0 license
(https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode).
"""
import copy
import torch
import torch.nn as nn
from networks import FewShotGen, GPPatchMcResDis
def recon_criterion(predict, target):
    """Mean absolute error (L1 loss) between prediction and target."""
    return (predict - target).abs().mean()
class FUNITModel(nn.Module):
    """FUNIT wrapper holding the generator, the discriminator, and
    ``gen_test``, a copy of the generator kept as a moving average (updated
    via the trainer's update_average) and used for evaluation."""
    def __init__(self, hp):
        super(FUNITModel, self).__init__()
        self.gen = FewShotGen(hp['gen'])
        self.dis = GPPatchMcResDis(hp['dis'])
        self.gen_test = copy.deepcopy(self.gen)
    def forward(self, co_data, cl_data, hp, mode):
        """Compute one update's losses; backward() is called inside.

        Args:
            co_data: (content images, content labels) batch.
            cl_data: (class images, class labels) batch.
            hp: hyper-parameters dict with loss weights gan_w, r_w, fm_w.
            mode: 'gen_update' or 'dis_update'.
        """
        xa = co_data[0].cuda()
        la = co_data[1].cuda()
        xb = cl_data[0].cuda()
        lb = cl_data[1].cuda()
        if mode == 'gen_update':
            # INSERT here the object detection
            # then, for each objects, keep the bounding box and encode it,
            # then merge (in some way, maybe replace the features), in the encoding
            # of the whole image, the encoding of each objects using its bounding box
            # information
            c_xa = self.gen.enc_content(xa)
            s_xa = self.gen.enc_class_model(xa)
            s_xb = self.gen.enc_class_model(xb)
            xt = self.gen.decode(c_xa, s_xb)  # translation
            xr = self.gen.decode(c_xa, s_xa)  # reconstruction
            l_adv_t, gacc_t, xt_gan_feat = self.dis.calc_gen_loss(xt, lb)
            l_adv_r, gacc_r, xr_gan_feat = self.dis.calc_gen_loss(xr, la)
            _, xb_gan_feat = self.dis(xb, lb)
            _, xa_gan_feat = self.dis(xa, la)
            # Feature-matching losses on spatially averaged discriminator
            # features: reconstruction vs. content, translation vs. class.
            l_c_rec = recon_criterion(xr_gan_feat.mean(3).mean(2),
                                      xa_gan_feat.mean(3).mean(2))
            l_m_rec = recon_criterion(xt_gan_feat.mean(3).mean(2),
                                      xb_gan_feat.mean(3).mean(2))
            # Pixel-level L1 reconstruction loss.
            l_x_rec = recon_criterion(xr, xa)
            l_adv = 0.5 * (l_adv_t + l_adv_r)
            acc = 0.5 * (gacc_t + gacc_r)
            l_total = (hp['gan_w'] * l_adv + hp['r_w'] * l_x_rec + hp[
                'fm_w'] * (l_c_rec + l_m_rec))
            l_total.backward()
            return l_total, l_adv, l_x_rec, l_c_rec, l_m_rec, acc
        elif mode == 'dis_update':
            # Real hinge loss plus a gradient penalty (weight 10) on the
            # real images, then the fake hinge loss on a detached translation.
            xb.requires_grad_()
            l_real_pre, acc_r, resp_r = self.dis.calc_dis_real_loss(xb, lb)
            l_real = hp['gan_w'] * l_real_pre
            l_real.backward(retain_graph=True)
            l_reg_pre = self.dis.calc_grad2(resp_r, xb)
            l_reg = 10 * l_reg_pre
            l_reg.backward()
            with torch.no_grad():
                c_xa = self.gen.enc_content(xa)
                s_xb = self.gen.enc_class_model(xb)
                xt = self.gen.decode(c_xa, s_xb)
            l_fake_p, acc_f, resp_f = self.dis.calc_dis_fake_loss(xt.detach(),
                                                                  lb)
            l_fake = hp['gan_w'] * l_fake_p
            l_fake.backward()
            l_total = l_fake + l_real + l_reg
            acc = 0.5 * (acc_f + acc_r)
            return l_total, l_fake_p, l_real_pre, l_reg_pre, acc
        else:
            assert 0, 'Not support operation'
    def test(self, co_data, cl_data):
        """Run translation/reconstruction with both the current generator
        and the averaged gen_test; returns all six image batches."""
        self.eval()
        self.gen.eval()
        self.gen_test.eval()
        xa = co_data[0].cuda()
        xb = cl_data[0].cuda()
        c_xa_current = self.gen.enc_content(xa)
        s_xa_current = self.gen.enc_class_model(xa)
        s_xb_current = self.gen.enc_class_model(xb)
        xt_current = self.gen.decode(c_xa_current, s_xb_current)
        xr_current = self.gen.decode(c_xa_current, s_xa_current)
        c_xa = self.gen_test.enc_content(xa)
        s_xa = self.gen_test.enc_class_model(xa)
        s_xb = self.gen_test.enc_class_model(xb)
        xt = self.gen_test.decode(c_xa, s_xb)
        xr = self.gen_test.decode(c_xa, s_xa)
        self.train()
        return xa, xr_current, xt_current, xb, xr, xt
    def translate_k_shot(self, co_data, cl_data, k):
        """Translate co_data using the style averaged over k class images."""
        self.eval()
        xa = co_data[0].cuda()
        xb = cl_data[0].cuda()
        c_xa_current = self.gen_test.enc_content(xa)
        if k == 1:
            c_xa_current = self.gen_test.enc_content(xa)
            s_xb_current = self.gen_test.enc_class_model(xb)
            xt_current = self.gen_test.decode(c_xa_current, s_xb_current)
        else:
            # Average k per-image class codes via 1D average pooling over
            # the batch dimension.
            s_xb_current_before = self.gen_test.enc_class_model(xb)
            s_xb_current_after = s_xb_current_before.squeeze(-1).permute(1,
                                                                         2,
                                                                         0)
            s_xb_current_pool = torch.nn.functional.avg_pool1d(
                s_xb_current_after, k)
            s_xb_current = s_xb_current_pool.permute(2, 0, 1).unsqueeze(-1)
            xt_current = self.gen_test.decode(c_xa_current, s_xb_current)
        return xt_current
    def compute_k_style(self, style_batch, k):
        """Return the class code averaged over k style images (gen_test)."""
        self.eval()
        style_batch = style_batch.cuda()
        s_xb_before = self.gen_test.enc_class_model(style_batch)
        s_xb_after = s_xb_before.squeeze(-1).permute(1, 2, 0)
        s_xb_pool = torch.nn.functional.avg_pool1d(s_xb_after, k)
        s_xb = s_xb_pool.permute(2, 0, 1).unsqueeze(-1)
        return s_xb
    def translate_simple(self, content_image, class_code):
        """Translate one content image given a precomputed class code."""
        self.eval()
        xa = content_image.cuda()
        s_xb_current = class_code.cuda()
        c_xa_current = self.gen_test.enc_content(xa)
        xt_current = self.gen_test.decode(c_xa_current, s_xb_current)
        return xt_current
| 5,659 | 41.238806 | 85 | py |
FUNIT | FUNIT-master/train.py | """
Copyright (C) 2019 NVIDIA Corporation. All rights reserved.
Licensed under the CC BY-NC-SA 4.0 license
(https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode).
"""
# FUNIT training entry point: alternates discriminator and generator updates
# and periodically writes losses, image grids, an HTML index and checkpoints.
import torch
import os
import sys
import argparse
import shutil
from tensorboardX import SummaryWriter
from utils import get_config, get_train_loaders, make_result_folders
from utils import write_loss, write_html, write_1images, Timer
from trainer import Trainer
import torch.backends.cudnn as cudnn
# Enable auto-tuner to find the best algorithm to use for your hardware.
cudnn.benchmark = True
# SAMUEL 18.03.2021 CUDA GPU setting
os.environ['CUDA_VISIBLE_DEVICES']='1'
# SAMUEL 18.03.2021 CUDA GPU setting
parser = argparse.ArgumentParser()
parser.add_argument('--config',
                    type=str,
                    default='configs/funit_animals.yaml',
                    help='configuration file for training and testing')
parser.add_argument('--output_path',
                    type=str,
                    default='.',
                    help="outputs path")
parser.add_argument('--multigpus',
                    action="store_true")
parser.add_argument('--batch_size',
                    type=int,
                    default=0)
parser.add_argument('--test_batch_size',
                    type=int,
                    default=4)
parser.add_argument("--resume",
                    action="store_true")
opts = parser.parse_args()
# Load experiment setting
config = get_config(opts.config)
max_iter = config['max_iter']
# Override the batch size if specified.
if opts.batch_size != 0:
    config['batch_size'] = opts.batch_size
trainer = Trainer(config)
trainer.cuda()
if opts.multigpus:
    ngpus = torch.cuda.device_count()
    config['gpus'] = ngpus
    print("Number of GPUs: %d" % ngpus)
    trainer.model = torch.nn.DataParallel(
        trainer.model, device_ids=range(ngpus))
else:
    config['gpus'] = 1
loaders = get_train_loaders(config)
train_content_loader = loaders[0]
train_class_loader = loaders[1]
test_content_loader = loaders[2]
test_class_loader = loaders[3]
# Setup logger and output folders
model_name = os.path.splitext(os.path.basename(opts.config))[0]
train_writer = SummaryWriter(
    os.path.join(opts.output_path + "/logs", model_name))
output_directory = os.path.join(opts.output_path + "/outputs", model_name)
checkpoint_directory, image_directory = make_result_folders(output_directory)
# Keep a copy of the config next to the outputs for reproducibility.
shutil.copy(opts.config, os.path.join(output_directory, 'config.yaml'))
iterations = trainer.resume(checkpoint_directory,
                            hp=config,
                            multigpus=opts.multigpus) if opts.resume else 0
# Training loop: runs until max_iter, exiting via sys.exit(0) below.
while True:
    for it, (co_data, cl_data) in enumerate(
            zip(train_content_loader, train_class_loader)):
        with Timer("Elapsed time in update: %f"):
            # Discriminator step first, then generator step.
            d_acc = trainer.dis_update(co_data, cl_data, config)
            g_acc = trainer.gen_update(co_data, cl_data, config,
                                       opts.multigpus)
            torch.cuda.synchronize()
            print('D acc: %.4f\t G acc: %.4f' % (d_acc, g_acc))
        if (iterations + 1) % config['log_iter'] == 0:
            print("Iteration: %08d/%08d" % (iterations + 1, max_iter))
            write_loss(iterations, trainer, train_writer)
        if ((iterations + 1) % config['image_save_iter'] == 0 or (
                iterations + 1) % config['image_display_iter'] == 0):
            if (iterations + 1) % config['image_save_iter'] == 0:
                key_str = '%08d' % (iterations + 1)
                write_html(output_directory + "/index.html", iterations + 1,
                           config['image_save_iter'], 'images')
            else:
                key_str = 'current'
            # Dump a few train and test visualization grids.
            with torch.no_grad():
                for t, (val_co_data, val_cl_data) in enumerate(
                        zip(train_content_loader, train_class_loader)):
                    if t >= opts.test_batch_size:
                        break
                    val_image_outputs = trainer.test(val_co_data, val_cl_data,
                                                     opts.multigpus)
                    write_1images(val_image_outputs, image_directory,
                                  'train_%s_%02d' % (key_str, t))
                for t, (test_co_data, test_cl_data) in enumerate(
                        zip(test_content_loader, test_class_loader)):
                    if t >= opts.test_batch_size:
                        break
                    test_image_outputs = trainer.test(test_co_data,
                                                      test_cl_data,
                                                      opts.multigpus)
                    write_1images(test_image_outputs, image_directory,
                                  'test_%s_%02d' % (key_str, t))
        if (iterations + 1) % config['snapshot_save_iter'] == 0:
            trainer.save(checkpoint_directory, iterations, opts.multigpus)
            print('Saved model at iteration %d' % (iterations + 1))
        iterations += 1
        if iterations >= max_iter:
            print("Finish Training")
            sys.exit(0)
| 5,178 | 37.93985 | 78 | py |
FUNIT | FUNIT-master/trainer.py | """
Copyright (C) 2019 NVIDIA Corporation. All rights reserved.
Licensed under the CC BY-NC-SA 4.0 license
(https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode).
"""
import copy
import os
import math
import torch
import torch.nn as nn
import torch.nn.init as init
from torch.optim import lr_scheduler
from funit_model import FUNITModel
def update_average(model_tgt, model_src, beta=0.999):
    """In-place EMA over parameters: tgt = beta * tgt + (1 - beta) * src."""
    with torch.no_grad():
        src_params = {name: p for name, p in model_src.named_parameters()}
        for name, tgt_p in model_tgt.named_parameters():
            src_p = src_params[name]
            assert src_p is not tgt_p  # the models must not share tensors
            tgt_p.mul_(beta).add_((1. - beta) * src_p)
class Trainer(nn.Module):
def __init__(self, cfg):
super(Trainer, self).__init__()
self.model = FUNITModel(cfg)
lr_gen = cfg['lr_gen']
lr_dis = cfg['lr_dis']
dis_params = list(self.model.dis.parameters())
gen_params = list(self.model.gen.parameters())
self.dis_opt = torch.optim.RMSprop(
[p for p in dis_params if p.requires_grad],
lr=lr_gen, weight_decay=cfg['weight_decay'])
self.gen_opt = torch.optim.RMSprop(
[p for p in gen_params if p.requires_grad],
lr=lr_dis, weight_decay=cfg['weight_decay'])
self.dis_scheduler = get_scheduler(self.dis_opt, cfg)
self.gen_scheduler = get_scheduler(self.gen_opt, cfg)
self.apply(weights_init(cfg['init']))
self.model.gen_test = copy.deepcopy(self.model.gen)
def gen_update(self, co_data, cl_data, hp, multigpus):
self.gen_opt.zero_grad()
al, ad, xr, cr, sr, ac = self.model(co_data, cl_data, hp, 'gen_update')
self.loss_gen_total = torch.mean(al)
self.loss_gen_recon_x = torch.mean(xr)
self.loss_gen_recon_c = torch.mean(cr)
self.loss_gen_recon_s = torch.mean(sr)
self.loss_gen_adv = torch.mean(ad)
self.accuracy_gen_adv = torch.mean(ac)
self.gen_opt.step()
this_model = self.model.module if multigpus else self.model
update_average(this_model.gen_test, this_model.gen)
return self.accuracy_gen_adv.item()
def dis_update(self, co_data, cl_data, hp):
self.dis_opt.zero_grad()
al, lfa, lre, reg, acc = self.model(co_data, cl_data, hp, 'dis_update')
self.loss_dis_total = torch.mean(al)
self.loss_dis_fake_adv = torch.mean(lfa)
self.loss_dis_real_adv = torch.mean(lre)
self.loss_dis_reg = torch.mean(reg)
self.accuracy_dis_adv = torch.mean(acc)
self.dis_opt.step()
return self.accuracy_dis_adv.item()
def test(self, co_data, cl_data, multigpus):
this_model = self.model.module if multigpus else self.model
return this_model.test(co_data, cl_data)
def resume(self, checkpoint_dir, hp, multigpus):
this_model = self.model.module if multigpus else self.model
last_model_name = get_model_list(checkpoint_dir, "gen")
state_dict = torch.load(last_model_name)
this_model.gen.load_state_dict(state_dict['gen'])
this_model.gen_test.load_state_dict(state_dict['gen_test'])
iterations = int(last_model_name[-11:-3])
last_model_name = get_model_list(checkpoint_dir, "dis")
state_dict = torch.load(last_model_name)
this_model.dis.load_state_dict(state_dict['dis'])
state_dict = torch.load(os.path.join(checkpoint_dir, 'optimizer.pt'))
self.dis_opt.load_state_dict(state_dict['dis'])
self.gen_opt.load_state_dict(state_dict['gen'])
self.dis_scheduler = get_scheduler(self.dis_opt, hp, iterations)
self.gen_scheduler = get_scheduler(self.gen_opt, hp, iterations)
print('Resume from iteration %d' % iterations)
return iterations
def save(self, snapshot_dir, iterations, multigpus):
this_model = self.model.module if multigpus else self.model
# Save generators, discriminators, and optimizers
gen_name = os.path.join(snapshot_dir, 'gen_%08d.pt' % (iterations + 1))
dis_name = os.path.join(snapshot_dir, 'dis_%08d.pt' % (iterations + 1))
opt_name = os.path.join(snapshot_dir, 'optimizer.pt')
torch.save({'gen': this_model.gen.state_dict(),
'gen_test': this_model.gen_test.state_dict()}, gen_name)
torch.save({'dis': this_model.dis.state_dict()}, dis_name)
torch.save({'gen': self.gen_opt.state_dict(),
'dis': self.dis_opt.state_dict()}, opt_name)
def load_ckpt(self, ckpt_name):
state_dict = torch.load(ckpt_name)
self.model.gen.load_state_dict(state_dict['gen'])
self.model.gen_test.load_state_dict(state_dict['gen_test'])
    def translate(self, co_data, cl_data):
        # Thin delegation: translate content images into the style class.
        return self.model.translate(co_data, cl_data)
    def translate_k_shot(self, co_data, cl_data, k, mode):
        # Thin delegation: k-shot translation using k class exemplars.
        return self.model.translate_k_shot(co_data, cl_data, k, mode)
def forward(self, *inputs):
print('Forward function not implemented.')
pass
def get_model_list(dirname, key):
    """Return the lexicographically last checkpoint path in `dirname`.

    A checkpoint matches when its file name contains `key` and ".pt".
    Returns None when the directory does not exist or holds no match.
    """
    if os.path.exists(dirname) is False:
        return None
    gen_models = [os.path.join(dirname, f) for f in os.listdir(dirname) if
                  os.path.isfile(os.path.join(dirname, f)) and
                  key in f and ".pt" in f]
    # BUG FIX: the comprehension never yields None; the old `is None` check
    # let an empty match list fall through to gen_models[-1] (IndexError).
    if not gen_models:
        return None
    gen_models.sort()
    last_model_name = gen_models[-1]
    return last_model_name
def get_scheduler(optimizer, hp, it=-1):
    """Build the LR scheduler described by ``hp['lr_policy']``.

    Returns None for the (default) constant policy, a StepLR for 'step',
    and raises NotImplementedError for any other policy.
    """
    if 'lr_policy' not in hp or hp['lr_policy'] == 'constant':
        scheduler = None  # constant scheduler
    elif hp['lr_policy'] == 'step':
        scheduler = lr_scheduler.StepLR(optimizer, step_size=hp['step_size'],
                                        gamma=hp['gamma'], last_epoch=it)
    else:
        # BUG FIX: the original *returned* a NotImplementedError instance
        # (with the policy as a stray second constructor arg) instead of
        # raising one, so callers silently received an exception object.
        raise NotImplementedError('%s not implemented' % hp['lr_policy'])
    return scheduler
def weights_init(init_type='gaussian'):
    """Return an initializer function suitable for ``nn.Module.apply``.

    The returned function initializes the weights of Conv*/Linear modules
    according to `init_type` and zeroes their biases; every other module
    type is left untouched.
    """
    def init_fun(m):
        classname = m.__class__.__name__
        targeted = classname.startswith(('Conv', 'Linear')) and \
            hasattr(m, 'weight')
        if not targeted:
            return
        if init_type == 'gaussian':
            init.normal_(m.weight.data, 0.0, 0.02)
        elif init_type == 'xavier':
            init.xavier_normal_(m.weight.data, gain=math.sqrt(2))
        elif init_type == 'kaiming':
            init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
        elif init_type == 'orthogonal':
            init.orthogonal_(m.weight.data, gain=math.sqrt(2))
        elif init_type == 'default':
            pass  # keep the framework's default initialization
        else:
            assert 0, "Unsupported initialization: {}".format(init_type)
        if getattr(m, 'bias', None) is not None:
            init.constant_(m.bias.data, 0.0)
    return init_fun
| 6,871 | 39.662722 | 79 | py |
FUNIT | FUNIT-master/blocks.py | """
Copyright (C) 2019 NVIDIA Corporation. All rights reserved.
Licensed under the CC BY-NC-SA 4.0 license
(https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode).
"""
import torch
import torch.nn.functional as F
from torch import nn
class ResBlocks(nn.Module):
    """A sequential stack of ``num_blocks`` identical residual blocks."""

    def __init__(self, num_blocks, dim, norm, activation, pad_type):
        super(ResBlocks, self).__init__()
        blocks = [ResBlock(dim,
                           norm=norm,
                           activation=activation,
                           pad_type=pad_type)
                  for _ in range(num_blocks)]
        self.model = nn.Sequential(*blocks)

    def forward(self, x):
        return self.model(x)
class ResBlock(nn.Module):
    """Two 3x3 conv blocks with an additive identity shortcut."""

    def __init__(self, dim, norm='in', activation='relu', pad_type='zero'):
        super(ResBlock, self).__init__()
        self.model = nn.Sequential(
            Conv2dBlock(dim, dim, 3, 1, 1,
                        norm=norm,
                        activation=activation,
                        pad_type=pad_type),
            # The second conv has no activation so the residual sum is
            # added before any nonlinearity.
            Conv2dBlock(dim, dim, 3, 1, 1,
                        norm=norm,
                        activation='none',
                        pad_type=pad_type),
        )

    def forward(self, x):
        return self.model(x) + x
class ActFirstResBlock(nn.Module):
    """Pre-activation residual block with an optional 1x1 learned shortcut.

    The shortcut is learned only when the input and output channel counts
    differ; otherwise the identity is used.
    """

    def __init__(self, fin, fout, fhid=None,
                 activation='lrelu', norm='none'):
        super().__init__()
        self.learned_shortcut = (fin != fout)
        self.fin = fin
        self.fout = fout
        # Hidden width defaults to the narrower of input/output channels.
        self.fhid = min(fin, fout) if fhid is None else fhid
        self.conv_0 = Conv2dBlock(self.fin, self.fhid, 3, 1,
                                  padding=1, pad_type='reflect', norm=norm,
                                  activation=activation, activation_first=True)
        self.conv_1 = Conv2dBlock(self.fhid, self.fout, 3, 1,
                                  padding=1, pad_type='reflect', norm=norm,
                                  activation=activation, activation_first=True)
        if self.learned_shortcut:
            self.conv_s = Conv2dBlock(self.fin, self.fout, 1, 1,
                                      activation='none', use_bias=False)

    def forward(self, x):
        shortcut = self.conv_s(x) if self.learned_shortcut else x
        return shortcut + self.conv_1(self.conv_0(x))
class LinearBlock(nn.Module):
    """Fully connected layer followed by optional normalization/activation."""

    def __init__(self, in_dim, out_dim, norm='none', activation='relu'):
        super(LinearBlock, self).__init__()
        self.fc = nn.Linear(in_dim, out_dim, bias=True)
        # normalization layer (None disables it in forward)
        if norm == 'bn':
            self.norm = nn.BatchNorm1d(out_dim)
        elif norm == 'in':
            self.norm = nn.InstanceNorm1d(out_dim)
        elif norm == 'none':
            self.norm = None
        else:
            assert 0, "Unsupported normalization: {}".format(norm)
        # activation layer (None disables it in forward)
        if activation == 'relu':
            self.activation = nn.ReLU(inplace=False)
        elif activation == 'lrelu':
            self.activation = nn.LeakyReLU(0.2, inplace=False)
        elif activation == 'tanh':
            self.activation = nn.Tanh()
        elif activation == 'none':
            self.activation = None
        else:
            assert 0, "Unsupported activation: {}".format(activation)

    def forward(self, x):
        out = self.fc(x)
        if self.norm is not None:
            out = self.norm(out)
        if self.activation is not None:
            out = self.activation(out)
        return out
class Conv2dBlock(nn.Module):
    """Pad -> Conv2d -> (norm) -> (activation) building block.

    With ``activation_first=True`` the activation is instead applied to the
    input before padding/convolution (pre-activation ordering).
    """

    def __init__(self, in_dim, out_dim, ks, st, padding=0,
                 norm='none', activation='relu', pad_type='zero',
                 use_bias=True, activation_first=False):
        super(Conv2dBlock, self).__init__()
        self.use_bias = use_bias
        self.activation_first = activation_first
        # padding layer, dispatched by name
        pad_layers = {'reflect': nn.ReflectionPad2d,
                      'replicate': nn.ReplicationPad2d,
                      'zero': nn.ZeroPad2d}
        assert pad_type in pad_layers, \
            "Unsupported padding type: {}".format(pad_type)
        self.pad = pad_layers[pad_type](padding)
        # normalization layer (None disables it in forward)
        if norm == 'bn':
            self.norm = nn.BatchNorm2d(out_dim)
        elif norm == 'in':
            self.norm = nn.InstanceNorm2d(out_dim)
        elif norm == 'adain':
            self.norm = AdaptiveInstanceNorm2d(out_dim)
        elif norm == 'none':
            self.norm = None
        else:
            assert 0, "Unsupported normalization: {}".format(norm)
        # activation layer (None disables it in forward)
        if activation == 'relu':
            self.activation = nn.ReLU(inplace=False)
        elif activation == 'lrelu':
            self.activation = nn.LeakyReLU(0.2, inplace=False)
        elif activation == 'tanh':
            self.activation = nn.Tanh()
        elif activation == 'none':
            self.activation = None
        else:
            assert 0, "Unsupported activation: {}".format(activation)
        self.conv = nn.Conv2d(in_dim, out_dim, ks, st, bias=self.use_bias)

    def forward(self, x):
        if self.activation_first and self.activation is not None:
            x = self.activation(x)
        x = self.conv(self.pad(x))
        if self.norm is not None:
            x = self.norm(x)
        if not self.activation_first and self.activation is not None:
            x = self.activation(x)
        return x
class AdaptiveInstanceNorm2d(nn.Module):
    """Instance normalization whose affine params are assigned externally.

    ``weight`` and ``bias`` start as None and must be set before the first
    forward pass; the forward pass passes them straight to ``F.batch_norm``
    over batch*channels folded channels, so they need batch*channels entries.
    """

    def __init__(self, num_features, eps=1e-5, momentum=0.1):
        super(AdaptiveInstanceNorm2d, self).__init__()
        self.num_features = num_features
        self.eps = eps
        self.momentum = momentum
        # affine parameters, filled in externally (e.g. by an AdaIN assigner)
        self.weight = None
        self.bias = None
        self.register_buffer('running_mean', torch.zeros(num_features))
        self.register_buffer('running_var', torch.ones(num_features))

    def forward(self, x):
        assert self.weight is not None and \
            self.bias is not None, "Please assign AdaIN weight first"
        b, c = x.size(0), x.size(1)
        # Fold batch into channels so batch_norm computes per-instance stats.
        flat = x.contiguous().view(1, b * c, *x.size()[2:])
        out = F.batch_norm(
            flat, self.running_mean.repeat(b), self.running_var.repeat(b),
            self.weight, self.bias, True, self.momentum, self.eps)
        return out.view(b, c, *x.size()[2:])

    def __repr__(self):
        return self.__class__.__name__ + '(' + str(self.num_features) + ')'
| 6,986 | 34.647959 | 79 | py |
SimSiam-91.9-top1-acc-on-CIFAR10 | SimSiam-91.9-top1-acc-on-CIFAR10-main/main.py | import argparse
import time
import math
from os import path, makedirs
import torch
from torch import optim
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from torch.backends import cudnn
from torchvision import datasets
from torchvision import transforms
from simsiam.loader import TwoCropsTransform
from simsiam.model_factory import SimSiam
from simsiam.criterion import SimSiamLoss
from simsiam.validation import KNNValidation
# Command-line interface for SimSiam pre-training.
# NOTE(review): args are parsed at import time, so importing this module
# parses sys.argv as a side effect.
parser = argparse.ArgumentParser('arguments for training')
# paths / bookkeeping
parser.add_argument('--data_root', type=str, help='path to dataset directory')
parser.add_argument('--exp_dir', type=str, help='path to experiment directory')
parser.add_argument('--trial', type=str, default='1', help='trial id')
# model / data shape
parser.add_argument('--img_dim', default=32, type=int)
parser.add_argument('--arch', default='resnet18', help='model name is used for training')
parser.add_argument('--feat_dim', default=2048, type=int, help='feature dimension')
parser.add_argument('--num_proj_layers', type=int, default=2, help='number of projection layer')
# training schedule
parser.add_argument('--batch_size', type=int, default=512, help='batch_size')
parser.add_argument('--num_workers', type=int, default=8, help='num of workers to use')
parser.add_argument('--epochs', type=int, default=800, help='number of training epochs')
parser.add_argument('--gpu', default=None, type=int, help='GPU id to use.')
parser.add_argument('--loss_version', default='simplified', type=str,
                    choices=['simplified', 'original'],
                    help='do the same thing but simplified version is much faster. ()')
parser.add_argument('--print_freq', default=10, type=int, help='print frequency')
parser.add_argument('--eval_freq', default=5, type=int, help='evaluate model frequency')
parser.add_argument('--save_freq', default=50, type=int, help='save model frequency')
parser.add_argument('--resume', default=None, type=str, help='path to latest checkpoint')
# optimizer hyper-parameters
parser.add_argument('--learning_rate', type=float, default=0.05, help='learning rate')
parser.add_argument('--weight_decay', type=float, default=5e-4, help='weight decay')
parser.add_argument('--momentum', type=float, default=0.9, help='momentum')
args = parser.parse_args()
def main():
    """Pre-train SimSiam on CIFAR-10, periodically validating with a KNN probe
    and checkpointing the best/last models under exp_dir/trial."""
    if not path.exists(args.exp_dir):
        makedirs(args.exp_dir)
    trial_dir = path.join(args.exp_dir, args.trial)
    logger = SummaryWriter(trial_dir)
    print(vars(args))
    # SimCLR-style augmentation pipeline; each image yields two random views.
    train_transforms = transforms.Compose([
        transforms.RandomResizedCrop(args.img_dim, scale=(0.2, 1.)),
        transforms.RandomHorizontalFlip(),
        transforms.RandomApply([
            transforms.ColorJitter(0.4, 0.4, 0.4, 0.1)  # not strengthened
        ], p=0.8),
        transforms.RandomGrayscale(p=0.2),
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))
    ])
    train_set = datasets.CIFAR10(root=args.data_root,
                                 train=True,
                                 download=True,
                                 transform=TwoCropsTransform(train_transforms))
    train_loader = DataLoader(dataset=train_set,
                              batch_size=args.batch_size,
                              shuffle=True,
                              num_workers=args.num_workers,
                              pin_memory=True,
                              drop_last=True)
    model = SimSiam(args)
    optimizer = optim.SGD(model.parameters(),
                          lr=args.learning_rate,
                          momentum=args.momentum,
                          weight_decay=args.weight_decay)
    criterion = SimSiamLoss(args.loss_version)
    if args.gpu is not None:
        torch.cuda.set_device(args.gpu)
        model = model.cuda(args.gpu)
        criterion = criterion.cuda(args.gpu)
        cudnn.benchmark = True
    start_epoch = 1
    if args.resume is not None:
        if path.isfile(args.resume):
            start_epoch, model, optimizer = load_checkpoint(model, optimizer, args.resume)
            print("Loaded checkpoint '{}' (epoch {})"
                  .format(args.resume, start_epoch))
        else:
            print("No checkpoint found at '{}'".format(args.resume))
    # routine
    best_acc = 0.0
    validation = KNNValidation(args, model.encoder)
    for epoch in range(start_epoch, args.epochs+1):
        adjust_learning_rate(optimizer, epoch, args)
        print("Training...")
        # train for one epoch
        train_loss = train(train_loader, model, criterion, optimizer, epoch, args)
        logger.add_scalar('Loss/train', train_loss, epoch)
        if epoch % args.eval_freq == 0:
            print("Validating...")
            val_top1_acc = validation.eval()
            print('Top1: {}'.format(val_top1_acc))
            # save the best model
            if val_top1_acc > best_acc:
                best_acc = val_top1_acc
                save_checkpoint(epoch, model, optimizer, best_acc,
                                path.join(trial_dir, '{}_best.pth'.format(args.trial)),
                                'Saving the best model!')
            logger.add_scalar('Acc/val_top1', val_top1_acc, epoch)
        # save the model
        # NOTE(review): val_top1_acc is only bound after the first eval; a
        # save_freq hit (or the final save below) before any eval would raise
        # UnboundLocalError. Works for the defaults (eval_freq=5 < save_freq=50).
        if epoch % args.save_freq == 0:
            save_checkpoint(epoch, model, optimizer, val_top1_acc,
                            path.join(trial_dir, 'ckpt_epoch_{}_{}.pth'.format(epoch, args.trial)),
                            'Saving...')
    print('Best accuracy:', best_acc)
    # save model
    save_checkpoint(epoch, model, optimizer, val_top1_acc,
                    path.join(trial_dir, '{}_last.pth'.format(args.trial)),
                    'Saving the model at the last epoch.')
def train(train_loader, model, criterion, optimizer, epoch, args):
    """Run one SimSiam training epoch and return the mean loss."""
    batch_time = AverageMeter('Time', ':6.3f')
    losses = AverageMeter('Loss', ':.4e')
    progress = ProgressMeter(len(train_loader),
                             [batch_time, losses],
                             prefix="Epoch: [{}]".format(epoch))
    # switch to train mode
    model.train()
    tic = time.time()
    for step, (images, _) in enumerate(train_loader):
        view1, view2 = images[0], images[1]
        if args.gpu is not None:
            view1 = view1.cuda(args.gpu, non_blocking=True)
            view2 = view2.cuda(args.gpu, non_blocking=True)
        # forward both augmented views, then symmetric SimSiam loss
        outs = model(im_aug1=view1, im_aug2=view2)
        loss = criterion(outs['z1'], outs['z2'], outs['p1'], outs['p2'])
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        # bookkeeping
        losses.update(loss.item(), view1.size(0))
        batch_time.update(time.time() - tic)
        tic = time.time()
        if step % args.print_freq == 0:
            progress.display(step)
    return losses.avg
def adjust_learning_rate(optimizer, epoch, args):
    """Set every param group's LR to the cosine-annealed value for `epoch`."""
    scale = 0.5 * (1. + math.cos(math.pi * epoch / args.epochs))
    new_lr = args.learning_rate * scale
    for group in optimizer.param_groups:
        group['lr'] = new_lr
class AverageMeter(object):
    """Tracks the latest value plus a running sum/count/average."""

    def __init__(self, name, fmt=':f'):
        self.name = name
        self.fmt = fmt
        self.reset()

    def reset(self):
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count

    def __str__(self):
        template = '{name} {val' + self.fmt + '} ({avg' + self.fmt + '})'
        return template.format(**self.__dict__)
class ProgressMeter(object):
    """Prints meter values as 'prefix[batch/total]\\tm1\\tm2...'."""

    def __init__(self, num_batches, meters, prefix=""):
        self.batch_fmtstr = self._get_batch_fmtstr(num_batches)
        self.meters = meters
        self.prefix = prefix

    def display(self, batch):
        parts = [self.prefix + self.batch_fmtstr.format(batch)]
        parts.extend(str(meter) for meter in self.meters)
        print('\t'.join(parts))

    def _get_batch_fmtstr(self, num_batches):
        # width of the total batch count, so counters line up
        width = len(str(num_batches))
        fmt = '{:' + str(width) + 'd}'
        return '[' + fmt + '/' + fmt.format(num_batches) + ']'
def save_checkpoint(epoch, model, optimizer, acc, filename, msg):
    """Serialize training state to `filename` and print `msg`."""
    payload = {
        'epoch': epoch,
        'arch': args.arch,  # relies on the module-level parsed args
        'state_dict': model.state_dict(),
        'optimizer': optimizer.state_dict(),
        'top1_acc': acc,
    }
    torch.save(payload, filename)
    print(msg)
def load_checkpoint(model, optimizer, filename):
    """Restore model/optimizer state; returns (start_epoch, model, optimizer).

    NOTE(review): map_location is hard-coded to 'cuda:0', so loading requires
    a CUDA device — confirm before running on CPU-only machines.
    """
    checkpoint = torch.load(filename, map_location='cuda:0')
    model.load_state_dict(checkpoint['state_dict'])
    optimizer.load_state_dict(checkpoint['optimizer'])
    return checkpoint['epoch'], model, optimizer
if __name__ == '__main__':
    # Entry point: run training only when executed as a script, not on import.
    main()
| 9,087 | 33.687023 | 99 | py |
SimSiam-91.9-top1-acc-on-CIFAR10 | SimSiam-91.9-top1-acc-on-CIFAR10-main/main_lincls.py | #!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import argparse
import builtins
import os
import random
import shutil
import time
import warnings
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.optim
import torch.multiprocessing as mp
from torch.utils.data import DataLoader
import torch.utils.data.distributed
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import torchvision.models as models
from simsiam.resnet_cifar import ResNet18, ResNet34, ResNet50, ResNet101, ResNet152
from PIL import Image
# Valid --arch choices: all lowercase, non-dunder callables exposed by
# torchvision.models (e.g. 'resnet50').
model_names = sorted(name for name in models.__dict__
                     if name.islower() and not name.startswith("__")
                     and callable(models.__dict__[name]))
# Command-line interface for linear-classification (linear probe) training.
parser = argparse.ArgumentParser(description='PyTorch ImageNet Training')
parser.add_argument('data', metavar='DIR',
                    help='path to dataset')
parser.add_argument('-a', '--arch', metavar='ARCH', default='resnet50',
                    choices=model_names,
                    help='model architecture: ' +
                         ' | '.join(model_names) +
                         ' (default: resnet50)')
parser.add_argument('--num_cls', default=10, type=int, metavar='N',
                    help='number of classes in dataset (output dimention of models)')
parser.add_argument('-j', '--workers', default=8, type=int, metavar='N',
                    help='number of data loading workers (default: 32)')
# training schedule
parser.add_argument('--epochs', default=100, type=int, metavar='N',
                    help='number of total epochs to run')
parser.add_argument('--start_epoch', default=0, type=int, metavar='N',
                    help='manual epoch number (useful on restarts)')
parser.add_argument('-b', '--batch_size', default=256, type=int,
                    metavar='N',
                    help='mini-batch size (default: 256), this is the total '
                         'batch size of all GPUs on the current node when '
                         'using Data Parallel or Distributed Data Parallel')
parser.add_argument('--lr', '--learning_rate', default=30., type=float,
                    metavar='LR', help='initial learning rate', dest='lr')
parser.add_argument('--schedule', default=[60, 80], nargs='*', type=int,
                    help='learning rate schedule (when to drop lr by a ratio)')
parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
                    help='momentum')
parser.add_argument('--wd', '--weight_decay', default=0., type=float,
                    metavar='W', help='weight decay (default: 0.)',
                    dest='weight_decay')
parser.add_argument('-p', '--print-freq', default=500, type=int,
                    metavar='N', help='print frequency (default: 10)')
parser.add_argument('--resume', default='', type=str, metavar='PATH',
                    help='path to latest checkpoint (default: none)')
parser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true',
                    help='evaluate model on validation set')
# distributed-training options
parser.add_argument('--world-size', default=-1, type=int,
                    help='number of nodes for distributed training')
parser.add_argument('--rank', default=-1, type=int,
                    help='node rank for distributed training')
parser.add_argument('--dist_url', default='tcp://224.66.41.62:23456', type=str,
                    help='url used to set up distributed training')
parser.add_argument('--dist-backend', default='nccl', type=str,
                    help='distributed backend')
parser.add_argument('--seed', default=None, type=int,
                    help='seed for initializing training. ')
parser.add_argument('--gpu', default=None, type=int,
                    help='GPU id to use.')
parser.add_argument('--multiprocessing-distributed', action='store_true',
                    help='Use multi-processing distributed training to launch '
                         'N processes per node, which has N GPUs. This is the '
                         'fastest way to use PyTorch for either single node or '
                         'multi node data parallel training')
parser.add_argument('--pretrained', default='', type=str, help='path to pretrained checkpoint')
def get_backbone(backbone_name, num_cls=10):
    """Return the ResNet backbone named by `backbone_name` with a
    `num_cls`-way output head.

    FIX: the original built all five ResNets eagerly just to index one,
    wasting time/memory on every call; constructors are now looked up
    lazily and only the selected network is instantiated.
    """
    constructors = {'resnet18': ResNet18,
                    'resnet34': ResNet34,
                    'resnet50': ResNet50,
                    'resnet101': ResNet101,
                    'resnet152': ResNet152}
    return constructors[backbone_name](low_dim=num_cls)
best_acc1 = 0  # global best top-1 validation accuracy, updated in main_worker
def main():
    """Parse CLI args and launch training: spawn one worker per GPU for
    multiprocessing-distributed runs, otherwise call main_worker directly."""
    args = parser.parse_args()
    print(vars(args))
    if args.seed is not None:
        random.seed(args.seed)
        torch.manual_seed(args.seed)
        cudnn.deterministic = True
        warnings.warn('You have chosen to seed training. '
                      'This will turn on the CUDNN deterministic setting, '
                      'which can slow down your training considerably! '
                      'You may see unexpected behavior when restarting '
                      'from checkpoints.')
    if args.gpu is not None:
        warnings.warn('You have chosen a specific GPU. This will completely '
                      'disable data parallelism.')
    if args.dist_url == "env://" and args.world_size == -1:
        args.world_size = int(os.environ["WORLD_SIZE"])
    args.distributed = args.world_size > 1 or args.multiprocessing_distributed
    ngpus_per_node = torch.cuda.device_count()
    if args.multiprocessing_distributed:
        # Since we have ngpus_per_node processes per node, the total world_size
        # needs to be adjusted accordingly
        args.world_size = ngpus_per_node * args.world_size
        # Use torch.multiprocessing.spawn to launch distributed processes: the
        # main_worker process function
        mp.spawn(main_worker, nprocs=ngpus_per_node, args=(ngpus_per_node, args))
    else:
        # Simply call main_worker function
        main_worker(args.gpu, ngpus_per_node, args)
def main_worker(gpu, ngpus_per_node, args):
    """Per-process worker: build the frozen backbone + trainable fc head,
    optionally load pre-trained/resumed weights, then run the linear-probe
    train/validate loop on CIFAR-10.

    NOTE(review): the criterion/model placement below calls .cuda(), so this
    path appears to require a CUDA device — confirm before CPU use.
    """
    global best_acc1
    args.gpu = gpu
    # suppress printing if not master
    if args.multiprocessing_distributed and args.gpu != 0:
        def print_pass(*args):
            pass
        builtins.print = print_pass
    if args.gpu is not None:
        print("Use GPU: {} for training".format(args.gpu))
    if args.distributed:
        if args.dist_url == "env://" and args.rank == -1:
            args.rank = int(os.environ["RANK"])
        if args.multiprocessing_distributed:
            # For multiprocessing distributed training, rank needs to be the
            # global rank among all the processes
            args.rank = args.rank * ngpus_per_node + gpu
        dist.init_process_group(backend=args.dist_backend, init_method=args.dist_url,
                                world_size=args.world_size, rank=args.rank)
    # create model
    print("=> creating model '{}'".format(args.arch))
    # model = models.__dict__[args.arch]()
    model = get_backbone(args.arch, args.num_cls)
    # freeze all layers but the last fc
    for name, param in model.named_parameters():
        if name not in ['fc.weight', 'fc.bias']:
            param.requires_grad = False
    # init the fc layer
    model.fc.weight.data.normal_(mean=0.0, std=0.01)
    model.fc.bias.data.zero_()
    # load from pre-trained, before DistributedDataParallel constructor
    if args.pretrained:
        if os.path.isfile(args.pretrained):
            print("=> loading checkpoint '{}'".format(args.pretrained))
            checkpoint = torch.load(args.pretrained, map_location="cpu")
            state_dict = checkpoint['state_dict']
            # Keep only SimSiam backbone weights, renamed to match this model
            # ('backbone.X' -> 'X'); projector/predictor/fc weights are dropped.
            new_state_dict = dict()
            for old_key, value in state_dict.items():
                if old_key.startswith('backbone') and 'fc' not in old_key:
                    new_key = old_key.replace('backbone.', '')
                    new_state_dict[new_key] = value
            args.start_epoch = 0
            msg = model.load_state_dict(new_state_dict, strict=False)
            # Only the freshly initialized fc head may be missing.
            assert set(msg.missing_keys) == {"fc.weight", "fc.bias"}
            print("=> loaded pre-trained model '{}'".format(args.pretrained))
        else:
            print("=> no checkpoint found at '{}'".format(args.pretrained))
    if args.distributed:
        # For multiprocessing distributed, DistributedDataParallel constructor
        # should always set the single device scope, otherwise,
        # DistributedDataParallel will use all available devices.
        if args.gpu is not None:
            torch.cuda.set_device(args.gpu)
            model.cuda(args.gpu)
            # When using a single GPU per process and per
            # DistributedDataParallel, we need to divide the batch size
            # ourselves based on the total number of GPUs we have
            args.batch_size = int(args.batch_size / ngpus_per_node)
            args.workers = int((args.workers + ngpus_per_node - 1) / ngpus_per_node)
            model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])
        else:
            model.cuda()
            # DistributedDataParallel will divide and allocate batch_size to all
            # available GPUs if device_ids are not set
            model = torch.nn.parallel.DistributedDataParallel(model)
    elif args.gpu is not None:
        torch.cuda.set_device(args.gpu)
        model = model.cuda(args.gpu)
    else:
        # DataParallel will divide and allocate batch_size to all available GPUs
        if args.arch.startswith('alexnet') or args.arch.startswith('vgg'):
            model.features = torch.nn.DataParallel(model.features)
            model.cuda()
        else:
            model = torch.nn.DataParallel(model).cuda()
    # define loss function (criterion) and optimizer
    criterion = nn.CrossEntropyLoss().cuda(args.gpu)
    # optimize only the linear classifier
    parameters = list(filter(lambda p: p.requires_grad, model.parameters()))
    assert len(parameters) == 2  # fc.weight, fc.bias
    optimizer = torch.optim.SGD(parameters, args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)
    # optionally resume from a checkpoint
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            if args.gpu is None:
                checkpoint = torch.load(args.resume)
            else:
                # Map model to be loaded to specified single gpu.
                loc = 'cuda:{}'.format(args.gpu)
                checkpoint = torch.load(args.resume, map_location=loc)
            args.start_epoch = checkpoint['epoch']
            best_acc1 = checkpoint['best_acc1']
            if args.gpu is not None:
                # best_acc1 may be from a checkpoint from a different GPU
                best_acc1 = best_acc1.to(args.gpu)
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            print("=> loaded checkpoint '{}' (epoch {})"
                  .format(args.resume, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))
    cudnn.benchmark = True
    # Data loading code
    transform_train = transforms.Compose([
        transforms.RandomResizedCrop(32, scale=(0.8, 1.0),
                                     ratio=(3.0 / 4.0, 4.0 / 3.0),
                                     interpolation=Image.BICUBIC),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))
    ])
    transform_test = transforms.Compose([
        transforms.Resize(int(32 * (8 / 7)), interpolation=Image.BICUBIC),
        transforms.CenterCrop(32),
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))
    ])
    trainset = datasets.CIFAR10(args.data, train=True, transform=transform_train)
    valset = datasets.CIFAR10(args.data, train=False, transform=transform_test)
    if args.distributed:
        train_sampler = torch.utils.data.distributed.DistributedSampler(trainset)
    else:
        train_sampler = None
    train_loader = DataLoader(trainset,
                              batch_size=args.batch_size,
                              shuffle=(train_sampler is None),
                              num_workers=args.workers,
                              sampler=train_sampler,
                              pin_memory=True,
                              drop_last=True)
    val_loader = DataLoader(valset,
                            batch_size=args.batch_size,
                            shuffle=False,
                            num_workers=args.workers,
                            pin_memory=True,
                            drop_last=True)
    if args.evaluate:
        validate(val_loader, model, criterion, args)
        return
    for epoch in range(args.start_epoch, args.epochs):
        if args.distributed:
            train_sampler.set_epoch(epoch)
        adjust_learning_rate(optimizer, epoch, args)
        # train for one epoch
        train(train_loader, model, criterion, optimizer, epoch, args)
        # evaluate on validation set
        acc1 = validate(val_loader, model, criterion, args)
        # remember best acc@1 and save checkpoint
        is_best = acc1 > best_acc1
        best_acc1 = max(acc1, best_acc1)
        if not args.multiprocessing_distributed or (args.multiprocessing_distributed
                                                    and args.rank % ngpus_per_node == 0):
            save_checkpoint({
                'epoch': epoch + 1,
                'arch': args.arch,
                'state_dict': model.state_dict(),
                'best_acc1': best_acc1,
                'optimizer': optimizer.state_dict(),
            }, is_best)
            if epoch == args.start_epoch:
                sanity_check(model.state_dict(), args.pretrained)
    print('Best acc:', best_acc1)
def train(train_loader, model, criterion, optimizer, epoch, args):
    """Train only the linear classifier head for one epoch.

    The network stays in eval mode: under the linear-probing protocol the
    frozen features must not change, and BatchNorm in train mode would
    update its running mean/std even without receiving gradients.
    """
    batch_time = AverageMeter('Time', ':6.3f')
    data_time = AverageMeter('Data', ':6.3f')
    losses = AverageMeter('Loss', ':.4e')
    top1 = AverageMeter('Acc@1', ':6.2f')
    top5 = AverageMeter('Acc@5', ':6.2f')
    progress = ProgressMeter(len(train_loader),
                             [batch_time, data_time, losses, top1, top5],
                             prefix="Epoch: [{}]".format(epoch))
    model.eval()  # deliberate: see docstring
    tic = time.time()
    for step, (images, target) in enumerate(train_loader):
        data_time.update(time.time() - tic)
        if args.gpu is not None:
            images = images.cuda(args.gpu, non_blocking=True)
            target = target.cuda(args.gpu, non_blocking=True)
        output = model(images)
        loss = criterion(output, target)
        # record metrics before the parameter update
        acc1, acc5 = accuracy(output, target, topk=(1, 5))
        batch = images.size(0)
        losses.update(loss.item(), batch)
        top1.update(acc1[0], batch)
        top5.update(acc5[0], batch)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        batch_time.update(time.time() - tic)
        tic = time.time()
        if step % args.print_freq == 0:
            progress.display(step)
def validate(val_loader, model, criterion, args):
    """Evaluate on the validation set; returns the average top-1 accuracy."""
    batch_time = AverageMeter('Time', ':6.3f')
    losses = AverageMeter('Loss', ':.4e')
    top1 = AverageMeter('Acc@1', ':6.2f')
    top5 = AverageMeter('Acc@5', ':6.2f')
    progress = ProgressMeter(len(val_loader),
                             [batch_time, losses, top1, top5],
                             prefix='Test: ')
    # switch to evaluate mode
    model.eval()
    with torch.no_grad():
        tic = time.time()
        for step, (images, target) in enumerate(val_loader):
            if args.gpu is not None:
                images = images.cuda(args.gpu, non_blocking=True)
                target = target.cuda(args.gpu, non_blocking=True)
            output = model(images)
            loss = criterion(output, target)
            acc1, acc5 = accuracy(output, target, topk=(1, 5))
            batch = images.size(0)
            losses.update(loss.item(), batch)
            top1.update(acc1[0], batch)
            top5.update(acc5[0], batch)
            batch_time.update(time.time() - tic)
            tic = time.time()
            if step % args.print_freq == 0:
                progress.display(step)
        # TODO: this should also be done with the ProgressMeter
        print(' * Acc@1 {top1.avg:.3f} Acc@5 {top5.avg:.3f}'
              .format(top1=top1, top5=top5))
    return top1.avg
def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):
    """Save `state` to `filename`; mirror it to model_best.pth.tar when best."""
    torch.save(state, filename)
    if not is_best:
        return
    shutil.copyfile(filename, 'model_best.pth.tar')
def sanity_check(state_dict, pretrained_weights):
    """
    Linear classifier should not change any weights other than the linear layer.
    This sanity check asserts nothing wrong happens (e.g., BN stats updated).
    """
    print("=> loading '{}' for sanity check".format(pretrained_weights))
    pre_state = torch.load(pretrained_weights, map_location="cpu")['state_dict']
    for k in list(state_dict.keys()):
        # the trainable fc head is allowed (expected) to change
        if 'fc.weight' in k or 'fc.bias' in k:
            continue
        # map back to the pre-trained naming: strip DataParallel's 'module.'
        # wrapper and re-add the 'backbone.' prefix
        stripped = k[len('module.'):] if k.startswith('module.') else k
        k_pre = 'backbone.' + stripped
        assert (state_dict[k].cpu() == pre_state[k_pre]).all(), \
            '{} is changed in linear classifier training.'.format(k)
    print("=> sanity check passed.")
class AverageMeter(object):
    """Running statistics helper: last value, sum, count and mean."""

    def __init__(self, name, fmt=':f'):
        self.name = name
        self.fmt = fmt
        self.reset()

    def reset(self):
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count

    def __str__(self):
        template = '{name} {val' + self.fmt + '} ({avg' + self.fmt + '})'
        return template.format(**self.__dict__)
class ProgressMeter(object):
    """Tab-joined printer for a prefix, a [batch/total] counter and meters."""

    def __init__(self, num_batches, meters, prefix=""):
        self.batch_fmtstr = self._get_batch_fmtstr(num_batches)
        self.meters = meters
        self.prefix = prefix

    def display(self, batch):
        parts = [self.prefix + self.batch_fmtstr.format(batch)]
        parts.extend(str(meter) for meter in self.meters)
        print('\t'.join(parts))

    def _get_batch_fmtstr(self, num_batches):
        # pad the running counter to the width of the total
        width = len(str(num_batches))
        fmt = '{:' + str(width) + 'd}'
        return '[' + fmt + '/' + fmt.format(num_batches) + ']'
def adjust_learning_rate(optimizer, epoch, args):
    """Multiply the base LR by 0.1 for every schedule milestone already passed."""
    passed = sum(1 for milestone in args.schedule if epoch >= milestone)
    new_lr = args.lr * (0.1 ** passed)
    for group in optimizer.param_groups:
        group['lr'] = new_lr
def accuracy(output, target, topk=(1,)):
    """Computes the accuracy over the k top predictions for the specified values of k"""
    with torch.no_grad():
        maxk = max(topk)
        batch_size = target.size(0)
        # (batch, maxk) ranked predictions, transposed to (maxk, batch)
        _, pred = output.topk(maxk, 1, True, True)
        pred = pred.t()
        correct = pred.eq(target.view(1, -1).expand_as(pred))
        # a sample is correct@k if the target appears in the first k rows
        return [correct[:k].reshape(-1).float().sum(0, keepdim=True)
                .mul_(100.0 / batch_size)
                for k in topk]
if __name__ == '__main__':
    # Entry point: run the linear probe only when executed as a script.
    main()
| 20,587 | 38.066414 | 95 | py |
SimSiam-91.9-top1-acc-on-CIFAR10 | SimSiam-91.9-top1-acc-on-CIFAR10-main/simsiam/model_factory.py | from torch import nn
from .resnet_cifar import ResNet18, ResNet34, ResNet50, ResNet101, ResNet152
class projection_MLP(nn.Module):
    """SimSiam projection head: 2- or 3-layer MLP ending in a non-affine BN.

    Args:
        in_dim: dimensionality of the backbone features.
        out_dim: projection dimensionality (also used as the hidden width).
        num_layers: 2 or 3 layers.
    """

    def __init__(self, in_dim, out_dim, num_layers=2):
        super().__init__()
        if num_layers not in (2, 3):
            # Previously an unsupported value made forward() silently return
            # its input untouched; fail fast instead.
            raise ValueError(f"num_layers must be 2 or 3, got {num_layers}")
        hidden_dim = out_dim
        self.num_layers = num_layers

        self.layer1 = nn.Sequential(
            nn.Linear(in_dim, hidden_dim),
            nn.BatchNorm1d(hidden_dim),
            nn.ReLU(inplace=True)
        )
        self.layer2 = nn.Sequential(
            nn.Linear(hidden_dim, hidden_dim),
            nn.BatchNorm1d(hidden_dim),
            nn.ReLU(inplace=True)
        )
        self.layer3 = nn.Sequential(
            nn.Linear(hidden_dim, out_dim),
            nn.BatchNorm1d(out_dim, affine=False)  # Page:5, Paragraph:2
        )

    def forward(self, x):
        if self.num_layers == 2:
            x = self.layer1(x)
            x = self.layer3(x)
        else:  # num_layers == 3, enforced in __init__
            x = self.layer1(x)
            x = self.layer2(x)
            x = self.layer3(x)
        return x
class prediction_MLP(nn.Module):
    """SimSiam prediction head h: 2-layer bottleneck MLP (dim -> dim/4 -> dim)."""

    def __init__(self, in_dim=2048):
        super().__init__()
        out_dim = in_dim
        hidden_dim = int(out_dim / 4)

        self.layer1 = nn.Sequential(
            nn.Linear(in_dim, hidden_dim),
            nn.BatchNorm1d(hidden_dim),
            nn.ReLU(inplace=True)
        )
        self.layer2 = nn.Linear(hidden_dim, out_dim)

    def forward(self, x):
        # Bottleneck then expand back to the input dimensionality.
        return self.layer2(self.layer1(x))
class SimSiam(nn.Module):
    """Full SimSiam network: backbone + projector (encoder f) and predictor h.

    ``args`` must provide ``arch`` (backbone name), ``feat_dim`` (projection
    dimensionality) and ``num_proj_layers``.
    """

    def __init__(self, args):
        super(SimSiam, self).__init__()
        self.backbone = SimSiam.get_backbone(args.arch)
        out_dim = self.backbone.fc.weight.shape[1]
        # Drop the classification head; the projector consumes raw features.
        self.backbone.fc = nn.Identity()

        self.projector = projection_MLP(out_dim, args.feat_dim,
                                        args.num_proj_layers)
        self.encoder = nn.Sequential(
            self.backbone,
            self.projector
        )
        self.predictor = prediction_MLP(args.feat_dim)

    @staticmethod
    def get_backbone(backbone_name):
        """Construct and return the CIFAR ResNet named by ``backbone_name``.

        The name-to-constructor map avoids eagerly instantiating all five
        backbones (the previous implementation built every network just to
        select one, wasting time/memory and consuming RNG state).
        """
        constructors = {'resnet18': ResNet18,
                        'resnet34': ResNet34,
                        'resnet50': ResNet50,
                        'resnet101': ResNet101,
                        'resnet152': ResNet152}
        return constructors[backbone_name]()

    def forward(self, im_aug1, im_aug2):
        # Both augmented views share the same encoder and predictor.
        z1 = self.encoder(im_aug1)
        z2 = self.encoder(im_aug2)

        p1 = self.predictor(z1)
        p2 = self.predictor(z2)

        return {'z1': z1, 'z2': z2, 'p1': p1, 'p2': p2}
| 2,575 | 24.76 | 76 | py |
SimSiam-91.9-top1-acc-on-CIFAR10 | SimSiam-91.9-top1-acc-on-CIFAR10-main/simsiam/validation.py | # https://github.com/zhirongw/lemniscate.pytorch/blob/master/test.py
import torch
from torch.utils.data import DataLoader
from torchvision import transforms
from torchvision import datasets
from torch import nn
class KNNValidation(object):
    """k-nearest-neighbour evaluation of a frozen encoder on CIFAR-10.

    Features for the training split are extracted once into a feature bank,
    then every validation example is classified by the label of its top-K
    most similar training features (dot product against L2-normalized bank
    columns).
    """
    def __init__(self, args, model, K=1):
        self.model = model
        # Follow the device the model currently lives on.
        self.device = torch.device('cuda' if next(model.parameters()).is_cuda else 'cpu')
        self.args = args
        self.K = K

        base_transforms = transforms.Compose([
            transforms.ToTensor(),
            # CIFAR-10 channel statistics.
            transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
        ])

        train_dataset = datasets.CIFAR10(root=args.data_root,
                                         train=True,
                                         download=True,
                                         transform=base_transforms)
        # drop_last=True keeps every batch the same size, which the
        # column-offset indexing in _topk_retrieval relies on.
        self.train_dataloader = DataLoader(train_dataset,
                                           batch_size=args.batch_size,
                                           shuffle=False,
                                           num_workers=args.num_workers,
                                           pin_memory=True,
                                           drop_last=True)

        val_dataset = datasets.CIFAR10(root=args.data_root,
                                       train=False,
                                       download=True,
                                       transform=base_transforms)
        self.val_dataloader = DataLoader(val_dataset,
                                         batch_size=args.batch_size,
                                         shuffle=False,
                                         num_workers=args.num_workers,
                                         pin_memory=True,
                                         drop_last=True)

    def _topk_retrieval(self):
        """Extract features from validation split and search on train split features."""
        n_data = self.train_dataloader.dataset.data.shape[0]
        feat_dim = self.args.feat_dim

        self.model.eval()
        if str(self.device) == 'cuda':
            torch.cuda.empty_cache()

        # Feature bank stored column-major: (feat_dim, n_data).
        train_features = torch.zeros([feat_dim, n_data], device=self.device)
        with torch.no_grad():
            for batch_idx, (inputs, _) in enumerate(self.train_dataloader):
                inputs = inputs.to(self.device)
                batch_size = inputs.size(0)

                # forward
                features = self.model(inputs)
                features = nn.functional.normalize(features)
                # Constant batch_size (drop_last=True) makes this offset exact.
                train_features[:, batch_idx * batch_size:batch_idx * batch_size + batch_size] = features.data.t()

            # NOTE(review): `.cuda()` here is unconditional and would fail on a
            # CPU-only run even though self.device may be 'cpu' -- confirm.
            train_labels = torch.LongTensor(self.train_dataloader.dataset.targets).cuda()

        total = 0
        correct = 0
        with torch.no_grad():
            for batch_idx, (inputs, targets) in enumerate(self.val_dataloader):
                targets = targets.cuda(non_blocking=True)
                batch_size = inputs.size(0)
                # Validation features are not normalized; the per-sample scale
                # factor does not change the top-K ranking of dot products.
                features = self.model(inputs.to(self.device))

                dist = torch.mm(features, train_features)
                yd, yi = dist.topk(self.K, dim=1, largest=True, sorted=True)
                candidates = train_labels.view(1, -1).expand(batch_size, -1)
                retrieval = torch.gather(candidates, 1, yi)

                # Use only the single nearest neighbour's label as prediction.
                retrieval = retrieval.narrow(1, 0, 1).clone().view(-1)
                total += targets.size(0)
                correct += retrieval.eq(targets.data).sum().item()
        top1 = correct / total

        return top1

    def eval(self):
        """Run the retrieval pass and return top-1 kNN accuracy."""
        return self._topk_retrieval()
| 3,662 | 38.815217 | 113 | py |
SimSiam-91.9-top1-acc-on-CIFAR10 | SimSiam-91.9-top1-acc-on-CIFAR10-main/simsiam/resnet_cifar.py | '''ResNet in PyTorch.
For Pre-activation ResNet, see 'preact_resnet.py'.
Reference:
[1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Deep Residual Learning for Image Recognition. arXiv:1512.03385
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
# from lib.normalize import Normalize
from torch.autograd import Variable
class BasicBlock(nn.Module):
    """Two 3x3 conv+BN layers with an (optionally projected) residual add."""
    expansion = 1

    def __init__(self, in_planes, planes, stride=1):
        super(BasicBlock, self).__init__()
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)

        # Project the identity path when stride or channel count changes.
        needs_projection = stride != 1 or in_planes != self.expansion * planes
        if needs_projection:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, self.expansion * planes, kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(self.expansion * planes)
            )
        else:
            self.shortcut = nn.Sequential()

    def forward(self, x):
        branch = F.relu(self.bn1(self.conv1(x)))
        branch = self.bn2(self.conv2(branch))
        branch = branch + self.shortcut(x)
        return F.relu(branch)
class Bottleneck(nn.Module):
    """1x1 reduce -> 3x3 -> 1x1 expand residual block (expansion factor 4)."""
    expansion = 4

    def __init__(self, in_planes, planes, stride=1):
        super(Bottleneck, self).__init__()
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, self.expansion * planes, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(self.expansion * planes)

        if stride == 1 and in_planes == self.expansion * planes:
            self.shortcut = nn.Sequential()
        else:
            # Match channels/stride on the identity path with a 1x1 projection.
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, self.expansion * planes, kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(self.expansion * planes)
            )

    def forward(self, x):
        h = F.relu(self.bn1(self.conv1(x)))
        h = F.relu(self.bn2(self.conv2(h)))
        h = self.bn3(self.conv3(h))
        h = h + self.shortcut(x)
        return F.relu(h)
class ResNet(nn.Module):
    """CIFAR-style ResNet: 3x3 stem (no max-pool), four stages, linear head."""

    def __init__(self, block, num_blocks, low_dim=128):
        super(ResNet, self).__init__()
        self.in_planes = 64
        self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
        self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
        self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
        self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)
        self.fc = nn.Linear(512 * block.expansion, low_dim)
        # self.l2norm = Normalize(2)

    def _make_layer(self, block, planes, num_blocks, stride):
        # Only the first block of a stage downsamples; the rest use stride 1.
        blocks = []
        for s in [stride] + [1] * (num_blocks - 1):
            blocks.append(block(self.in_planes, planes, s))
            self.in_planes = planes * block.expansion
        return nn.Sequential(*blocks)

    def forward(self, x):
        h = F.relu(self.bn1(self.conv1(x)))
        h = self.layer1(h)
        h = self.layer2(h)
        h = self.layer3(h)
        h = self.layer4(h)
        h = F.avg_pool2d(h, 4)
        h = h.view(h.size(0), -1)
        # out = self.l2norm(out)
        return self.fc(h)
def ResNet18(low_dim=128):
    """ResNet-18: BasicBlock with [2, 2, 2, 2] blocks per stage."""
    return ResNet(BasicBlock, [2,2,2,2], low_dim)

def ResNet34(low_dim=128):
    """ResNet-34: BasicBlock with [3, 4, 6, 3] blocks per stage."""
    return ResNet(BasicBlock, [3,4,6,3], low_dim)

def ResNet50(low_dim=128):
    """ResNet-50: Bottleneck with [3, 4, 6, 3] blocks per stage."""
    return ResNet(Bottleneck, [3,4,6,3], low_dim)

def ResNet101(low_dim=128):
    """ResNet-101: Bottleneck with [3, 4, 23, 3] blocks per stage."""
    return ResNet(Bottleneck, [3,4,23,3], low_dim)

def ResNet152(low_dim=128):
    """ResNet-152: Bottleneck with [3, 8, 36, 3] blocks per stage."""
    return ResNet(Bottleneck, [3,8,36,3], low_dim)
def test():
    """Smoke test: forward one random CIFAR-sized batch through ResNet-18."""
    net = ResNet18()
    y = net(Variable(torch.randn(1,3,32,32)))
    print(y.size())

# test()
| 4,245 | 32.433071 | 102 | py |
SimSiam-91.9-top1-acc-on-CIFAR10 | SimSiam-91.9-top1-acc-on-CIFAR10-main/simsiam/criterion.py | from torch import nn
class SimSiamLoss(nn.Module):
    """Negative-cosine SimSiam loss with stop-gradient on the targets.

    ``version='original'`` normalizes manually; ``'simplified'`` uses
    ``cosine_similarity``. Both variants compute the same value.
    """

    def __init__(self, version='simplified'):
        super().__init__()
        if version not in ('original', 'simplified'):
            # Previously an unknown version made asymmetric_loss return None,
            # producing an opaque TypeError at the call site.
            raise ValueError(f"unknown SimSiam loss version: {version!r}")
        self.ver = version

    def asymmetric_loss(self, p, z):
        """D(p, z) = -cos(p, stopgrad(z))."""
        if self.ver == 'original':
            z = z.detach()  # stop gradient
            p = nn.functional.normalize(p, dim=1)
            z = nn.functional.normalize(z, dim=1)
            return -(p * z).sum(dim=1).mean()
        else:  # 'simplified', enforced in __init__
            z = z.detach()  # stop gradient
            return - nn.functional.cosine_similarity(p, z, dim=-1).mean()

    def forward(self, z1, z2, p1, p2):
        """Symmetrized loss: 0.5*D(p1, z2) + 0.5*D(p2, z1)."""
        loss1 = self.asymmetric_loss(p1, z2)
        loss2 = self.asymmetric_loss(p2, z1)
        return 0.5 * loss1 + 0.5 * loss2
| 751 | 24.066667 | 73 | py |
mIOHMM | mIOHMM-main/src/utils.py | import numpy as np
import pickle
import torch
def save_pickle(data, filename):
    """Serialize ``data`` to ``filename`` using the highest pickle protocol."""
    with open(filename, "wb") as out:
        pickle.dump(data, out, pickle.HIGHEST_PROTOCOL)
def load_pickle(filepath):
    """Deserialize and return the object stored at ``filepath``."""
    with open(filepath, "rb") as source:
        return pickle.load(source)
def normalize(A, axis=None):
    """Normalize tensor ``A`` so it sums to one along ``axis``.

    Slices that sum to zero are left as zeros (their divisor is replaced by
    one) instead of producing NaNs.
    """
    Z = torch.sum(A, axis=axis, keepdims=True)
    # Stay entirely in torch: the previous np.where/fancy-index round-trip
    # fails for CUDA tensors and mixes frameworks needlessly.
    Z = torch.where(Z == 0, torch.ones_like(Z), Z)
    return A / Z
def normalize_exp(log_P, axis=None):
    """Softmax-style normalization: exp(log_P) normalized along ``axis``,
    subtracting the running maximum first for numerical stability."""
    shift = torch.max(log_P, keepdims=True, axis=axis)[0]
    stable = torch.exp(log_P - shift)
    return normalize(stable, axis=axis)
| 586 | 19.964286 | 53 | py |
mIOHMM | mIOHMM-main/src/piomhmm.py | from scipy.special import gamma as gamma_fn
from sklearn.cluster import KMeans
from src.utils import normalize_exp
import math
import numpy as np
import pickle
import torch
torch.set_default_dtype(torch.float64)
class mHMM:
    def __init__(
        self,
        data,
        ins=None,
        K=2,
        k=5,
        TM=None,
        OM=None,
        full_cov=False,
        io=True,
        state_io=False,
        personalized_io=False,
        personalized=False,
        eps=0,
        min_var=1e-6,
        device="cpu",
        priorV=False,
        priorMu=False,
        sample_num=1,
        alpha=10.0,
        beta=5.0,
        UT=False,
        var_fill=0.5,
        VI_diag=False,
        lr=0.001,
    ):
        """
        personalized input-output hidden markov model. This class of models considers patient observations that are
        modeled by several possible factors, which are turned on or off using flags. The most complete version of the
        model is x_i,t | z_i,t = k, d_i,t ~ N(mu_k + R_i + (V_k + M_i)*D_i,t, sigma_k) where x_i,t is the observed data,
        z_i,t is the latent state, d_i,t is the observed drug information, R_i is a personalized state effect, V_k is
        a state-based drug effect, M_i is a personalized drug effect, and sigma_k is the covariance.
        :param data: an n x t x d matrix of clinical observations
        :param ins: an n x t matrix of input/drug information (note that this is assumed to be univariate)
        :param K: number of HMM mixture components (the total state count is k * K)
        :param k: number of latent states per HMM
        :param TM: the time mask, indicates trailing zeros in the observation array
        :param OM: the observation mask, indicates missing observations within the time series (i.e. missed visit)
        :param full_cov: flag indicating if a full covariance matrix should be used, alternatively a diagonal covariance is used
        :param io: flag indicating if the model is an input-output HMM; drugs should not be none if io=True
        :param state_io: flag indicating if input-output effects should be a function of state, if io=True and
        state_io=False, V_k = V for all k. This flag should not be True if io=False
        :param personalized_io: flag indicating if input-output effects should be a function of patient (i.e. M_i is
        'turned on'). This flag should not be True if io=False
        :param personalized: flag indicating if personalized state effects should be applied (i.e. R_i is 'turned on').
        :param eps: prevent division by zero
        :param min_var: set a minimum allowable variance
        :param device: either cpu or cuda
        :param priorV: indicates if priors should be used for the state- and personalized-drug effects
        :param priorMu: indicates if priors should be used for the state-means
        :param sample_num: number of samples used in MC sampling; only 1 sample is currently supported
        :param alpha: parameter of the inverse gamma distribution used as prior for V_k and M_i
        :param beta: parameter of the inverse gamma distribution used as prior for V_k and M_i
        :param UT: parameter to enforce an upper triangular structure for the transition matrix
        :param var_fill: parameter to specify initial guess for variance
        :param VI_diag: flag to indicate whether or not the variational distributions should have a diagonal covariance structure
        :param lr: learning rate for the Adam optimizer over the variational parameters
        """
        # number of HMM mixtures
        self.K = K
        # number of latent states per HMM
        self.k_per_hmm = k
        # number of latent states
        self.k = k * K
        # flag to indicate whether or not to use a full covariance matrix (alternative is diagonal)
        self.full_cov = full_cov
        # flag to indicate whether or not the model is input-output
        self.io = io
        # flag to indicate if io effects should be a function of state
        self.state_io = state_io
        # flag to indicate if io effects should be personalized
        self.perso_io = personalized_io
        # flag to indicate if personalized (non-io) effects should be included
        self.perso = personalized
        # flag to indicate whether or not to use GPU
        self.device = device
        # constructor used for pre-filled tensors; falls back to CPU tensors
        # when CUDA is unavailable even if device == "cuda"
        self.tensor_constructor = (
            torch.cuda.DoubleTensor
            if device == "cuda" and torch.cuda.is_available()
            else torch.Tensor
        )
        # flag to indicate whether or not to have a prior on V
        self.priorV = priorV
        # flag to indicate whether or not to have a prior on mu
        self.priorMu = priorMu
        # store the parameters of the IG prior
        # (built as float64 then cast down to float32 by .float())
        self.alpha = torch.tensor(
            [alpha], dtype=torch.float64, requires_grad=False, device=self.device
        ).float()
        self.beta = torch.tensor(
            [beta], dtype=torch.float64, requires_grad=False, device=self.device
        ).float()
        # flag to indicate upper triangular structure for transition matrix
        self.ut = UT
        # flag to indicate whether to use diagonal covariance for variational distribution
        self.VI_diag = VI_diag
        # store the data used in analysis
        self.data = data.to(device=self.device)  # n x t x d
        self.n, self.t, self.d = self.data.shape
        # store likelihood (this is the objective if no personalized effects are used)
        self.ll = []
        if self.perso_io and self.perso:
            # case with both personalized state and medication effects
            self.elbo = []  # objective
            # mu_hat: variational mean of the personalized medication effect M_i
            self.mu_hat = torch.zeros(
                self.n, self.d, requires_grad=True, device=self.device
            )
            # vector for optimization of covariance which is mapped into lower triangular cholesky factor
            if self.VI_diag:
                self.tril_vec = torch.tensor(
                    0.01 * np.random.randn(self.n * self.d),
                    requires_grad=True,
                    device=self.device,
                    dtype=torch.float64,
                )
                # L_hat: per-patient (diagonal) Cholesky factor of q(M_i)'s covariance
                self.L_hat = torch.zeros(self.n, self.d, self.d, device=self.device)
                self.L_hat[
                    torch.stack([torch.eye(self.d) for _ in range(self.n)]) == 1
                ] = self.tril_vec
            else:
                self.tril_vec = torch.tensor(
                    0.01 * np.random.randn(self.n * int(0.5 * self.d * (self.d + 1))),
                    requires_grad=True,
                    device=self.device,
                    dtype=torch.float64,
                )
                self.L_hat = torch.zeros(self.n, self.d, self.d, device=self.device)
                self.L_hat[
                    torch.tril(torch.ones(self.n, self.d, self.d)) == 1
                ] = self.tril_vec
            # nu_hat: variational mean of the personalized state effect R_i
            self.nu_hat = torch.zeros(
                self.n, self.d, requires_grad=True, device=self.device
            )
            # vector for optimization of covariance which is mapped into lower triangular cholesky factor
            if self.VI_diag:
                self.tril = torch.tensor(
                    0.01 * np.random.randn(self.n * self.d),
                    requires_grad=True,
                    device=self.device,
                    dtype=torch.float64,
                )
                # N_hat: per-patient Cholesky factor of q(R_i)'s covariance
                self.N_hat = torch.zeros(self.n, self.d, self.d, device=self.device)
                self.N_hat[
                    torch.stack([torch.eye(self.d) for _ in range(self.n)]) == 1
                ] = self.tril
            else:
                self.tril = torch.tensor(
                    0.01 * np.random.randn(self.n * int(0.5 * self.d * (self.d + 1))),
                    requires_grad=True,
                    device=self.device,
                    dtype=torch.float64,
                )
                self.N_hat = torch.zeros(self.n, self.d, self.d, device=self.device)
                self.N_hat[
                    torch.tril(torch.ones(self.n, self.d, self.d)) == 1
                ] = self.tril
            self.optimizer = torch.optim.Adam(
                [self.mu_hat, self.tril_vec, self.nu_hat, self.tril], lr=lr
            )
        elif self.perso_io:
            # case with personalized medication effects
            self.elbo = []  # objective
            self.mu_hat = torch.zeros(
                self.n, self.d, requires_grad=True, device=self.device
            )
            # vector for optimization of covariance which is mapped into lower triangular cholesky factor
            if self.VI_diag:
                self.tril_vec = torch.tensor(
                    0.01 * np.random.randn(self.n * self.d),
                    requires_grad=True,
                    device=self.device,
                    dtype=torch.float64,
                )
                self.L_hat = torch.zeros(self.n, self.d, self.d, device=self.device)
                self.L_hat[
                    torch.stack([torch.eye(self.d) for _ in range(self.n)]) == 1
                ] = self.tril_vec
            else:
                # note the larger (0.1) init scale in the full-covariance case here
                self.tril_vec = torch.tensor(
                    0.1 * np.random.randn(self.n * int(0.5 * self.d * (self.d + 1))),
                    requires_grad=True,
                    device=self.device,
                    dtype=torch.float64,
                )
                self.L_hat = torch.zeros(self.n, self.d, self.d, device=self.device)
                self.L_hat[
                    torch.tril(torch.ones(self.n, self.d, self.d)) == 1
                ] = self.tril_vec
            self.optimizer = torch.optim.Adam([self.mu_hat, self.tril_vec], lr=lr)
        elif self.perso:
            # case with personalized state effects
            self.elbo = []  # objective
            self.nu_hat = torch.zeros(
                self.n, self.d, requires_grad=True, device=self.device
            )
            # vector for optimization of covariance which is mapped into lower triangular cholesky factor
            if self.VI_diag:
                self.tril = torch.tensor(
                    0.01 * np.random.randn(self.n * self.d),
                    requires_grad=True,
                    device=self.device,
                    dtype=torch.float64,
                )
                self.N_hat = torch.zeros(self.n, self.d, self.d, device=self.device)
                self.N_hat[
                    torch.stack([torch.eye(self.d) for _ in range(self.n)]) == 1
                ] = self.tril
            else:
                self.tril = torch.tensor(
                    0.1 * np.random.randn(self.n * int(0.5 * self.d * (self.d + 1))),
                    requires_grad=True,
                    device=self.device,
                    dtype=torch.float64,
                )
                self.N_hat = torch.zeros(self.n, self.d, self.d, device=self.device)
                self.N_hat[
                    torch.tril(torch.ones(self.n, self.d, self.d)) == 1
                ] = self.tril
            self.optimizer = torch.optim.Adam([self.nu_hat, self.tril], lr=lr, eps=1e-4)
        # store the inputs used in analysis
        if self.io:
            self.ins = ins.to(self.device)  # n x t x 1
        # store the time mask
        if TM is None:
            self.tm = torch.ones(
                self.n, self.t, requires_grad=False, device=self.device
            )
        else:
            self.tm = TM.to(self.device)  # n x t
        # store the observation mask
        if OM is None:
            self.om = torch.ones(
                self.n, self.t, requires_grad=False, device=self.device
            )
        else:
            self.om = OM.to(self.device)  # n x t
        # boolean copy of the observation mask (t > 0) replicated for every
        # source/destination state pair: k x k x n x t-1
        self.om_is = torch.ones(
            (self.k, self.k, self.n, self.t - 1),
            requires_grad=False,
            device=device,
            dtype=torch.bool,
        )
        for i in range(self.k):
            self.om_is[i] = self.om[:, 1:].unsqueeze(0).repeat(self.k, 1, 1)
        self.eps = eps
        self.min_var = min_var
        self.ini_var = var_fill
        self.sample_num = sample_num
    def initialize_model(self, km_init=True):
        """
        Initializes the parameters of the PIOHMM model; internal method
        km_init: flag to indicate if kmeans should be used to initialize the state means
        :return: dict with keys mu, var, pi, A and, depending on flags,
            V / mnoise / vnoise / nnoise / munoise
        """
        # All implementations of the model have the parameter set {mu, var, pi, A}
        if km_init:
            # initialize the means using kmeans
            kmeans = KMeans(n_clusters=self.k, init="random").fit(
                torch.reshape(self.data[:, :, :].cpu(), [self.n * self.t, self.d])
            )
            mu = torch.tensor(
                kmeans.cluster_centers_, requires_grad=False, device=self.device
            ).float()
        else:
            # choose k initial points from data to initialize means
            idxs = torch.from_numpy(np.random.choice(self.n, self.k, replace=False))
            mu = self.data[idxs, 0, :]
        if self.full_cov:
            # create k random symmetric positive definite d x d matrices
            # (symmetrized noise plus d * I keeps them diagonally dominant)
            R = 0.1 * torch.rand(self.k, self.d, self.d, requires_grad=False)
            var = torch.stack(
                [
                    0.5 * (R[i, :, :].squeeze() + torch.t(R[i, :, :].squeeze()))
                    + self.d * torch.eye(self.d)
                    for i in range(self.k)
                ]
            ).to(self.device)
        else:
            # diagonal covariance, every entry filled with the initial guess
            var = self.tensor_constructor(self.k, self.d, device=self.device).fill_(
                self.ini_var
            )
        # uniform prior
        pi = torch.empty(self.k, requires_grad=False, device=self.device).fill_(
            1.0 / self.k
        )
        # transition matrix
        if self.ut:
            # create UT matrices for each HMM
            # (row i distributes mass uniformly over states i..k_per_hmm-1)
            A = []
            for _ in range(self.K):
                A.append(
                    torch.triu(
                        torch.stack(
                            [
                                1.0
                                / (self.k_per_hmm - i)
                                * torch.ones(
                                    self.k_per_hmm,
                                    requires_grad=False,
                                    device=self.device,
                                )
                                for i in range(self.k_per_hmm)
                            ]
                        )
                    )
                )
            # create the corresponding mask
            new_triangular = torch.block_diag(
                *[
                    torch.triu(
                        torch.ones(
                            self.k_per_hmm,
                            self.k_per_hmm,
                            requires_grad=False,
                            device=self.device,
                        )
                    )
                    for _ in range(self.K)
                ]
            )
        else:
            # create non-UT matrices for each HMM (uniform rows)
            A = []
            for _ in range(self.K):
                A.append(
                    torch.stack(
                        [
                            1.0
                            / self.k_per_hmm
                            * torch.ones(
                                self.k_per_hmm, requires_grad=False, device=self.device
                            )
                            for _ in range(self.k_per_hmm)
                        ]
                    )
                )
            # create the corresponding mask
            new_triangular = torch.block_diag(
                *[
                    torch.ones(
                        self.k_per_hmm,
                        self.k_per_hmm,
                        requires_grad=False,
                        device=self.device,
                    )
                    for _ in range(self.K)
                ]
            )
        # structural mask of allowed transitions (block-diagonal, per mixture)
        self.new_triangular = new_triangular
        # obtain a block diagonal transition matrix
        A = torch.block_diag(*A)
        # for calculation stability
        A += self.eps
        params = {"mu": mu, "var": var, "pi": pi, "A": A}
        # input transformation matrix
        if self.io:
            if self.state_io:
                V = torch.zeros(self.k, self.d, requires_grad=False, device=self.device)
            else:
                V = torch.zeros(self.d, requires_grad=False, device=self.device)
            params["V"] = V
        # variational parameters
        if self.perso_io:
            # transformation matrix prior noise
            # initialize using the mean of the IG distribution
            mnoise = torch.tensor([0.5], device=self.device)
            params["mnoise"] = mnoise
            if self.priorV:
                vnoise = torch.tensor([1.0], device=self.device)
                params["vnoise"] = vnoise
        if self.perso:
            nnoise = torch.tensor([0.5], device=self.device)
            params["nnoise"] = nnoise
            if self.priorMu:
                munoise = torch.tensor([1.0], device=self.device)
                params["munoise"] = munoise
        return params
def batch_mahalanobis(self, L, x, check=True):
"""
Computes the squared Mahalanobis distance :math:`\mathbf{x}^\top\mathbf{M}^{-1}\mathbf{x}`
for a factored :math:`\mathbf{M} = \mathbf{L}\mathbf{L}^\top`. internal method
Accepts batches for both L and x.
"""
flat_L = L.unsqueeze(0).reshape((-1,) + L.shape[-2:])
L_inv = (
torch.stack([torch.inverse(Li.t()) for Li in flat_L])
.view(L.shape)
.to(self.device)
)
batch_val = L_inv.shape[0]
if check:
return (
(
torch.stack(
[
x[i, :, :].unsqueeze(-1) * L_inv[i, :, :]
for i in range(batch_val)
]
)
)
.sum(-2)
.pow(2.0)
.sum(-1)
)
else:
return (
(
torch.stack(
[
x[i, :].unsqueeze(-1) * L_inv[i, :, :]
for i in range(batch_val)
]
)
)
.sum(-2)
.pow(2.0)
.sum(-1)
)
def batch_diag(self, bmat):
"""
Returns the diagonals of a batch of square matrices; internal method
"""
return bmat.reshape(bmat.shape[:-2] + (-1,))[..., :: bmat.size(-1) + 1]
def log_gaussian(self, params, m_sample=None, n_sample=None):
"""
Returns the density of the model data given the current parameters; internal method
:param params: set of model parameters
:param m_sample: current sample of m_i, only applicable for perso_io=True
:param n_sample: current sample of r_i, only applicable for perso=True
:return: log likelihood at each time point for each possible cluster component, k x n x t
"""
# unpack params
mu = params["mu"]
var = params["var"]
log_norm_constant = self.d * torch.log(
2 * torch.tensor(math.pi, device=self.device)
)
if self.full_cov:
try:
# This try statement helps catch issues related to singular covariance, which can be an issue that is difficult to trace
L = torch.linalg.cholesky(var)
except:
print(var)
print(mu)
print(params["A"])
print(params["V"])
with open("miohmm_var.pkl", "wb") as handle:
pickle.dump(var, handle)
r = self.data[None, :, :, :] - mu[:, None, None, :]
if self.io:
V = params["V"]
if self.state_io:
r = r - V[:, None, None, :] * self.ins[None, :, :, None]
else:
r = r - V * self.ins[:, :, None]
if self.perso_io:
r = r - m_sample[None, :, None, :] * self.ins[None, :, :, None]
if self.perso:
r = r - n_sample[None, :, None, :]
md = self.batch_mahalanobis(L, r)
log_det = 2 * self.batch_diag(L).abs().log().sum(-1).to(self.device)
log_p = -0.5 * (md + log_norm_constant + log_det[:, None, None])
else:
r = self.data[None, :, :, :] - mu[:, None, None, :]
if self.io:
V = params["V"]
if self.state_io:
r = r - V[:, None, None, :] * self.ins[None, :, :, None]
else:
r = r - V * self.ins[:, :, None]
if self.perso_io:
r = r - m_sample[None, :, None, :] * self.ins[:, :, None]
if self.perso:
r = r - n_sample[None, :, None, :]
r = r ** 2
log_p = -0.5 * (var.log()[:, None, None, :] + r / var[:, None, None, :])
log_p = log_p + log_norm_constant
log_p = log_p.sum(-1)
return log_p
    def log_gaussian_prior(self, rv, mu, L):
        """
        Log-density of a Gaussian prior evaluated at ``rv`` with mean ``mu`` and
        covariance factored as L L^T; does not support full covariance
        structure; internal method
        :param rv: random variable (indexed 2-D by batch_mahalanobis with check=False)
        :param mu: prior mean (broadcast against rv)
        :param L: cholesky matrix for covariance of RV
        :return: log probability

        NOTE(review): ``d = np.shape(rv)[0]`` is the *first* dimension of rv;
        if rv is batched (n x d), this is the batch size rather than the
        feature dimension -- confirm intended shapes with the call sites.
        """
        d = np.shape(rv)[0]
        log_norm_constant = (
            -0.5 * d * torch.log(2 * torch.tensor(math.pi, device=self.device))
        )
        r = rv - mu
        md = self.batch_mahalanobis(L, r, check=False)
        # sum of log |L_ii| equals 0.5 * log det(Sigma)
        log_det = self.batch_diag(L).abs().log().sum(-1).to(self.device)
        log_p = -0.5 * md + log_norm_constant - log_det
        return log_p
def log_ig(self, noise):
"""
Returns the probability of the inverse gamma prior; internal method
:return:
"""
log_ig = (
self.alpha * torch.log(self.beta)
- torch.log(gamma_fn(self.alpha.cpu())).to(self.device)
- (self.alpha + 1.0) * torch.log(noise)
- self.beta / noise
)
return log_ig
def get_likelihoods(self, params, log=True, m_sample=None, n_sample=None):
"""
:param log: flag to indicate if likelihood should be returned in log domain; internal method
:return likelihoods: (k x n x t)
"""
log_likelihoods = self.log_gaussian(params, m_sample, n_sample)
if not log:
log_likelihoods.exp_()
# multiply the liklihoods by the observation mask
return (log_likelihoods * self.om[None, :, :]).to(self.device)
    def get_exp_data(self, mu, var, V=None, m_sample=None, n_sample=None):
        """
        Function to calculate the expectation of the conditional log-likelihood with respect to the variational
        approximation q(M|X); internal method
        :return: expectation of the conditional log-likelihood wrt the variational approximation (k x n x t)

        NOTE(review): the io branch indexes V as (k, d); with state_io=False,
        V has shape (d,) and this indexing would fail -- confirm this method
        is only reached with a 2-D V.
        """
        if self.full_cov:
            L = torch.linalg.cholesky(var)
        else:
            # NOTE(review): L is allocated on the CPU here; if var lives on a
            # CUDA device this likely breaks downstream -- confirm.
            L = torch.zeros(self.k, self.d, self.d)
            for i in range(self.k):
                L[i, :, :] = torch.diag(torch.sqrt(var[i, :]))
        # residuals after removing the state mean and any io / personalized effects
        r = self.data[None, :, :, :] - mu[:, None, None, :]
        if self.io:
            r = r - V[:, None, None, :] * self.ins[None, :, :, None]
        if self.perso_io:
            r = r - m_sample[None, :, None, :] * self.ins[None, :, :, None]
        if self.perso:
            r = r - n_sample[None, :, None, :]
        const = self.d * torch.log(
            2 * torch.tensor(math.pi, device=self.device)
        )  # scalar
        logdet = 2 * self.batch_diag(L).abs().log().sum(-1).to(self.device)  # k
        md1 = self.batch_mahalanobis(L, r)  # k x n x t
        out = -0.5 * (const + logdet[:, None, None] + md1)
        return out
def get_exp_M(self, mnoise):
"""
Function to calculate the expectation of the prior with respect to the variational approximation q(M|X); internal method
:return: expectation of the prior on M wrt the variational approximation
"""
out = (
-self.d
* self.n
/ 2
* torch.log(2 * torch.tensor(math.pi, device=self.device) * mnoise)
- (
1 / mnoise / 2 * torch.einsum("kij, kij -> k", [self.L_hat, self.L_hat])
).sum()
- (
1 / mnoise / 2 * torch.einsum("ij,ij->i", [self.mu_hat, self.mu_hat])
).sum()
)
return out
def get_exp_V(self, V, vnoise):
"""
Function to calculate the expectation of the prior with respect to the variational approximation q(M|X); internal method
:return: expectation of the prior on M wrt the variational approximation
"""
out = (
-self.d
* self.k
/ 2
* torch.log(2 * torch.tensor(math.pi, device=self.device) * vnoise)
- (1 / vnoise / 2 * torch.einsum("ij,ij->i", [V, V])).sum()
)
return out
def get_exp_Mtilde(self, nnoise):
"""
Function to calculate the expectation of the prior with respect to the variational approximation q(M|X); internal method
:return: expectation of the prior on M wrt the variational approximation
"""
out = (
-self.d
* self.n
/ 2
* torch.log(2 * torch.tensor(math.pi, device=self.device) * nnoise)
- (
1 / nnoise / 2 * torch.einsum("kij, kij -> k", [self.N_hat, self.N_hat])
).sum()
- (
1 / nnoise / 2 * torch.einsum("ij,ij->i", [self.nu_hat, self.nu_hat])
).sum()
)
return out
    def exp_log_joint(self, params, e_out, samples):
        """
        Function to calculate the expectation of the joint likelihood with respect to the variational approximation; internal method
        :param params: current model parameters (pi, A, mu, var, optional V / prior noises)
        :param e_out: E-step output: log-domain responsibilities ``gamma`` (k x n x t)
            and pairwise terms ``xi`` (k x k x n x t-1)
        :param samples: dict with current ``m_sample`` / ``n_sample`` draws (may be None)
        :return: log joint likelihood
        """
        # unpack parameters
        pi = params["pi"]
        A = params["A"]
        logA = A.log()
        # zero the -inf entries arising from structurally forbidden transitions
        logA[torch.isinf(logA)] = 0
        mu = params["mu"]
        var = params["var"]
        m_sample = samples["m_sample"]
        n_sample = samples["n_sample"]
        gamma = e_out["gamma"]
        xi = e_out["xi"]
        # three additive terms: initial-state, transition, and emission
        # expectations, each weighted by the (exponentiated) responsibilities
        if self.io:
            V = params["V"]
            lj = (
                (gamma[:, :, 0].exp() * pi[:, None].log()).sum()
                + (xi.exp() * logA[:, :, None, None] * self.om[None, None, :, 1:]).sum()
                + (
                    self.get_exp_data(
                        mu, var, V=V, m_sample=m_sample, n_sample=n_sample
                    )
                    * gamma.exp()
                    * self.om[None, :, :]
                ).sum()
            )
        else:
            lj = (
                (gamma[:, :, 0].exp() * pi[:, None].log()).sum()
                + (xi.exp() * logA[:, :, None, None] * self.om[None, None, :, 1:]).sum()
                + (
                    self.get_exp_data(mu, var, m_sample=m_sample, n_sample=n_sample)
                    * gamma.exp()
                    * self.om[None, :, :]
                ).sum()
            )
        # prior terms for whichever personalized/prior flags are enabled
        if self.perso:
            nnoise = params["nnoise"]
            lj = lj + self.get_exp_Mtilde(nnoise)
        if self.perso_io:
            mnoise = params["mnoise"]
            lj = lj + self.get_exp_M(mnoise)
            if self.priorV:
                vnoise = params["vnoise"]
                mnoise = params["mnoise"]
                V = params["V"]
                lj = (
                    lj
                    + self.log_ig(vnoise)
                    + self.log_ig(mnoise)
                    + self.get_exp_V(V, vnoise)
                )
        if self.priorMu:
            munoise = params["munoise"]
            nnoise = params["nnoise"]
            lj = (
                lj
                + self.log_ig(munoise)
                + self.log_ig(nnoise)
                + self.get_exp_V(mu, munoise)
            )
        return lj
    def entropy(self, e_out):
        """
        Function to calculate the entropy of the variational distribution;
        internal method. Combines the discrete HMM posterior terms (built
        from the log-domain ``gamma`` / ``xi``) with the Gaussian entropies
        of q(M) / q(R) when the personalized effects are enabled.
        :return: scalar entropy term
        """
        gamma = e_out["gamma"]
        xi = e_out["xi"]
        gamma_sum = gamma[:, :, 0].exp() * gamma[:, :, 0] * self.om[None, :, 0]
        if self.ut:
            # Upper-triangular case: only count the structurally allowed
            # (upper-triangular) xi entries, accumulated with a Python double
            # loop over patients and time (note: O(n*t) loop, not vectorized).
            xi_sum = 0
            for i in range(self.n):
                for j in range(1, self.t):
                    xi_sum = (
                        xi_sum
                        + (
                            torch.triu(xi[:, :, i, j - 1]).exp()
                            * torch.triu(xi[:, :, i, j - 1])
                            * self.om[None, None, i, j]
                        ).sum()
                    )
            xi_sum = (
                xi_sum
                - (xi.exp() * gamma[:, None, :, :-1] * self.om[None, None, :, 1:]).sum()
            )
        else:
            xi_sum = (
                xi.exp() * (xi - gamma[:, None, :, :-1]) * self.om[None, None, :, 1:]
            ).sum()
        et = -gamma_sum.sum() - xi_sum
        if self.perso_io:
            # Gaussian entropy of q(M): 0.5 * (log det Sigma + d log 2*pi + d)
            logdet = 2 * self.batch_diag(self.L_hat).abs().log().sum(-1)
            diffe = (
                0.5
                * (logdet + self.d * np.log(2 * torch.tensor(math.pi)) + self.d).sum()
            )
            et = et + diffe
        if self.perso:
            # Gaussian entropy of q(R), same form with the N_hat factors
            logdet = 2 * self.batch_diag(self.N_hat).abs().log().sum(-1)
            diffe = (
                0.5
                * (logdet + self.d * np.log(2 * torch.tensor(math.pi)) + self.d).sum()
            )
            et = et + diffe
        return et
def variational_obj(self, params, e_out, samples):
"""
Function to calculate the elbo using the expectation of the joint likelihood and the entropy; internal method
:return:
"""
obj1 = -self.exp_log_joint(params, e_out, samples)
obj2 = -self.entropy(e_out)
self.elbo.append((obj1 + obj2).item())
return obj1 + obj2
    def baseline_variational_obj(self, params, e_out, samples):
        """
        Function to calculate the elbo when only one time point has been observed; internal method.
        Mirrors variational_obj but drops every transition term (no xi / A
        contribution) since a single observation carries no transitions.
        :return: scalar negative ELBO
        """
        # unpack parameters
        pi = params["pi"]
        A = params["A"]
        logA = A.log()
        # zero -inf entries from structurally forbidden transitions
        logA[torch.isinf(logA)] = 0
        mu = params["mu"]
        var = params["var"]
        m_sample = samples["m_sample"]
        n_sample = samples["n_sample"]
        gamma = e_out["gamma"]
        # expected log joint: initial-state term plus (masked) emission term
        if self.io:
            V = params["V"]
            lj = (gamma[:, :, 0].exp() * pi[:, None].log()).sum() + (
                self.get_exp_data(mu, var, V=V, m_sample=m_sample, n_sample=n_sample)
                * gamma.exp()
                * self.om[None, :, :]
            ).sum()
        else:
            lj = (gamma[:, :, 0].exp() * pi[:, None].log()).sum() + (
                self.get_exp_data(mu, var, m_sample=m_sample, n_sample=n_sample)
                * gamma.exp()
                * self.om[None, :, :]
            ).sum()
        # prior contributions for the enabled personalized/prior flags
        if self.perso:
            nnoise = params["nnoise"]
            lj = lj + self.get_exp_Mtilde(nnoise)
        if self.perso_io:
            mnoise = params["mnoise"]
            lj = lj + self.get_exp_M(mnoise)
            if self.priorV:
                vnoise = params["vnoise"]
                mnoise = params["mnoise"]
                V = params["V"]
                lj = (
                    lj
                    + self.log_ig(vnoise)
                    + self.log_ig(mnoise)
                    + self.get_exp_V(V, vnoise)
                )
        if self.priorMu:
            munoise = params["munoise"]
            nnoise = params["nnoise"]
            lj = (
                lj
                + self.log_ig(munoise)
                + self.log_ig(nnoise)
                + self.get_exp_V(mu, munoise)
            )
        # entropy: only the t=0 discrete term plus Gaussian entropies
        gamma_sum = gamma[:, :, 0].exp() * gamma[:, :, 0] * self.om[None, :, 0]
        et = -gamma_sum.sum()
        if self.perso_io:
            logdet = 2 * self.batch_diag(self.L_hat).abs().log().sum(-1)
            diffe = (
                0.5
                * (logdet + self.d * np.log(2 * torch.tensor(math.pi)) + self.d).sum()
            )
            et = et + diffe
        if self.perso:
            logdet = 2 * self.batch_diag(self.N_hat).abs().log().sum(-1)
            diffe = (
                0.5
                * (logdet + self.d * np.log(2 * torch.tensor(math.pi)) + self.d).sum()
            )
            et = et + diffe
        # record and return the negative ELBO
        self.elbo.append((-lj + -et).item())
        return -lj - et
def forward(self, likelihood, params):
    """
    Forward pass of the EM algorithm for the HMM (Baum-Welch); internal method.

    :param likelihood: k x n x t log-likelihood of the data under the current parameters
    :param params: current model parameters (uses "pi" and "A")
    :return: (log alpha-hats, log scaling factors) of shapes k x n x t and n x t
    Note this implementation uses the rescaled 'alpha-hats'.
    """
    log_pi = params["pi"][:, None].log()
    log_trans = params["A"].log()
    log_alpha = torch.zeros(self.k, self.n, self.t, device=self.device)
    log_scale = torch.zeros(self.n, self.t, device=self.device)
    # initial step: prior times first-observation likelihood, then rescale
    unnorm = log_pi + likelihood[:, :, 0]
    log_scale[:, 0] = torch.logsumexp(unnorm, dim=0)
    log_alpha[:, :, 0] = unnorm - log_scale[:, 0]
    # recursion over the remaining time points (log-sum-exp for stability)
    for step in range(1, self.t):
        prev = log_alpha[:, :, step - 1]
        unnorm = likelihood[:, :, step] + torch.logsumexp(
            prev[:, None, :] + log_trans[:, :, None], dim=0
        )
        log_scale[:, step] = torch.logsumexp(unnorm, dim=0)
        log_alpha[:, :, step] = unnorm - log_scale[:, step]
    # zero out entries at missing time points via the time mask
    return log_alpha * self.tm[None, :, :], log_scale * self.tm
def backward(self, likelihood, params, scaling_factor):
    """
    Backward pass of the EM algorithm for the HMM (Baum-Welch); internal method.

    :param likelihood: k x n x t log-likelihood of the data under the current parameters
    :param params: current model parameters (uses "A")
    :param scaling_factor: n x t log scaling factors from the forward pass
    :return: k x n x t log beta-hats (rescaled)
    """
    log_trans = params["A"].log()
    log_beta = torch.zeros(self.k, self.n, self.t, device=self.device)
    # recurse from the second-to-last time point back to t = 0
    for step in range(self.t - 2, -1, -1):
        nxt = log_beta[:, :, step + 1]
        # log-sum-exp over the next state, combining transition, next beta
        # and next-observation likelihood
        msg = torch.logsumexp(
            nxt[None, :, :] + log_trans[:, :, None] + likelihood[None, :, :, step + 1],
            dim=1,
        )
        mask = self.tm[:, step + 1]
        log_beta[:, :, step] = (msg - scaling_factor[:, step + 1]) * mask[None, :]
    return log_beta
def e_step(self, params, fixSample=False):
    """
    'expectation step' for the EM algorithm (Baum-Welch); internal method.

    :param params: current model parameters
    :param fixSample: if True, use the variational means instead of drawing
        reparameterized samples of the personalized effects
    :return: (e_out, params, samples) where e_out holds log gamma, log xi and
        the data log-likelihood pX
    """
    # default setting is to assume no personalized effects
    m_sample = None
    n_sample = None
    if self.perso_io:
        # sample from the variational approximation of M_i
        if fixSample:
            m_sample = self.mu_hat
        else:
            # reparameterization trick: m = L_hat @ eps + mu_hat
            e_sample = torch.randn(self.n, self.d, device=self.device)
            m_sample = (
                torch.einsum("ijk,ik->ij", [self.L_hat, e_sample]) + self.mu_hat
            )
    if self.perso:
        # sample from the variational approximation of the personalized state effects
        if fixSample:
            n_sample = self.nu_hat
        else:
            e_sample = torch.randn(self.n, self.d, device=self.device)
            n_sample = (
                torch.einsum("ijk,ik->ij", [self.N_hat, e_sample]) + self.nu_hat
            )
    likelihood = self.get_likelihoods(params, m_sample=m_sample, n_sample=n_sample)
    alpha, scaling_factor = self.forward(likelihood, params)
    # NB: the exponentiated alpha sum over the first dimension should be one
    beta = self.backward(likelihood, params, scaling_factor)
    # the expontiated beta sum over the first dimension should be numerically well-behaved
    gamma = alpha + beta  # note this is log gamma
    logA = params["A"].log()
    # pairwise state marginals: combine the forward and backward messages with
    # the transition and next-step emission terms (all in log space)
    xi = (
        alpha[:, None, :, :-1]
        + beta[None, :, :, 1:]
        + likelihood[None, :, :, 1:]
        + logA[:, :, None, None]
        - scaling_factor[None, None, :, 1:]
    )  # note this is log xi
    pX = scaling_factor.sum()
    e_out = {"xi": xi, "gamma": gamma, "pX": pX}
    samples = {"m_sample": m_sample, "n_sample": n_sample}
    if self.perso_io or self.perso:
        # one gradient step on the negative ELBO w.r.t. the variational parameters
        self.optimizer.zero_grad()
        self.variational_obj(params, e_out, samples).backward(retain_graph=True)
        self.optimizer.step()
        if self.perso_io:
            # update the variational parameters mu_hat and L_hat using gradient descent:
            # rebuild the Cholesky factor from the optimized free parameters
            if self.VI_diag:
                self.L_hat[
                    torch.stack([torch.eye(self.d) for _ in range(self.n)]) == 1
                ] = self.tril_vec
            else:
                self.L_hat[
                    torch.tril(torch.ones(self.n, self.d, self.d)) == 1
                ] = self.tril_vec
        if self.perso:
            if self.VI_diag:
                self.N_hat[
                    torch.stack([torch.eye(self.d) for _ in range(self.n)]) == 1
                ] = self.tril
            else:
                self.N_hat[
                    torch.tril(torch.ones(self.n, self.d, self.d)) == 1
                ] = self.tril
    return e_out, params, samples
def m_step(self, e_out, params, samples):
    """
    fixed point equation for updating 'theta'

    :param e_out: e-step output with log responsibilities "gamma" (k x n x t)
        and log pairwise marginals "xi"
    :param params: current model parameters
    :param samples: samples of the personalized effects drawn in the e-step
    :return: updates mu_k, sigma_k, V, A, pi (plus noise terms when enabled)
    """
    # closed-form (fixed-point) updates; no gradients needed
    with torch.no_grad():
        # un-pack parameters
        gamma = e_out["gamma"]
        xi = e_out["xi"]
        var = params["var"]
        if self.io:
            V = params["V"]
        if self.priorV:
            vnoise = params["vnoise"]
        if self.perso_io:
            m_sample = samples["m_sample"]
        if self.perso:
            n_sample = samples["n_sample"]
        if self.priorMu:
            munoise = params["munoise"]
        # compute `N_k` the proxy "number of points" assigned to each distribution.
        # gamma is k x n x t
        N_k1 = ((gamma[:, :, 0].exp()) * self.om[None, :, 0]).sum(1)
        N_k = ((gamma.exp()) * self.om[None, :, :]).sum(-1).sum(-1)
        # get the means by taking the weighted combination of points;
        # first strip all non-mean effects from the data
        r = self.data
        if self.io:
            if self.state_io:
                r = r - V[:, None, None, :] * self.ins[None, :, :, None]
            else:
                r = r - V * self.ins[:, :, None]
        if self.perso_io:
            r = r - m_sample[None, :, None, :] * self.ins[None, :, :, None]
            if self.perso:
                r = r - n_sample[None, :, None, :]
        else:
            if self.perso:
                r = r - n_sample[:, None, :]
        if self.priorMu:
            # MAP update of mu under a Gaussian prior with variance munoise
            if self.full_cov:
                if self.state_io:
                    num = torch.einsum(
                        "ijk,ijkl->il",
                        [
                            gamma.exp() * self.om[None, :, :],
                            r * self.om[:, :, None],
                        ],
                    )
                else:
                    num = torch.einsum(
                        "ijk,jkl->il",
                        [
                            gamma.exp() * self.om[None, :, :],
                            r * self.om[:, :, None],
                        ],
                    )
                # per-state linear system: (sum_gamma * I + var / munoise) mu = num
                denom = (
                    torch.einsum(
                        "i, ijk->ijk",
                        [
                            torch.sum(gamma.exp() * self.om[None, :, :], (1, 2)),
                            torch.stack(
                                [
                                    torch.eye(self.d, device=self.device)
                                    for _ in range(self.k)
                                ]
                            ),
                        ],
                    )
                    + var / munoise
                )
                # NOTE(review): torch.lu/torch.lu_solve are deprecated in newer
                # PyTorch (torch.linalg.lu_factor/lu_solve); also the V update
                # below passes num[:, :, None] as RHS — confirm the RHS shape here
                MU_LU = torch.lu(denom)
                mu = torch.lu_solve(num, *MU_LU)
            else:
                if self.state_io:
                    mu = torch.einsum(
                        "ijk,ijkl->il",
                        [
                            gamma.exp() * self.om[None, :, :],
                            r * self.om[:, :, None],
                        ],
                    ) / (torch.sum(gamma.exp(), (1, 2)) + var / munoise)
                else:
                    mu = torch.einsum(
                        "ijk,jkl->il",
                        [
                            gamma.exp() * self.om[None, :, :],
                            r * self.om[:, :, None],
                        ],
                    ) / (torch.sum(gamma.exp(), (1, 2))[:, None] + var / munoise)
        else:
            # maximum-likelihood update of mu (no prior)
            if self.state_io:
                mu = torch.einsum(
                    "ijk,ijkl->il",
                    [gamma.exp() * self.om[None, :, :], r * self.om[:, :, None]],
                )
                mu = mu / (N_k[:, None] + self.eps)
            else:
                mu = torch.einsum(
                    "ijk,jkl->il",
                    [gamma.exp() * self.om[None, :, :], r * self.om[:, :, None]],
                )
                mu = mu / (N_k[:, None] + self.eps)
        # update the matrix which tansforms the drug information
        if self.io:
            r = self.data - mu[:, None, None, :]
            if self.perso_io:
                r = r - m_sample[None, :, None, :] * self.ins[None, :, :, None]
            if self.perso:
                r = r - n_sample[None, :, None, :]
            if self.priorV:
                # MAP update of V under a Gaussian prior with variance vnoise
                num = torch.einsum(
                    "ijk,ijkl->il",
                    [
                        gamma.exp() * self.om[None, :, :],
                        r * self.ins[None, :, :, None] * self.om[None, :, :, None],
                    ],
                )
                denom = (
                    torch.einsum(
                        "i,ijk->ijk",
                        [
                            torch.sum(
                                gamma.exp()
                                * self.ins[None, :, :] ** 2
                                * self.om[None, :, :],
                                (1, 2),
                            ),
                            torch.stack(
                                [
                                    torch.eye(self.d, device=self.device)
                                    for _ in range(self.k)
                                ]
                            ),
                        ],
                    )
                    + var / vnoise
                )
                V_LU = torch.lu(denom)
                V = torch.lu_solve(num[:, :, None], *V_LU).squeeze()
            else:
                if self.state_io:
                    # one input-effect vector per state
                    V = torch.einsum(
                        "ijk,ijkl->il",
                        [
                            gamma.exp() * self.om[None, :, :],
                            r
                            * self.ins[None, :, :, None]
                            * self.om[None, :, :, None],
                        ],
                    )
                    denom = torch.sum(
                        gamma.exp()
                        * self.ins[None, :, :] ** 2
                        * self.om[None, :, :],
                        (1, 2),
                    )
                    V = V / denom[:, None]
                else:
                    # a single shared input-effect vector
                    V = torch.einsum(
                        "ijk,ijkl->l",
                        [
                            gamma.exp() * self.om[None, :, :],
                            r * self.ins[:, :, None] * self.om[None, :, :, None],
                        ],
                    )
                    V = V / torch.sum(
                        ((gamma.exp()) * self.tm[None, :, :])
                        * ((self.ins[None, :, :] * self.om[None, :, :]) ** 2)
                    )
        # compute the diagonal covar. matrix, by taking a weighted combination of
        # the each point's square distance from the mean
        r = self.data - mu[:, None, None]
        if self.io:
            if self.state_io:
                r = r - V[:, None, None, :] * self.ins[None, :, :, None]
            else:
                r = r - V[None, None, None, :] * self.ins[None, :, :, None]
        if self.perso_io:
            r = r - m_sample[None, :, None, :] * self.ins[None, :, :, None]
        if self.perso:
            r = r - n_sample[None, :, None, :]
        r = r * self.om[None, :, :, None]
        if self.full_cov:
            # full covariance via weighted outer products of the residuals
            if self.perso_io:
                var = (
                    (gamma[:, :, :, None, None].exp())
                    * self.om[None, :, :, None, None]
                ) * (torch.einsum("ijkl,ijkm->ijklm", [r, r]))
            else:
                var = (
                    (gamma[:, :, :, None, None].exp())
                    * self.om[None, :, :, None, None]
                ) * torch.einsum("ijkl,ijkm->ijklm", [r, r])
            var = var.sum(1).sum(1) / (N_k[:, None, None] + self.eps)
            # add variance ridge to prevent non psd covariance matrices
            var = torch.stack(
                [
                    var[i, :, :] + 1e-4 * torch.eye(self.d).to(self.device)
                    for i in range(self.k)
                ]
            )
        else:
            # diagonal covariance, floored at self.min_var
            var = torch.einsum(
                "ijk,ijkl->il", [(gamma.exp()) * self.om[None, :, :], r ** 2]
            )
            var = var / (N_k[:, None] + self.eps)
            var = torch.clamp(var, min=self.min_var)
        if self.perso_io:
            # compute the prior mnoise
            if self.priorV:
                # inverse-gamma posterior-mode style update
                mnoise = (
                    1
                    / (2 * self.alpha + 2 + self.n * self.d)
                    * (
                        2 * self.beta
                        + (torch.einsum("ij,ij->i", [m_sample, m_sample])).sum()
                    )
                )
            else:
                mnoise = (
                    1
                    / self.n
                    / self.d
                    * (torch.einsum("ij,ij->i", [m_sample, m_sample])).sum()
                )
        if self.priorV:
            vnoise = (
                1
                / (2 * self.alpha + 2 + self.d * self.k)
                * (2 * self.beta + (torch.einsum("ij, ij ->i", [V, V])).sum())
            )
            # CHECK DERIVATION HERE
        if self.perso:
            if self.priorMu:
                nnoise = (
                    1
                    / (2 * self.alpha + 2 + self.n * self.d)
                    * (
                        2 * self.beta
                        + (torch.einsum("ij,ij->i", [n_sample, n_sample])).sum()
                    )
                )
            else:
                nnoise = (
                    1
                    / self.n
                    / self.d
                    * (torch.einsum("ij,ij->i", [n_sample, n_sample])).sum()
                )
        if self.priorMu:
            munoise = (
                1
                / (2 * self.alpha + 2 + self.d * self.k)
                * (2 * self.beta + (torch.einsum("ij, ij-> i", [mu, mu])).sum())
            )
        # recompute the mixing probabilities (eps added after normalization,
        # so pi sums to 1 + k*eps)
        pi = N_k1 / N_k1.sum() + self.eps
        # recompute the transition matrix
        # # xi is k x k x n x t - 1
        # mask unobserved transitions with a large negative value before the
        # log-sum-exp so they contribute (effectively) zero probability
        logA = torch.logsumexp(
            xi.masked_fill(self.om_is == False, -1e18).reshape(
                self.k, self.k, self.n * (self.t - 1)
            ),
            dim=-1,
        )
        # normalize_exp is a module-level helper; new_triangular keeps the
        # structural zeros of A from collapsing to exactly zero
        A = normalize_exp(logA, axis=1) + self.eps * self.new_triangular
        params = {
            "mu": mu.to(self.device),
            "var": var.to(self.device),
            "pi": pi.to(self.device),
            "A": A.to(self.device),
        }
        if self.io:
            params["V"] = V.to(self.device)
        if self.perso_io:
            params["mnoise"] = mnoise.to(self.device)
        if self.perso:
            params["nnoise"] = nnoise.to(self.device)
        if self.priorV:
            params["vnoise"] = vnoise.to(self.device)
        if self.priorMu:
            params["munoise"] = munoise.to(self.device)
        return params
def learn_model(
    self,
    num_iter=1000,
    use_cc=False,
    cc=1e-6,
    intermediate_save=True,
    load_model=False,
    model_name=None,
):
    """
    function to learn the parameters of the PIOHMM

    :param num_iter: number of steps for the learning procedure
    :param use_cc: flag to indicate if a convergence criteria should be used
    :param cc: tolerance for the convergence criteria
    :param intermediate_save: flag to indicate if parameters should be saved during training
    :param load_model: flag to indicate if parameters should be loaded from a saved model
    :param model_name: file name of model to be loaded
    :return: depends on the type of model and will include parameters,
        variational parameters, likelihood, elbo
    """
    if load_model:
        # warm-start the EM parameters from a previously saved model
        load_params = torch.load(model_name)
        params = {
            "mu": load_params["mu"],
            "var": load_params["var"],
            "pi": load_params["pi"],
            "A": load_params["A"],
            "V": load_params["V"],
        }
        # noise terms are not stored in the checkpoint; initialize them using
        # the mean of the IG distribution
        if self.perso_io:
            params["mnoise"] = torch.tensor([1.5], device=self.device)
        if self.perso:
            params["nnoise"] = torch.tensor([0.5], device=self.device)
        if self.priorMu:
            params["munoise"] = torch.tensor([1.0], device=self.device)
    else:
        params = self.initialize_model()
    prev_cost = float("inf")
    for it in range(num_iter):
        if it % 500 == 0:
            # fix: the original printed "Iteration" twice per checkpoint; the
            # duplicate print under intermediate_save was removed
            print("Iteration ", it, flush=True)
            if intermediate_save:
                if self.device[:4] == "cuda":
                    print(torch.cuda.get_device_name(0))
                    print("Memory Usage:")
                    print(
                        "Allocated:",
                        round(torch.cuda.memory_allocated(0) / 1024 ** 3, 1),
                        "GB",
                    )
                    # fix: torch.cuda.memory_cached is deprecated/removed;
                    # memory_reserved is its direct replacement
                    print(
                        "Cached: ",
                        round(torch.cuda.memory_reserved(0) / 1024 ** 3, 1),
                        "GB",
                    )
                # NOTE(review): self.ent/self.ell/self.mu_hat/self.L_hat are
                # assumed to exist on the instance — confirm for models without
                # personalized-io effects
                torch.save(
                    {
                        "params": params,
                        "elbo": self.elbo,
                        "entropy": self.ent,
                        "exp_ll": self.ell,
                        "log_prob": self.ll,
                        "mi": self.mu_hat,
                        "Li": self.L_hat,
                    },
                    "../results/PD_HMM_Model_iter"
                    + str(it)
                    + "_k"
                    + str(self.k)
                    + ".pkl",
                )
        # e-step, calculate the 'responsibilities'
        e_out, params, samples = self.e_step(params)
        # compute the cost and check for convergence
        obj = e_out["pX"].item()
        self.ll.append(obj)
        if use_cc:
            diff = prev_cost - obj
            if np.abs(diff) < cc:
                print("Breaking ", it)
                break
            prev_cost = obj
        # m-step, update the parameters
        params = self.m_step(e_out, params, samples)
        # re-apply the structural mask on the transition matrix
        params["A"] = params["A"] * self.new_triangular
    if self.perso_io and self.perso:
        return (
            params,
            e_out,
            self.ll,
            self.elbo,
            self.mu_hat,
            self.L_hat,
            self.nu_hat,
            self.N_hat,
        )
    elif self.perso:
        return params, e_out, self.ll, self.elbo, self.nu_hat, self.N_hat
    elif self.perso_io:
        return params, e_out, self.ll, self.elbo, self.mu_hat, self.L_hat
    else:
        return params, e_out, self.ll
def est_test_pX(self, params):
    """
    Return the marginal log-probability of the observed data under `params`,
    evaluating the likelihood at the (fixed) variational means.
    """
    lik = self.get_likelihoods(params, fixSample=True)
    _, log_scale = self.forward(lik, params)
    # the sum of the log scaling factors is log p(X)
    return log_scale.sum()
def learn_baseline_vi_params(self, params, num_iter=1000, intermediate_save=False):
    """
    function to learn personalized parameters using only baseline data and fixed estimates of 'theta'

    :param params: fixed estimates of 'theta'
    :param num_iter: number of steps for the learning procedure
    :param intermediate_save: flag to indicate if parameters should be saved
        during training (currently unused by this method)
    :return inferred parameters
    """
    for _ in range(num_iter):
        # default setting is to assume no personalized effects
        m_sample = None
        n_sample = None
        if self.perso_io:
            # reparameterized draw from q(M): m = L_hat @ eps + mu_hat
            e_sample = torch.randn(self.n, self.d, device=self.device)
            m_sample = (
                torch.einsum("ijk,ik->ij", [self.L_hat, e_sample]) + self.mu_hat
            )
        if self.perso:
            e_sample = torch.randn(self.n, self.d, device=self.device)
            n_sample = (
                torch.einsum("ijk,ik->ij", [self.N_hat, e_sample]) + self.nu_hat
            )
        likelihood = self.get_likelihoods(
            params, m_sample=m_sample, n_sample=n_sample
        )
        pi = params["pi"]
        # with only one observed time point the responsibilities reduce to
        # prior times likelihood (log space); no forward-backward needed
        gamma = pi[:, None, None].log() + likelihood
        e_out = {"gamma": gamma}
        samples = {"m_sample": m_sample, "n_sample": n_sample}
        if self.perso_io or self.perso:
            # one gradient step on the baseline ELBO w.r.t. the variational parameters
            self.optimizer.zero_grad()
            self.baseline_variational_obj(params, e_out, samples).backward(
                retain_graph=True
            )
            self.optimizer.step()
            if self.perso_io:
                # update the variational parameters mu_hat and L_hat using gradient descent:
                # rebuild the Cholesky factor from the optimized free parameters
                if self.VI_diag:
                    self.L_hat[
                        torch.stack([torch.eye(self.d) for _ in range(self.n)]) == 1
                    ] = self.tril_vec
                else:
                    self.L_hat[
                        torch.tril(torch.ones(self.n, self.d, self.d)) == 1
                    ] = self.tril_vec
            if self.perso:
                if self.VI_diag:
                    self.N_hat[
                        torch.stack([torch.eye(self.d) for _ in range(self.n)]) == 1
                    ] = self.tril
                else:
                    self.N_hat[
                        torch.tril(torch.ones(self.n, self.d, self.d)) == 1
                    ] = self.tril
    if self.perso_io and self.perso:
        return (
            params,
            e_out,
            self.ll,
            self.elbo,
            self.mu_hat,
            self.L_hat,
            self.nu_hat,
            self.N_hat,
        )
    elif self.perso:
        return params, e_out, self.ll, self.elbo, self.nu_hat, self.N_hat
    elif self.perso_io:
        return params, e_out, self.ll, self.elbo, self.mu_hat, self.L_hat
    else:
        return params, e_out, self.ll
def learn_vi_params(self, params, num_iter=1000, intermediate_save=False):
    """
    function to learn personalized (variational) parameters with 'theta' held fixed

    :param params: fixed estimates of 'theta'
    :param num_iter: number of steps for the learning procedure
    :param intermediate_save: flag to indicate if parameters should be saved during training
    :return inferred parameters (contents depend on which personalized effects are enabled)
    """
    for it in range(num_iter):
        if it % 50 == 0:
            print("Iteration ", it, flush=True)
        if intermediate_save and it % 500 == 0:
            print("Iteration ", it)
            if self.device[:4] == "cuda":
                print(torch.cuda.get_device_name(0))
                print("Memory Usage:")
                print(
                    "Allocated:",
                    round(torch.cuda.memory_allocated(0) / 1024 ** 3, 1),
                    "GB",
                )
                # fix: torch.cuda.memory_cached is deprecated/removed;
                # memory_reserved is its direct replacement
                print(
                    "Cached: ",
                    round(torch.cuda.memory_reserved(0) / 1024 ** 3, 1),
                    "GB",
                )
            torch.save(
                {
                    "params": params,
                    "elbo": self.elbo,
                    "entropy": self.ent,
                    "exp_ll": self.ell,
                    "log_prob": self.ll,
                    "mi": self.mu_hat,
                    "Li": self.L_hat,
                },
                "../results/PD_HMM_Model_iter"
                + str(it)
                + "_k"
                + str(self.k)
                + ".pkl",
            )
        # e-step, calculate the 'responsibilities'; with theta fixed this also
        # performs the gradient update of the variational parameters
        e_out, params, samples = self.e_step(params)
    if self.perso_io and self.perso:
        return (
            params,
            e_out,
            self.ll,
            self.elbo,
            self.mu_hat,
            self.L_hat,
            self.nu_hat,
            self.N_hat,
        )
    elif self.perso:
        return params, e_out, self.ll, self.elbo, self.nu_hat, self.N_hat
    elif self.perso_io:
        return params, e_out, self.ll, self.elbo, self.mu_hat, self.L_hat
    else:
        return params, e_out, self.ll
def calc_pX(
    self,
    params,
    num_samples=1,
    importance_sampling=False,
    mu_hat=None,
    nu_hat=None,
    L_hat=None,
    N_hat=None,
    fixSample=False,
):
    """
    Function to calculate the test log likelihood

    :param params: fixed estimate for 'theta'
    :param num_samples: number of samples for importance sampling
    :param importance_sampling: flag to indicate if importance sampling should be used
    :param mu_hat: mean of the variational distribution for personalized medication effects
    :param nu_hat: mean of the variational distribution for personalized state effects
    :param L_hat: cholesky factor for covariance matrix of the variational distribution for personalized medication effects
    :param N_hat: cholesky factor for covariance matrix of the variational distribution for personalized state effects
    :param fixSample: flag to indicate if only the variational mean should be used
    :return: test log likelihood
    """
    if importance_sampling:
        # importance-weighted estimate: draw from q and reweight by prior / q
        px = torch.zeros(num_samples, self.n)
        for i in range(num_samples):
            if self.perso_io:
                e_sample = torch.randn(self.n, self.d, device=self.device)
                m_sample = torch.einsum("ijk,ik->ij", [L_hat, e_sample]) + mu_hat
                # prior scale: sqrt(mnoise) * I for every patient
                L_prior = torch.stack(
                    [
                        (
                            params["mnoise"].sqrt()
                            * torch.eye(self.d).to(self.device)
                        )
                        for _ in range(self.n)
                    ]
                ).to(self.device)
                # log importance weight: log p(m) - log q(m)
                sample_weight_m = self.log_gaussian_prior(
                    m_sample, torch.zeros(m_sample.shape).to(self.device), L_prior
                ) - self.log_gaussian_prior(m_sample, mu_hat, L_hat)
            else:
                m_sample = None
                sample_weight_m = 0
            if self.perso:
                e_sample = torch.randn(self.n, self.d, device=self.device)
                n_sample = torch.einsum("ijk,ik->ij", [N_hat, e_sample]) + nu_hat
                N_prior = torch.stack(
                    [
                        (params["nnoise"].sqrt() * torch.eye(self.d)).to(
                            self.device
                        )
                        for _ in range(self.n)
                    ]
                ).to(self.device)
                sample_weight_n = self.log_gaussian_prior(
                    n_sample, torch.zeros(n_sample.shape).to(self.device), N_prior
                ) - self.log_gaussian_prior(n_sample, nu_hat, N_hat)
            else:
                n_sample = None
                sample_weight_n = 0
            likelihood = self.get_likelihoods(
                params, m_sample=m_sample, n_sample=n_sample
            )
            alpha, scaling_factor = self.forward(likelihood, params)
            # print((scaling_factor*sample_weight[:, None]).sum())
            px[i, :] = scaling_factor.sum(-1) + sample_weight_m + sample_weight_n
        # log-mean-exp over the importance samples
        out = torch.logsumexp(px, 0) - np.log(num_samples)
    elif fixSample:
        # plug in the variational means directly; note this branch returns the
        # per-time log scaling factors rather than their sum
        likelihood = self.get_likelihoods(params, m_sample=mu_hat, n_sample=nu_hat)
        alpha, scaling_factor = self.forward(likelihood, params)
        out = scaling_factor
    else:
        # Monte Carlo estimate drawing the personalized effects from the prior
        px = torch.zeros(num_samples, self.n)
        for i in range(num_samples):
            if self.perso_io:
                mnoise = params["mnoise"]
                # sample from the sampled from the MLE params
                m_sample = mnoise.sqrt() * torch.randn(
                    self.n, self.d, device=self.device
                )
            else:
                m_sample = None
            if self.perso:
                nnoise = params["nnoise"]
                n_sample = nnoise.sqrt() * torch.randn(
                    self.n, self.d, device=self.device
                )
            else:
                n_sample = None
            likelihood = self.get_likelihoods(
                params, m_sample=m_sample, n_sample=n_sample
            )
            alpha, scaling_factor = self.forward(likelihood, params)
            px[i, :] = scaling_factor.sum(-1)
        out = torch.logsumexp(px, 0) - np.log(num_samples)
    return out
def predict_sequence(self, params, m_sample=None, n_sample=None):
    """
    Run Viterbi decoding under fixed model parameters.

    :param params: fixed estimates of the model parameters
    :param m_sample: value to use for the personalized medication effects
    :param n_sample: value to use for the personalized state effects
    :return: most probable state sequence, n x t
    """
    emission_ll = self.get_likelihoods(params, m_sample=m_sample, n_sample=n_sample)
    return self.viterbi(emission_ll, params)
def viterbi(self, likelihood, params):
    """
    Find the most probable state sequence per patient (Viterbi); internal method.
    omega holds the maximum joint log-probability of the data and latent states
    up to each time point; the final column gives the joint probability of the
    most probable path.

    :param likelihood: k x n x t log emission likelihoods
    :param params: model parameters (uses "A" and "pi")
    :return: n x t most probable sequence
    """
    omega = torch.zeros(self.k, self.n, self.t).to(self.device)
    backptr = torch.zeros(self.k, self.n, self.t).to(self.device)
    path = torch.zeros(self.n, self.t).to(self.device)
    log_trans = params["A"].log()
    # initialization with the prior and the first observation
    omega[:, :, 0] = params["pi"][:, None].log() + likelihood[:, :, 0]
    # forward recursion: keep the best predecessor per state
    for step in range(1, self.t):
        best, backptr[:, :, step] = torch.max(
            log_trans[:, :, None] + omega[:, None, :, step - 1], dim=0
        )
        omega[:, :, step] = likelihood[:, :, step] + best
    # backtrack from the best terminal state
    path[:, -1] = torch.argmax(omega[:, :, -1], dim=0)
    val, _ = torch.max(omega[:, :, -1], dim=0)  # joint prob of the best path (unused)
    for step in range(self.t - 2, -1, -1):
        ptr = backptr[:, :, step + 1]
        path[:, step] = torch.gather(ptr, 0, path[:, step + 1].long().unsqueeze(0))
    return path
def change_data(
    self, data, ins=None, OM=None, TM=None, reset_VI=True, params=None, lr=0.001
):
    """
    Replace the model's dataset and (optionally) re-initialize the variational parameters.

    :param data: new dataset to use, n x t x d
    :param ins: inputs aligned with data (n x t); required when self.io is True
    :param OM: observation mask, n x t (1 = observed); defaults to all ones
    :param TM: time mask, n x t (1 = valid time point); defaults to all ones
    :param reset_VI: if True, re-initialize the variational parameters for any
        enabled personalized effects
    :param params: model parameters; must contain "mnoise"/"nnoise" when the
        corresponding personalized effects are enabled and reset_VI is True.
        Fix: default changed from the mutable `[]` (shared-default pitfall,
        and the wrong type — it is indexed as a dict) to None.
    :param lr: learning rate for the re-created Adam optimizer
    :return: none, updates to model params only
    """
    self.data = data.to(self.device)
    self.n = data.shape[0]
    self.t = data.shape[1]
    # store the inputs used in analysis
    if self.io:
        self.ins = ins.to(self.device)  # n x t x 1
    # store the time mask
    if TM is None:
        self.tm = torch.ones(
            self.n, self.t, requires_grad=False, device=self.device
        )
    else:
        self.tm = TM.to(self.device)  # n x t
    # store the observation mask
    if OM is None:
        self.om = torch.ones(
            self.n, self.t, requires_grad=False, device=self.device
        )
    else:
        self.om = OM.to(self.device)  # n x t
    if reset_VI:
        if self.perso_io and self.perso:
            self.elbo = []
            # initialize q(M) mean from the prior scale mnoise
            mnoise = params["mnoise"]
            mi_numpy = np.sqrt(mnoise.cpu().numpy()) * np.random.randn(
                self.n, self.d
            )
            if self.device[:4] == "cuda":
                self.mu_hat = (
                    torch.from_numpy(mi_numpy)
                    .float()
                    .cuda()
                    .to(self.device)
                    .requires_grad_()
                )
            else:
                self.mu_hat = torch.from_numpy(mi_numpy).float().requires_grad_()
            # free parameters of the Cholesky factor of q(M)'s covariance
            if self.VI_diag:
                self.tril_vec = torch.tensor(
                    0.01 * np.random.randn(self.n * self.d),
                    requires_grad=True,
                    device=self.device,
                    dtype=torch.float64,
                )
                self.L_hat = torch.zeros(self.n, self.d, self.d, device=self.device)
                self.L_hat[
                    torch.stack([torch.eye(self.d) for _ in range(self.n)]) == 1
                ] = self.tril_vec
            else:
                self.tril_vec = torch.tensor(
                    0.01
                    * np.random.randn(self.n * int(0.5 * self.d * (self.d + 1))),
                    requires_grad=True,
                    device=self.device,
                    dtype=torch.float64,
                )
                self.L_hat = torch.zeros(self.n, self.d, self.d, device=self.device)
                self.L_hat[
                    torch.tril(torch.ones(self.n, self.d, self.d)) == 1
                ] = self.tril_vec
            # initialize q(N) mean from the prior scale nnoise
            nnoise = params["nnoise"]
            ni_numpy = np.sqrt(nnoise.cpu().numpy()) * np.random.randn(
                self.n, self.d
            )
            if self.device[:4] == "cuda":
                self.nu_hat = (
                    torch.from_numpy(ni_numpy)
                    .float()
                    .cuda()
                    .to(self.device)
                    .requires_grad_()
                )
            else:
                self.nu_hat = torch.from_numpy(ni_numpy).float().requires_grad_()
            if self.VI_diag:
                self.tril = torch.tensor(
                    0.01 * np.random.randn(self.n * self.d),
                    device=self.device,
                    requires_grad=True,
                    dtype=torch.float64,
                )
                self.N_hat = torch.zeros(self.n, self.d, self.d, device=self.device)
                self.N_hat[
                    torch.stack([torch.eye(self.d) for _ in range(self.n)]) == 1
                ] = self.tril
            else:
                self.tril = torch.tensor(
                    0.01
                    * np.random.randn(self.n * int(0.5 * self.d * (self.d + 1))),
                    requires_grad=True,
                    device=self.device,
                    dtype=torch.float64,
                )
                self.N_hat = torch.zeros(self.n, self.d, self.d, device=self.device)
                self.N_hat[
                    torch.tril(torch.ones(self.n, self.d, self.d)) == 1
                ] = self.tril
            self.optimizer = torch.optim.Adam(
                [self.mu_hat, self.tril_vec, self.nu_hat, self.tril], lr=lr
            )
        elif self.perso_io:
            self.elbo = []
            mnoise = params["mnoise"]
            mi_numpy = np.sqrt(mnoise.cpu().numpy()) * np.random.randn(
                self.n, self.d
            )
            if self.device[:4] == "cuda":
                self.mu_hat = (
                    torch.from_numpy(mi_numpy)
                    .float()
                    .cuda()
                    .to(self.device)
                    .requires_grad_()
                )
            else:
                self.mu_hat = torch.from_numpy(mi_numpy).float().requires_grad_()
            if self.VI_diag:
                self.tril_vec = torch.tensor(
                    0.01 * np.random.randn(self.n * self.d),
                    requires_grad=True,
                    device=self.device,
                    dtype=torch.float64,
                )
                self.L_hat = torch.zeros(self.n, self.d, self.d, device=self.device)
                self.L_hat[
                    torch.stack([torch.eye(self.d) for _ in range(self.n)]) == 1
                ] = self.tril_vec
            else:
                self.tril_vec = torch.tensor(
                    0.01
                    * np.random.randn(self.n * int(0.5 * self.d * (self.d + 1))),
                    requires_grad=True,
                    device=self.device,
                    dtype=torch.float64,
                )
                self.L_hat = torch.zeros(self.n, self.d, self.d, device=self.device)
                self.L_hat[
                    torch.tril(torch.ones(self.n, self.d, self.d)) == 1
                ] = self.tril_vec
            self.optimizer = torch.optim.Adam([self.mu_hat, self.tril_vec], lr=lr)
        elif self.perso:
            self.elbo = []
            nnoise = params["nnoise"]
            ni_numpy = np.sqrt(nnoise.cpu().numpy()) * np.random.randn(
                self.n, self.d
            )
            if self.device[:4] == "cuda":
                self.nu_hat = (
                    torch.from_numpy(ni_numpy)
                    .float()
                    .cuda()
                    .to(self.device)
                    .requires_grad_()
                )
            else:
                self.nu_hat = torch.from_numpy(ni_numpy).float().requires_grad_()
            if self.VI_diag:
                self.tril = torch.tensor(
                    0.01 * np.random.randn(self.n * self.d),
                    device=self.device,
                    requires_grad=True,
                    dtype=torch.float64,
                )
                self.N_hat = torch.zeros(self.n, self.d, self.d, device=self.device)
                self.N_hat[
                    torch.stack([torch.eye(self.d) for _ in range(self.n)]) == 1
                ] = self.tril
            else:
                self.tril = torch.tensor(
                    0.01
                    * np.random.randn(self.n * int(0.5 * self.d * (self.d + 1))),
                    requires_grad=True,
                    device=self.device,
                    dtype=torch.float64,
                )
                self.N_hat = torch.zeros(self.n, self.d, self.d, device=self.device)
                self.N_hat[
                    torch.tril(torch.ones(self.n, self.d, self.d)) == 1
                ] = self.tril
            self.optimizer = torch.optim.Adam(
                [self.nu_hat, self.tril], lr=lr, eps=1e-4
            )
def forward_pred(self, params, m_sample=None, n_sample=None):
    """
    function to forecast one-step-ahead

    :param params: model parameters (uses "pi" and "A")
    :param m_sample: optional personalized medication effects for the likelihood
    :param n_sample: optional personalized state effects for the likelihood
    :return:
        osapd: one-step-ahead predictive density, k x n x (t+1), probability space
        bs: belief state (filtered state posterior), k x n x t, probability space
        lpe: log probability evidence, n x t
    """
    pi = params["pi"]
    A = params["A"]
    likelihood = self.get_likelihoods(params, m_sample=m_sample, n_sample=n_sample)
    alpha, scaling_factor = self.forward(likelihood, params)
    # one-step-ahead predictive density (log space); entry i is the state
    # distribution predicted from observations up to time i-1
    osapd = torch.zeros(self.k, self.n, self.t + 1).to(
        self.device
    )  # one-step-ahead predictive density
    # there is no data yet at t=0, use pi
    osapd[:, :, 0] = pi[:, None].log()
    osapd[:, :, 1:] = torch.logsumexp(
        A[:, :, None, None].log() + alpha[:, None, :, :], dim=0
    )
    # belief state: prediction for each *observed* time point combined with its
    # likelihood. Fix: osapd has t+1 entries (the last is the forecast beyond
    # the data) while likelihood has t, so the original `likelihood + osapd`
    # could not broadcast for t > 1; restrict to the first t entries.
    bs = likelihood + osapd[:, :, : self.t]
    lpe = torch.logsumexp(bs, dim=0)  # log probability evidence
    bs = (bs - lpe[None, :, :]).exp()
    osapd = osapd.exp()  # return values in probability space
    return osapd, bs, lpe
def forward_sample(self, prob, ns=100):
    """
    Draw observation samples from the one-step-ahead predictive density.

    prob: k x n x t 'one-step-ahead predictive density' p(z_it=j | x_i1, ... x_it-1)
    :param ns: number of samples per (patient, time point)
    :return: ns x n x (t-1) x d samples from the one-step-ahead predictive density
    """
    # NOTE(review): this method reads self.mu / self.V / self.var / self.ins /
    # self.mu_hat / self.L_hat as instance attributes, whereas most other
    # methods take mu/V/var from `params` — confirm these are set on the instance.
    vals = torch.zeros(ns, self.n, self.t - 1, self.d)
    for i in range(self.t - 1):
        for j in range(self.n):
            # predictive state distribution for patient j at time i
            m = torch.distributions.categorical.Categorical(prob[:, j, i])
            for k in range(ns):
                draw = m.sample()
                # emission: state mean plus fixed (V) and personalized (mu_hat)
                # medication effects; covariance inflated by the uncertainty of
                # the personalized effect (L_hat @ L_hat^T)
                mvn = torch.distributions.multivariate_normal.MultivariateNormal(
                    self.mu[draw, :]
                    + self.V[draw, :] * self.ins[j, i + 1]
                    + self.mu_hat[j, :] * self.ins[j, i + 1],
                    covariance_matrix=self.var[draw, :, :]
                    + torch.mm(self.L_hat[j, :, :], self.L_hat[j, :, :].t())
                    * self.ins[j, i + 1],
                )
                vals[k, j, i, :] = mvn.sample()
    return vals
def load_model(self, filename, cpu=True):
    """
    Restore the variational parameters (instance attributes) from a saved model.
    Note that variable names have been assumed here and the save file needs to
    be formatted accordingly.

    :param filename: name of file containing the saved model
    :param cpu: if True, map all tensors onto the CPU while loading
    :return: none
    """
    if cpu:
        trained_model = torch.load(filename, map_location=torch.device("cpu"))
    else:
        trained_model = torch.load(filename)
    ### IMPORTANT ####
    # Note that this is currently not setup to continue training. tril_vec needs to be populated and have
    # requires_grad = True to be able to continue training; this function is only to load in a model. Additional
    # functionality is required to continue training
    if self.perso_io:
        # fix: learn_model checkpoints store the variational mean under "mi",
        # while this loader expected "Mi" — accept either capitalization.
        key = "Mi" if "Mi" in trained_model else "mi"
        self.mu_hat = trained_model[key].to(self.device)
        self.L_hat = trained_model["Li"].to(self.device)
    if self.perso:
        self.nu_hat = trained_model["ni"].to(self.device)
        self.N_hat = trained_model["Ni"].to(self.device)
def baseline_risk(self, params, ns=500, type="sample", m_sample=None):
    """
    Determine probabilities of state assignment at 1- and 2-years from baseline; sample the observed data using
    those probabilities

    :param params: model parameters
    :param ns: number of samples when type == "sample"
    :param type: "sample" draws observations, "mean" returns probability-weighted
        means, anything else returns the most-probable-state means
        (NOTE: the name shadows the `type` builtin; kept for interface compatibility)
    :param m_sample: optional fixed personalized medication effects
    :return: p_z1year, p_z2year, sample_1year, sample_2year, p_z1, p_z6month
    """
    # unpack params
    pi = params["pi"]
    A = params["A"]
    mu = params["mu"]
    var = params["var"]
    if self.io:
        V = params["V"]
        # NOTE(review): mnoise is read whenever io is set, but it only exists
        # for personalized-io models — confirm params always carries it here
        mnoise = params["mnoise"]
    if type == "sample":
        sample_1year = torch.zeros(ns, self.n, self.d)
        sample_2year = torch.zeros(ns, self.n, self.d)
        for k in range(ns):
            if self.perso_io:
                if m_sample is None:
                    m_sample = mnoise.sqrt() * torch.randn(size=(self.n, self.d))
            likelihood = self.get_likelihoods(
                params, log=False, m_sample=m_sample
            )  # k x n x t
            # NB: one of the pi elements is zero so we don't work directly in the log space
            p_z1 = (
                pi[:, None]
                * likelihood[:, :, 0].squeeze()
                / (pi[:, None] * likelihood[:, :, 0].squeeze()).sum(0)
            )
            p_z6month = (
                A[:, :, None] * (A[:, :, None] * p_z1[:, None, :]).sum(0)
            ).sum(0)
            # roll the state distribution forward; four transitions per year
            p_z1year = p_z1
            for i in range(4):
                p_z1year = (A[:, :, None] * p_z1year[:, None, :]).sum(0)
            p_z2year = p_z1year
            for i in range(4):
                p_z2year = (A[:, :, None] * p_z2year[:, None, :]).sum(0)
            for j in range(self.n):
                # check for underflow issues
                if np.isnan(p_z1year[:, j].detach().numpy()).all():
                    # fall back to putting all mass on the last state
                    p_z1year[:, j] = torch.zeros(self.k)
                    p_z1year[-1, j] = 1
                    p_z2year[:, j] = p_z1year[:, j]
                m1 = torch.distributions.categorical.Categorical(p_z1year[:, j])
                m2 = torch.distributions.categorical.Categorical(p_z2year[:, j])
                draw = m1.sample()
                if self.io:
                    # self.ins[j, 4] / [j, 8] are the medication inputs at the
                    # 1- and 2-year visits
                    mvn = torch.distributions.multivariate_normal.MultivariateNormal(
                        mu[draw, :]
                        + (V[draw, :] + m_sample[j, :]) * self.ins[j, 4],
                        covariance_matrix=var[draw, :, :],
                    )
                else:
                    mvn = torch.distributions.multivariate_normal.MultivariateNormal(
                        mu[draw, :], covariance_matrix=var[draw, :, :]
                    )
                sample_1year[k, j, :] = mvn.sample()
                draw = m2.sample()
                if self.io:
                    mvn = torch.distributions.multivariate_normal.MultivariateNormal(
                        mu[draw, :]
                        + (V[draw, :] + m_sample[j, :]) * self.ins[j, 8],
                        covariance_matrix=var[draw, :, :],
                    )
                else:
                    mvn = torch.distributions.multivariate_normal.MultivariateNormal(
                        mu[draw, :], covariance_matrix=var[draw, :, :]
                    )
                sample_2year[k, j, :] = mvn.sample()
    elif type == "mean":
        sample_1year = torch.zeros(self.n, self.d)
        sample_2year = torch.zeros(self.n, self.d)
        if self.perso_io:
            m_sample = mnoise.sqrt() * torch.randn(size=(self.n, self.d))
        likelihood = self.get_likelihoods(
            params, log=False, m_sample=m_sample
        )  # k x n x t
        # NB: one of the pi elements is zero so we don't work directly in the log space
        p_z1 = (
            pi[:, None]
            * likelihood[:, :, 0].squeeze()
            / (pi[:, None] * likelihood[:, :, 0].squeeze()).sum(0)
        )
        # print('Check sum:', p_z1.sum(0))
        p_z6month = (A[:, :, None] * (A[:, :, None] * p_z1[:, None, :]).sum(0)).sum(
            0
        )
        p_z1year = p_z1
        for i in range(4):
            p_z1year = (A[:, :, None] * p_z1year[:, None, :]).sum(0)
        p_z2year = p_z1year
        for i in range(4):
            p_z2year = (A[:, :, None] * p_z2year[:, None, :]).sum(0)
        meds1 = self.ins[:, 4]
        meds2 = self.ins[:, 8]
        # probability-weighted mixture means plus the medication effect
        # NOTE(review): V is only bound when self.io is True — confirm the
        # "mean" branch is only reached for io models
        sample_1year = (
            torch.einsum("ij, ik->jk", [p_z1year, mu])
            + torch.einsum("ij, ik->jk", [p_z1year, V]) * meds1[:, None]
        )
        sample_2year = (
            torch.einsum("ij, ik->jk", [p_z2year, mu])
            + torch.einsum("ij, ik->jk", [p_z2year, V]) * meds2[:, None]
        )
    else:
        sample_1year = torch.zeros(self.n, self.d)
        sample_2year = torch.zeros(self.n, self.d)
        if self.perso_io:
            m_sample = mnoise.sqrt() * torch.randn(size=(self.n, self.d))
        likelihood = self.get_likelihoods(
            params, log=False, m_sample=m_sample
        )  # k x n x t
        # NB: one of the pi elements is zero so we don't work directly in the log space
        p_z1 = (
            pi[:, None]
            * likelihood[:, :, 0].squeeze()
            / (pi[:, None] * likelihood[:, :, 0].squeeze()).sum(0)
        )
        p_z6month = (A[:, :, None] * (A[:, :, None] * p_z1[:, None, :]).sum(0)).sum(
            0
        )
        p_z1year = p_z1
        for i in range(4):
            p_z1year = (A[:, :, None] * p_z1year[:, None, :]).sum(0)
        p_z2year = p_z1year
        for i in range(4):
            p_z2year = (A[:, :, None] * p_z2year[:, None, :]).sum(0)
        # hard assignment: most probable state per patient
        idx1 = torch.argmax(p_z1year, dim=0)
        idx2 = torch.argmax(p_z2year, dim=0)
        for j in range(self.n):
            # check for underflow issues
            if np.isnan(p_z1year[:, j]).all():
                p_z1year[:, j] = torch.zeros(self.k)
                p_z1year[-1, j] = 1
                p_z2year[:, j] = p_z1year[:, j]
            sample_1year[j, :] = mu[idx1[j]] + V[idx1[j]] * self.ins[j, 4]
            sample_2year[j, :] = mu[idx2[j]] + V[idx2[j]] * self.ins[j, 8]
    return p_z1year, p_z2year, sample_1year, sample_2year, p_z1, p_z6month
if __name__ == "__main__":
    # Synthetic-data demo for the personalized mixture HMM.
    # Note that this is also contained in the jupyter notebook for more integrated visuals
    n = 200  # number of samples
    d = 1  # dimensionality of observations
    t = 30  # number of time steps
    k = 2  # number of states
    K = 2  # number of HMM mixtures
    A_1 = torch.tensor([[0.8, 0.2], [0.2, 0.8]])  # transition matrix
    A_2 = torch.tensor([[0.2, 0.8], [0.8, 0.2]])  # transition matrix
    A = torch.block_diag(*[A_1, A_2])
    pi = torch.ones(k * K) / (k * K)  # initial state distribution
    mu = torch.tensor([0.0, 2.0, 0.0, 2.0])  # state means
    var = torch.tensor([0.1, 0.1, 0.1, 0.1])  # state covariance
    b = 1  # limit of the uniform distribution to specify personalized state effects
    # Sample state paths Z and Gaussian emissions X from the mixture HMM.
    X = torch.zeros((n, t, d))
    Z = torch.zeros((n, t), dtype=torch.long)
    for i in range(n):
        for j in range(t):
            if j == 0:
                Z[i, j] = torch.multinomial(pi, num_samples=1).byte()
            else:
                Z[i, j] = torch.multinomial(A[Z[i, j - 1], :], num_samples=1)
            # Emission distribution for the state drawn above (shared by both
            # branches; sampling order is unchanged from the original).
            m_dist = torch.distributions.normal.Normal(
                mu.index_select(0, Z[i, j]), var.index_select(0, Z[i, j])
            )
            X[i, j, :] = m_dist.sample()
    # Corrupt X with a smooth GP draw plus a per-sample constant offset.
    X_hat = torch.zeros(n, t, d)
    l = 1.0  # lengthscale for the SE kernel
    s = 0.1  # sigma^2 for the SE kernel
    # build covariance matrix
    var_x = torch.zeros(t, t)
    # torch.range is deprecated (and endpoint-inclusive); arange(0, t + 1)
    # yields the same t + 1 grid points, of which only the first t are used.
    t_vec = torch.arange(0, t + 1).to(var_x.dtype)
    for j in range(t):
        for jj in range(t):
            r = (t_vec[j] - t_vec[jj]) ** 2
            # NOTE(review): experiments/synthetic.py builds the same kernel as
            # s * exp(-r / (2 * l ** 2)); the 1/s amplitude here looks
            # inconsistent with the "sigma^2" comment above — confirm.
            var_x[j, jj] = 1 / s * torch.exp(-r / (2 * l))
    L = torch.cholesky(var_x)
    b_stor = torch.zeros(n)
    for i in range(n):
        e = torch.randn(t)
        b_stor[i] = 2 * b * torch.rand(1) - b
        X_hat[i, :, :] = (
            torch.einsum("ik,k->i", [L, e])[None, :, None]
            + X[i, :, :]
            + b_stor[i] * torch.ones(1, t, 1)
        )
    # fit a personalized hmm
    piohmm = mHMM(
        X_hat,
        k=k,
        K=K,
        full_cov=False,
        priorV=False,
        io=False,
        personalized=True,
        personalized_io=False,
        state_io=False,
        UT=False,
        device="cpu",
        eps=1e-18,
    )
    piohmm_params, _, _, elbo, b_hat, _ = piohmm.learn_model(
        num_iter=10000, intermediate_save=False
    )
    piohmm_mps, _, _ = piohmm.predict_sequence(piohmm_params, n_sample=b_hat)
    piohmm_xhat = np.zeros((n, t))
    piohmm_xvar = np.zeros((n, t))
    for i in range(n):
        for j in range(t):
            # NOTE(review): with K mixtures the raw state index can reach
            # k * K - 1, but np.arange(k) only covers the first k states —
            # confirm predict_sequence returns collapsed per-mixture states.
            idx = np.where(piohmm_mps[i, j].numpy() == np.arange(k))[0][0]
            piohmm_xhat[i, j] = (
                piohmm_params["mu"][idx].numpy() + b_hat[i].detach().numpy()
            )
            piohmm_xvar[i, j] = 2 * np.sqrt(piohmm_params["var"][idx].numpy())
    # BUG FIX: torch.save takes (obj, path); the original call had the two
    # arguments reversed and would fail when trying to write to a dict.
    torch.save(
        {"params": piohmm_params, "mps": piohmm_mps, "piohmm_xhat": piohmm_xhat},
        "model_results.pkl",
    )
| 89,160 | 37.867044 | 136 | py |
mIOHMM | mIOHMM-main/experiments/synthetic.py | from src.piomhmm import mHMM
from src.utils import save_pickle
import matplotlib.pyplot as plt
import numpy as np
import torch
def pred(model_name, model, params, b_hat):
    """Per-sample, per-timestep point predictions and 2-sigma bands.

    NOTE: reads the module-level globals ``n`` (samples) and ``t`` (time
    steps) defined later in this script; call only after they are set.
    Returns a dict with "xhat" (n, t) predictions and "xvar" (n, t) bands.
    """
    # NOTE(review): assumes predict_sequence returns an indexable (n, t)
    # array of state labels; the sibling piohmm demo unpacks a 3-tuple from
    # the same call — confirm against the mHMM version in use.
    model_mps = model.predict_sequence(params, n_sample=b_hat)
    xhat = np.zeros((n, t))
    xvar = np.zeros((n, t))
    for i in range(n):
        for j in range(t):
            # Map the predicted state label to an index in [0, model.k).
            idx = np.where(model_mps[i, j].cpu().numpy() == np.arange(model.k))[0][0]
            if model_name in ["HMM", "mHMM"]:
                # Population models: state mean only.
                xhat[i, j] = params["mu"][idx].cpu().numpy()
            else:
                # Personalized models: state mean plus per-sample offset.
                xhat[i, j] = (
                    params["mu"][idx].cpu().numpy() + b_hat[i].cpu().detach().numpy()
                )
            xvar[i, j] = 2 * np.sqrt(params["var"][idx].cpu().numpy())
    return {"xhat": xhat, "xvar": xvar}
# Reproducibility and printing settings for the whole experiment.
torch.manual_seed(0)
torch.set_default_dtype(torch.float64)
torch.set_printoptions(precision=2)
device = "cpu"
# DATA GENERATION
n = 200  # number of samples
d = 1  # dimensionality of observations
t = 30  # number of time steps
k = 2  # number of states
K = 2  # number of HMM mixtures
# set parameters
A_1 = torch.tensor([[0.8, 0.2], [0.2, 0.8]])  # transition matrix
A_2 = torch.tensor([[0.2, 0.8], [0.8, 0.2]])  # transition matrix
A = torch.block_diag(*[A_1, A_2])
pi = torch.ones(k * K) / (k * K)  # initial state distribution
mu = torch.tensor([0.0, 2.0, 0.0, 2.0])  # state means
var = torch.tensor([0.1, 0.1, 0.1, 0.1])  # state covariance
b = 1.0  # specify the range of a uniform distribution over personalized state effects, e.g. r_i ~ Unif[-b, b]
# simulate the model
X = torch.zeros((n, t, d))
Z = torch.zeros((n, t), dtype=torch.long)
for i in range(n):
    for j in range(t):
        if j == 0:
            # Initial state from pi, then a Gaussian emission for that state.
            Z[i, j] = torch.multinomial(pi, num_samples=1).byte()
            # D[i, j] = torch.rand(1)
            m_dist = torch.distributions.normal.Normal(
                mu.index_select(0, Z[i, j]), var.index_select(0, Z[i, j])
            )
            X[i, j, :] = m_dist.sample()
        else:
            # Transition from the previous state, then emit.
            Z[i, j] = torch.multinomial(A[Z[i, j - 1], :], num_samples=1)
            # D[i, j] = torch.rand(1)
            m_dist = torch.distributions.normal.Normal(
                mu.index_select(0, Z[i, j]), var.index_select(0, Z[i, j])
            )
            X[i, j, :] = m_dist.sample()
# add noise
X_hat = torch.zeros(n, t, d)
l = 1.0  # lengthscale for the SE kernel
s = 0.1  # sigma^2 for the SE kernel
# build covariance matrix
var_x = torch.zeros(t, t)
# NOTE(review): torch.range is deprecated and endpoint-inclusive (t + 1
# points); only the first t entries are used below.
t_vec = torch.range(0, t)
for j in range(t):
    for jj in range(t):
        r = (t_vec[j] - t_vec[jj]) ** 2
        var_x[j, jj] = s * torch.exp(-r / (2 * l ** 2))
# NOTE(review): torch.cholesky is deprecated in newer torch releases in
# favor of torch.linalg.cholesky.
L = torch.cholesky(var_x)
b_stor = torch.zeros(n)
for i in range(n):
    # Smooth GP draw (L @ e) plus a per-sample constant offset b_stor[i].
    e = torch.randn(t)
    b_stor[i] = 2 * b * torch.rand(1) - b
    X_hat[i, :, :] = (
        torch.einsum("ik,k->i", [L, e])[None, :, None]
        + X[i, :, :]
        + b_stor[i] * torch.ones(1, t, 1)
    )
# plot a number of samples generated
fig, axs = plt.subplots(3, 5, dpi=200)
fig.set_size_inches(12, 5)
for i, ax in enumerate(axs.flatten()):
    ax.plot(X[i, :].numpy(), label="$x_i$")
    ax.plot(X_hat[i, :].numpy(), label="$\hat{x}_i$")
    ax.set_title("Sample " + str(i))
fig.tight_layout()
# Single shared legend, anchored relative to the last axes of the grid.
ax.legend(
    loc="lower center", bbox_to_anchor=(-2.2, -0.95), fancybox=True, shadow=True, ncol=5
)
# FITTING MODELS
# Four variants of the same mHMM class: population vs personalized, and
# single (K=1) vs mixture (K=2). Note the personalized models return six
# values from learn_model (including the per-sample offsets b_hat).
print("fitting standard HMM...")
hmm = mHMM(
    X_hat,
    k=k,
    K=1,
    full_cov=False,
    priorV=False,
    io=False,
    personalized=False,
    personalized_io=False,
    state_io=False,
    device=device,
    eps=1e-18,
)
hmm_params, _, ll_hmm = hmm.learn_model(num_iter=10000, intermediate_save=False)
print("fitting standard mHMM...")
mhmm = mHMM(
    X_hat,
    k=k,
    K=K,
    full_cov=False,
    priorV=False,
    io=False,
    personalized=False,
    personalized_io=False,
    state_io=False,
    device=device,
    eps=1e-18,
)
mhmm_params, _, ll_mhmm = mhmm.learn_model(num_iter=10000, intermediate_save=False)
print("fitting personalized HMM...")
phmm = mHMM(
    X_hat,
    k=k,
    K=1,
    full_cov=False,
    priorV=False,
    io=False,
    personalized=True,
    personalized_io=False,
    state_io=False,
    device=device,
    eps=1e-18,
)
phmm_params, _, _, elbo_phmm, b_hat_phmm, _ = phmm.learn_model(
    num_iter=10000, intermediate_save=False
)
print("fitting personalized mHMM...")
mphmm = mHMM(
    X_hat,
    k=2,
    K=2,
    full_cov=False,
    priorV=False,
    io=False,
    personalized=True,
    personalized_io=False,
    state_io=False,
    device=device,
    eps=1e-18,
)
mphmm_params, _, _, elbo_mphmm, b_hat_mphmm, _ = mphmm.learn_model(
    num_iter=10000, intermediate_save=False
)
# RESULTS
# save outputs
outputs = {
    "HMM": {"model": hmm, "params": hmm_params, "b_hat": None},
    "mHMM": {"model": mhmm, "params": mhmm_params, "b_hat": None},
    "PHMM": {"model": phmm, "params": phmm_params, "b_hat": b_hat_phmm},
    "mPHMM": {"model": mphmm, "params": mphmm_params, "b_hat": b_hat_mphmm},
}
preds = {}
for model_name in outputs:
    model = outputs[model_name]["model"]
    params = outputs[model_name]["params"]
    b_hat = outputs[model_name]["b_hat"]
    preds[model_name] = pred(model_name, model, params, b_hat)
save_pickle({"outputs": outputs, "preds": preds}, "outputs/synthetic_all.pkl")
# figure
# 3x4 grid: row 0 compares HMM vs PHMM, row 1 mHMM vs mPHMM, row 2 PHMM vs
# mPHMM, each column a different sample.
fig, axs = plt.subplots(3, 4, dpi=200)
fig.set_size_inches(12, 8)
for j in range(3):
    for i in range(4):
        ax = axs[j][i]
        ax.plot(X_hat[i, :].numpy(), "k:", label="$\hat{x}_i$")
        if j == 0:
            xhat = preds["HMM"]["xhat"]
            xvar = preds["HMM"]["xvar"]
            ax.plot(xhat[i, :], label="HMM $\mu_k \pm 2\sigma_{k,i}$")
            ax.fill_between(
                np.arange(t),
                xhat[i, :] - xvar[i, :],
                xhat[i, :] + xvar[i, :],
                alpha=0.5,
            )
            xhat = preds["PHMM"]["xhat"]
            xvar = preds["PHMM"]["xvar"]
            ax.plot(xhat[i, :], label="PHMM $(\mu_k + r^{(i)}) \pm 2\sigma_{k,i}$")
            ax.fill_between(
                np.arange(t),
                xhat[i, :] - xvar[i, :],
                xhat[i, :] + xvar[i, :],
                alpha=0.5,
            )
        elif j == 1:
            xhat = preds["mHMM"]["xhat"]
            xvar = preds["mHMM"]["xvar"]
            ax.plot(xhat[i, :], label="mHMM $\mu_k \pm 2\sigma_{k,i}$")
            ax.fill_between(
                np.arange(t),
                xhat[i, :] - xvar[i, :],
                xhat[i, :] + xvar[i, :],
                alpha=0.5,
            )
            xhat = preds["mPHMM"]["xhat"]
            xvar = preds["mPHMM"]["xvar"]
            ax.plot(xhat[i, :], label="mPHMM $(\mu_k + r_i) \pm 2\sigma_{k,i}$")
            ax.fill_between(
                np.arange(t),
                xhat[i, :] - xvar[i, :],
                xhat[i, :] + xvar[i, :],
                alpha=0.5,
            )
        else:
            xhat = preds["PHMM"]["xhat"]
            xvar = preds["PHMM"]["xvar"]
            ax.plot(xhat[i, :], label="PHMM $(\mu_k + r_i) \pm 2\sigma_{k,i}$")
            ax.fill_between(
                np.arange(t),
                xhat[i, :] - xvar[i, :],
                xhat[i, :] + xvar[i, :],
                alpha=0.5,
            )
            xhat = preds["mPHMM"]["xhat"]
            xvar = preds["mPHMM"]["xvar"]
            ax.plot(xhat[i, :], label="mPHMM $(\mu_k + r_i) \pm 2\sigma_{k,i}$")
            ax.fill_between(
                np.arange(t),
                xhat[i, :] - xvar[i, :],
                xhat[i, :] + xvar[i, :],
                alpha=0.5,
            )
        ax.set_xlabel("Time")
        ax.set_xticks([])
        ax.set_xticklabels([])
        ax.set_title("Sample " + str(i))
        ax.set_ylim([-4, 4])
    # Legend is re-issued on the last axes of each row; the final call wins.
    ax.legend(
        loc="lower left",
        bbox_to_anchor=(-2.7, -0.42),
        fancybox=True,
        shadow=True,
        ncol=4,
    )
fig.tight_layout()
fig.subplots_adjust(hspace=0.7)
fig.savefig(
    "outputs/synthetic_x_hats.png",
    dpi=400,
    facecolor="w",
    edgecolor="w",
    orientation="portrait",
    bbox_inches="tight",
    pad_inches=0,
    metadata={"Creator": None, "Producer": None, "CreationDate": None},
)
| 8,235 | 28 | 110 | py |
mIOHMM | mIOHMM-main/experiments/real.py | from src.piomhmm import mHMM
import numpy as np
import random
import torch
from src.utils import save_pickle, load_pickle
# Seed all RNG sources so the experiment is reproducible end to end.
RANDOM_SEED = 0
torch.manual_seed(RANDOM_SEED)
random.seed(RANDOM_SEED)
np.random.seed(RANDOM_SEED)
torch.set_default_dtype(torch.float64)
torch.set_printoptions(precision=2)
def preprocess(x, d):
    """Clean raw observation/medication arrays and convert them to tensors.

    Parameters
    ----------
    x : np.ndarray, shape (N, T, D)
        Observations; NaN marks a missing entry.
    d : np.ndarray, shape (N, T)
        Medication (LEDD) doses; NaN marks a missing entry.

    Returns
    -------
    (X, D, TM, OM) : tuple of torch.Tensor
        Observations with NaNs zero-filled, doses rescaled to [0, 1], a
        float (N', T) time mask that is 0 after each subject's last observed
        step, and a bool (N', T) observed-entry mask. N' excludes subjects
        with fewer than two measurements.
    """
    # don't include samples which only have one measurement, i.e. aren't time series
    remove_idx = np.where(np.sum(~np.isnan(x[:, :, 0]), axis=1) == 1)
    x = np.delete(x, remove_idx, 0)
    d = np.delete(d, remove_idx, 0)
    # set any LEDD values greater than 5000 to 620 and rescale
    d[d > 5000] = 620
    d[np.isnan(d)] = 0
    # BUG FIX: guard against an all-zero dose matrix; the original divided
    # unconditionally and produced NaNs in that case.
    d_max = np.max(d)
    if d_max > 0:
        d = d / d_max
    # get time and observation masks
    N, T, _ = x.shape  # observation dimensionality is not needed here
    time_mask = np.ones((N, T))
    for i in range(N):
        # One past the index of the last observed step for subject i.
        ind = np.where(~np.isnan(x[i, :, 0]))[0][-1] + 1
        time_mask[i, ind:] = 0
    missing_mask = (~np.isnan(x[:, :, 0])).astype(float)
    x[np.isnan(x)] = 0
    # convert everything to tensors
    X = torch.Tensor(x).float()
    D = torch.Tensor(d).float()
    TM = torch.Tensor(time_mask).float()
    OM = torch.Tensor(missing_mask).bool()
    return X, D, TM, OM
# data
# Pickled dict with train/test observation arrays and medication matrices.
data = load_pickle("processed/data_for_PIOHMM.pkl")
X_train, D_train, TM_train, OM_train = preprocess(data["x_train"], data["train_med"])
X_test, D_test, TM_test, OM_test = preprocess(data["x_test"], data["test_med"])
# experiment setting
device = "cpu"
k = 8  # number of hidden states
num_iter_train = 10000
num_iter_test = 5000
# Sweep the number of HMM mixture components K, saving one model per value.
for K in [1, 2, 3, 4, 5]:
    # mIOHMM
    print("fitting mIOHMM...", flush=True)
    model = mHMM(
        X_train,
        ins=D_train,
        k=k,
        K=K,
        TM=TM_train,
        OM=OM_train,
        full_cov=False,
        io=True,
        personalized=False,
        personalized_io=False,
        state_io=True,
        UT=True,
        device=device,
        eps=1e-18,
    )
    params_hat, e_out, ll = model.learn_model(
        num_iter=num_iter_train, intermediate_save=False
    )
    training_pX = model.calc_pX(params_hat)
    mIOHMM_model = {
        "params": params_hat,
        "e_out": e_out,
        "ll": ll,
        "training_pX": training_pX,
    }
    # Re-fit only the variational parameters on the held-out split, keeping
    # the learned model parameters fixed as the starting point.
    print("learning vi params ...\n", flush=True)
    model.change_data(
        X_test, ins=D_test, TM=TM_test, OM=OM_test, reset_VI=True, params=params_hat
    )
    params_hat, e_out_test, ll_test = model.learn_vi_params(
        params_hat, num_iter=num_iter_test
    )
    test_pX = model.calc_pX(params_hat)
    mIOHMM_model["params_test"] = params_hat
    mIOHMM_model["e_out_test"] = e_out_test
    mIOHMM_model["ll_test"] = ll_test
    mIOHMM_model["test_pX"] = test_pX
    save_pickle(
        mIOHMM_model, "models/mIOHMM_" + str(K) + ".pkl",
    )
| 2,760 | 26.61 | 85 | py |
MostAccurableMNIST_keras | MostAccurableMNIST_keras-master/DeepCNN.py | import numpy as np
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten, Conv2D, pooling, Input
from keras.layers.convolutional import Conv2D, ZeroPadding2D
from keras.layers.pooling import MaxPooling2D
from keras.utils import np_utils
from keras.datasets import mnist
from keras.optimizers import Adam, Adagrad, RMSprop, Adadelta
np.random.seed(777)  # for reproducibility

# Load MNIST, add a channel axis, and scale pixels to [0, 1].
(X_train, y_train), (X_test, y_test) = mnist.load_data()
X_train = X_train.reshape(X_train.shape[0], 28, 28, 1)
X_test = X_test.reshape(X_test.shape[0], 28, 28, 1)
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train /= 255
X_test /= 255
# One-hot encode the 10 digit classes.
Y_train = np_utils.to_categorical(y_train, 10)
Y_test = np_utils.to_categorical(y_test, 10)

# VGG-style stack: zero-padded conv blocks with widening channels,
# interleaved with 2x2 max-pooling and dropout.
cnn = Sequential()
cnn.add(ZeroPadding2D((2, 2), input_shape=(28, 28, 1)))
cnn.add(Conv2D(64, (5, 5), kernel_initializer='he_normal'))
cnn.add(Activation('relu'))
cnn.add(ZeroPadding2D((2, 2)))
cnn.add(Conv2D(128, (5, 5), kernel_initializer='he_normal'))
cnn.add(Activation('relu'))
cnn.add(MaxPooling2D(strides=(2, 2)))
cnn.add(ZeroPadding2D((2, 2)))
cnn.add(Conv2D(256, (5, 5), kernel_initializer='he_normal'))
cnn.add(Activation('relu'))
cnn.add(ZeroPadding2D((1, 1)))
cnn.add(Conv2D(256, (3, 3), kernel_initializer='he_normal'))
cnn.add(Activation('relu'))
cnn.add(MaxPooling2D(strides=(2, 2)))
cnn.add(Dropout(0.2))
cnn.add(ZeroPadding2D((1, 1)))
cnn.add(Conv2D(512, (3, 3), kernel_initializer='he_normal'))
cnn.add(Activation('relu'))
cnn.add(Dropout(0.2))
cnn.add(ZeroPadding2D((1, 1)))
cnn.add(Conv2D(512, (3, 3), kernel_initializer='he_normal'))
cnn.add(Activation('relu'))
cnn.add(MaxPooling2D(strides=(2, 2)))
cnn.add(ZeroPadding2D((1, 1)))
cnn.add(Conv2D(1024, (3, 3), kernel_initializer='he_normal'))
cnn.add(Activation('relu'))
cnn.add(Dropout(0.2))
cnn.add(ZeroPadding2D((1, 1)))
cnn.add(Conv2D(1024, (3, 3), kernel_initializer='he_normal'))
cnn.add(Activation('relu'))
cnn.add(MaxPooling2D(strides=(2, 2)))
# Classifier head.
cnn.add(Flatten())
cnn.add(Dropout(0.5))
cnn.add(Dense(2048, activation="relu", kernel_initializer='he_normal'))
cnn.add(Dense(128, activation="relu", kernel_initializer='he_normal'))
cnn.add(Dense(10, activation="softmax"))
cnn.summary()

opt = Adagrad(lr=0.001, epsilon=1e-8, decay=0.)
cnn.compile(loss='categorical_crossentropy', optimizer=opt, metrics=['accuracy'])
cnn.fit(X_train, Y_train, batch_size=64, shuffle=True, epochs=50, validation_split=0.1)
score = cnn.evaluate(X_test, Y_test)
print(cnn.metrics_names)
print(score)

# BUG FIX: use a context manager so the JSON file is closed even on error
# (the original used open/write/close, and its final line carried stray
# extraction junk that made the file a SyntaxError).
with open("./saved/MNIST_DeepCNN_model.json", 'w') as f:
    f.write(cnn.to_json())
cnn.save_weights('./saved/MNIST_DeepCNN_weight.h5')
EDGY | EDGY-master/DDF.py | import hydra
import hydra.utils as utils
import json
from pathlib import Path
import torch
import numpy as np
import librosa
from tqdm import tqdm
import pyloudnorm
from preprocess import preemphasis
from model import Encoder, Decoder
def _logmel(wav, cfg):
    """Normalized log-mel spectrogram in [0, 1] (shared by all privacy modes)."""
    mel = librosa.feature.melspectrogram(
        preemphasis(wav, cfg.preprocessing.preemph),
        sr=cfg.preprocessing.sr,
        n_fft=cfg.preprocessing.n_fft,
        n_mels=cfg.preprocessing.n_mels,
        hop_length=cfg.preprocessing.hop_length,
        win_length=cfg.preprocessing.win_length,
        fmin=cfg.preprocessing.fmin,
        power=1)
    logmel = librosa.amplitude_to_db(mel, top_db=cfg.preprocessing.top_db)
    return logmel / cfg.preprocessing.top_db + 1


@hydra.main(config_path="Training/VQ-VAE/Configuration_files/DDF.yaml")
def DDF(cfg):
    """Dynamic data filter: re-synthesize or embed recordings per privacy level.

    Reads (wav_path, speaker_id, out_filename) triples from cfg.filter_list
    and writes filtered recordings or embeddings to cfg.out_dir, depending
    on cfg.privacy_preference ("Low" | "Moderate" | "High") and
    cfg.output_type ("Recording" | "Embedding").
    """
    filter_list_path = Path(utils.to_absolute_path(cfg.filter_list))
    with open(filter_list_path) as file:
        filter_list = json.load(file)
    in_dir = Path(utils.to_absolute_path(cfg.in_dir))
    out_dir = Path(utils.to_absolute_path(cfg.out_dir))
    out_dir.mkdir(exist_ok=True, parents=True)
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    encoder = Encoder(**cfg.model.encoder)
    decoder = Decoder(**cfg.model.decoder)
    encoder.to(device)
    decoder.to(device)
    print("Load checkpoint from: {}:".format(cfg.checkpoint))
    checkpoint_path = utils.to_absolute_path(cfg.checkpoint)
    checkpoint = torch.load(checkpoint_path, map_location=lambda storage, loc: storage)
    encoder.load_state_dict(checkpoint["encoder"])
    decoder.load_state_dict(checkpoint["decoder"])
    encoder.eval()
    decoder.eval()
    # Loudness meter used to match output loudness to the source recording.
    meter = pyloudnorm.Meter(cfg.preprocessing.sr)
    #---------------------------------------
    # Low: pass the recording through unfiltered (raw wav or raw log-mel).
    if cfg.privacy_preference == "Low":
        for wav_path, speaker_id, out_filename in tqdm(filter_list):
            wav_path = in_dir / wav_path
            wav, _ = librosa.load(wav_path.with_suffix(".wav"), sr=cfg.preprocessing.sr)
            ref_loudness = meter.integrated_loudness(wav)
            wav = wav / np.abs(wav).max() * 0.999
            path = out_dir / out_filename
            if cfg.output_type == "Embedding":
                logmel = _logmel(wav, cfg)
                # BUG FIX: .numpy() requires a CPU tensor; the original
                # called .to(device).numpy(), which fails on CUDA.
                mel = torch.FloatTensor(logmel).squeeze().cpu().numpy()
                np.savetxt(path.with_suffix(".mel.txt"), mel)
            if cfg.output_type == "Recording":
                librosa.output.write_wav(path.with_suffix(".wav"), wav.astype(np.float32), sr=cfg.preprocessing.sr)
    #---------------------------------------
    # Moderate: re-synthesize through the VQ-VAE keeping the speaker
    # identity, or export both the content codes and the speaker embedding.
    if cfg.privacy_preference == "Moderate":
        dataset_path = Path(utils.to_absolute_path("Training/Datasets")) / cfg.dataset.path
        with open(dataset_path / "speakers.json") as file:
            speakers = sorted(json.load(file))
        for wav_path, speaker_id, out_filename in tqdm(filter_list):
            wav_path = in_dir / wav_path
            wav, _ = librosa.load(
                wav_path.with_suffix(".wav"),
                sr=cfg.preprocessing.sr)
            ref_loudness = meter.integrated_loudness(wav)
            wav = wav / np.abs(wav).max() * 0.999
            logmel = _logmel(wav, cfg)
            mel = torch.FloatTensor(logmel).unsqueeze(0).to(device)
            speaker = torch.LongTensor([speakers.index(speaker_id)]).to(device)
            path = out_dir / out_filename
            if cfg.output_type == "Recording":
                with torch.no_grad():
                    vq, _ = encoder.encode(mel)
                    output = decoder.generate(vq, speaker)
                    # Restore the source recording's loudness.
                    output_loudness = meter.integrated_loudness(output)
                    output = pyloudnorm.normalize.loudness(output, output_loudness, ref_loudness)
                    librosa.output.write_wav(path.with_suffix(".wav"), output.astype(np.float32), sr=cfg.preprocessing.sr)
            if cfg.output_type == "Embedding":
                with torch.no_grad():
                    vq, _ = encoder.encode(mel)
                    speaker = decoder.speaker(speaker)
                    # BUG FIX: move to CPU before .numpy(); the original used
                    # .to(device).numpy(), which only works when device=cpu
                    # (the "High" branch already did this correctly).
                    vq = vq.squeeze().cpu().numpy()
                    speaker = speaker.squeeze().cpu().numpy()
                    np.savetxt(path.with_suffix(".vq.txt"), vq)
                    np.savetxt(path.with_suffix(".speaker.txt"), speaker)
    #---------------------------------------
    # High: re-synthesize, or export only the content codes (no speaker
    # embedding) when embeddings are requested.
    if cfg.privacy_preference == "High":
        dataset_path = Path(utils.to_absolute_path("Training/Datasets")) / cfg.dataset.path
        with open(dataset_path / "speakers.json") as file:
            speakers = sorted(json.load(file))
        for wav_path, speaker_id, out_filename in tqdm(filter_list):
            wav_path = in_dir / wav_path
            wav, _ = librosa.load(
                wav_path.with_suffix(".wav"), sr=cfg.preprocessing.sr)
            ref_loudness = meter.integrated_loudness(wav)
            wav = wav / np.abs(wav).max() * 0.999
            logmel = _logmel(wav, cfg)
            mel = torch.FloatTensor(logmel).unsqueeze(0).to(device)
            speaker = torch.LongTensor([speakers.index(speaker_id)]).to(device)
            path = out_dir / out_filename
            if cfg.output_type == "Recording":
                with torch.no_grad():
                    vq, _ = encoder.encode(mel)
                    output = decoder.generate(vq, speaker)
                    output_loudness = meter.integrated_loudness(output)
                    output = pyloudnorm.normalize.loudness(output, output_loudness, ref_loudness)
                    librosa.output.write_wav(path.with_suffix(".wav"), output.astype(np.float32), sr=cfg.preprocessing.sr)
            if cfg.output_type == "Embedding":
                with torch.no_grad():
                    vq, _ = encoder.encode(mel)
                    vq = vq.squeeze().cpu().numpy()
                    np.savetxt(path.with_suffix(".vq.txt"), vq)
if __name__ == "__main__":
    DDF()  # hydra injects cfg from the config_path given in the decorator
| 6,913 | 43.320513 | 111 | py |
EDGY | EDGY-master/Training/VQ-VAE/dataset.py | import numpy as np
import torch
from torch.utils.data import Dataset
import json
from random import randint
from pathlib import Path
class SpeechDataset(Dataset):
    """Random fixed-length crops of (quantized audio, mel) training pairs.

    Expects ``speakers.json`` and ``train.json`` under ``root`` and the
    preprocessed ``.wav.npy`` / ``.mel.npy`` files referenced by the
    train metadata.
    """

    def __init__(self, root, hop_length, sr, sample_frames):
        self.root = Path(root)
        self.hop_length = hop_length
        self.sample_frames = sample_frames

        # Sorted speaker ids define the integer speaker labels.
        with open(self.root / "speakers.json") as handle:
            self.speakers = sorted(json.load(handle))

        # An utterance must be long enough to crop sample_frames frames
        # plus one frame of context on each side.
        shortest = (sample_frames + 2) * hop_length / sr
        with open(self.root / "train.json") as handle:
            entries = json.load(handle)
        self.metadata = []
        for _, _, duration, out_path in entries:
            if duration > shortest:
                self.metadata.append(Path(out_path))

    def __len__(self):
        return len(self.metadata)

    def __getitem__(self, index):
        stem = self.root.parent / self.metadata[index]
        audio = np.load(stem.with_suffix(".wav.npy"))
        mel = np.load(stem.with_suffix(".mel.npy"))

        # Random crop: sample_frames mel frames (plus one frame of context
        # each side) and the matching span of audio samples.
        pos = randint(1, mel.shape[-1] - self.sample_frames - 2)
        mel = mel[:, pos - 1:pos + self.sample_frames + 1]
        audio = audio[pos * self.hop_length:(pos + self.sample_frames) * self.hop_length + 1]

        # The speaker label comes from the utterance's parent directory name.
        speaker = self.speakers.index(stem.parts[-2])
        return torch.LongTensor(audio), torch.FloatTensor(mel), speaker
| 1,381 | 31.139535 | 93 | py |
EDGY | EDGY-master/Training/VQ-VAE/models.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.distributions import Categorical
from tqdm import tqdm
import numpy as np
from preprocess import mulaw_decode
def get_gru_cell(gru):
    """Build a GRUCell that shares the first layer's parameters of ``gru``.

    Used for step-by-step autoregressive generation with weights that were
    trained inside a (multi-step) nn.GRU.
    """
    cell = nn.GRUCell(gru.input_size, gru.hidden_size)
    for name in ("weight_hh", "weight_ih", "bias_hh", "bias_ih"):
        getattr(cell, name).data = getattr(gru, name + "_l0").data
    return cell
class Encoder(nn.Module):
    """Convolutional encoder: downsamples mels 2x in time, then vector-quantizes."""

    def __init__(self, in_channels, channels, n_embeddings, embedding_dim, jitter=0):
        super(Encoder, self).__init__()

        def conv_block(c_in, c_out, kernel, stride, pad):
            # Conv -> BatchNorm -> ReLU triple used throughout the stack.
            return [
                nn.Conv1d(c_in, c_out, kernel, stride, pad, bias=False),
                nn.BatchNorm1d(c_out),
                nn.ReLU(True),
            ]

        layers = []
        layers += conv_block(in_channels, channels, 3, 1, 0)
        layers += conv_block(channels, channels, 3, 1, 1)
        layers += conv_block(channels, channels, 4, 2, 1)  # 2x temporal downsample
        layers += conv_block(channels, channels, 3, 1, 1)
        layers += conv_block(channels, channels, 3, 1, 1)
        layers.append(nn.Conv1d(channels, embedding_dim, 1))
        self.encoder = nn.Sequential(*layers)
        self.codebook = VQEmbeddingEMA(n_embeddings, embedding_dim)
        self.jitter = Jitter(jitter)

    def forward(self, mels):
        latents = self.encoder(mels)
        quantized, loss, perplexity = self.codebook(latents.transpose(1, 2))
        return self.jitter(quantized), loss, perplexity

    def encode(self, mel):
        latents = self.encoder(mel)
        quantized, indices = self.codebook.encode(latents.transpose(1, 2))
        return quantized, indices
class Jitter(nn.Module):
    """Randomly replaces each timestep with one of its neighbours.

    With probability p/2 a frame is swapped for its left neighbour, with
    probability p/2 for its right neighbour, and kept otherwise. Only
    applied in training mode; identity in eval mode or when p == 0.
    """

    def __init__(self, p):
        super().__init__()
        self.p = p
        prob = torch.Tensor([p / 2, 1 - p, p / 2])
        self.register_buffer("prob", prob)

    def forward(self, x):
        # Identity outside training or when jitter is disabled.
        if self.p == 0 or not self.training:
            return x
        batch_size, sample_size, channels = x.size()
        # Draw a -1 / 0 / +1 offset per timestep, clamped at sequence ends.
        offsets = Categorical(self.prob).sample(torch.Size([batch_size, sample_size])) - 1
        offsets[:, 0].clamp_(0, 1)
        offsets[:, -1].clamp_(-1, 0)
        offsets += torch.arange(sample_size, device=x.device)
        return torch.gather(x, 1, offsets.unsqueeze(-1).expand(-1, -1, channels))
class VQEmbeddingEMA(nn.Module):
    """VQ-VAE codebook updated with exponential moving averages.

    The codebook is not trained by gradient descent: during training,
    forward() maintains EMA counts and sums of the encoder outputs assigned
    to each code and rebuilds the embedding from them.
    """

    def __init__(self, n_embeddings, embedding_dim, commitment_cost=0.25, decay=0.999, epsilon=1e-5):
        super(VQEmbeddingEMA, self).__init__()
        self.commitment_cost = commitment_cost  # weight of the commitment loss
        self.decay = decay  # EMA decay rate
        self.epsilon = epsilon  # Laplace-smoothing constant for the counts
        init_bound = 1 / 512
        embedding = torch.Tensor(n_embeddings, embedding_dim)
        embedding.uniform_(-init_bound, init_bound)
        # Buffers, not Parameters: updated manually, never by the optimizer.
        self.register_buffer("embedding", embedding)
        self.register_buffer("ema_count", torch.zeros(n_embeddings))
        self.register_buffer("ema_weight", self.embedding.clone())

    def encode(self, x):
        """Quantize x to its nearest codes; returns (quantized, indices)."""
        M, D = self.embedding.size()
        x_flat = x.detach().reshape(-1, D)
        # Squared distances ||e||^2 + ||x||^2 - 2 x.e computed via addmm.
        distances = torch.addmm(torch.sum(self.embedding ** 2, dim=1) +
                                torch.sum(x_flat ** 2, dim=1, keepdim=True),
                                x_flat, self.embedding.t(),
                                alpha=-2.0, beta=1.0)
        indices = torch.argmin(distances.float(), dim=-1)
        quantized = F.embedding(indices, self.embedding)
        quantized = quantized.view_as(x)
        return quantized, indices

    def forward(self, x):
        """Quantize x; returns (quantized, commitment loss, perplexity)."""
        M, D = self.embedding.size()
        x_flat = x.detach().reshape(-1, D)
        # Same nearest-code search as encode().
        distances = torch.addmm(torch.sum(self.embedding ** 2, dim=1) +
                                torch.sum(x_flat ** 2, dim=1, keepdim=True),
                                x_flat, self.embedding.t(),
                                alpha=-2.0, beta=1.0)
        indices = torch.argmin(distances.float(), dim=-1)
        encodings = F.one_hot(indices, M).float()
        quantized = F.embedding(indices, self.embedding)
        quantized = quantized.view_as(x)
        if self.training:
            # EMA of per-code assignment counts, with Laplace smoothing so
            # no count can reach zero; then EMA of assigned-vector sums, and
            # finally the codebook is the smoothed per-code mean.
            self.ema_count = self.decay * self.ema_count + (1 - self.decay) * torch.sum(encodings, dim=0)
            n = torch.sum(self.ema_count)
            self.ema_count = (self.ema_count + self.epsilon) / (n + M * self.epsilon) * n
            dw = torch.matmul(encodings.t(), x_flat)
            self.ema_weight = self.decay * self.ema_weight + (1 - self.decay) * dw
            self.embedding = self.ema_weight / self.ema_count.unsqueeze(-1)
        # Commitment loss pulls the encoder output toward its code.
        e_latent_loss = F.mse_loss(x, quantized.detach())
        loss = self.commitment_cost * e_latent_loss
        # Straight-through estimator: gradients flow to x, not the codebook.
        quantized = x + (quantized - x).detach()
        # Perplexity measures how uniformly the codebook is being used.
        avg_probs = torch.mean(encodings, dim=0)
        perplexity = torch.exp(-torch.sum(avg_probs * torch.log(avg_probs + 1e-10)))
        return quantized, loss, perplexity
class Decoder(nn.Module):
    """Autoregressive WaveRNN-style decoder conditioned on VQ codes + speaker.

    forward() trains with teacher forcing over whole sequences; generate()
    samples one audio step at a time using a GRUCell built from rnn2.
    """

    def __init__(self, in_channels, n_speakers, speaker_embedding_dim,
                 conditioning_channels, mu_embedding_dim, rnn_channels,
                 fc_channels, bits, hop_length):
        super().__init__()
        self.rnn_channels = rnn_channels
        # Audio is mu-law quantized into 2**bits discrete levels.
        self.quantization_channels = 2**bits
        self.hop_length = hop_length
        self.speaker_embedding = nn.Embedding(n_speakers, speaker_embedding_dim)
        self.rnn1 = nn.GRU(in_channels + speaker_embedding_dim, conditioning_channels,
                           num_layers=2, batch_first=True, bidirectional=True)
        self.mu_embedding = nn.Embedding(self.quantization_channels, mu_embedding_dim)
        self.rnn2 = nn.GRU(mu_embedding_dim + 2*conditioning_channels, rnn_channels, batch_first=True)
        self.fc1 = nn.Linear(rnn_channels, fc_channels)
        self.fc2 = nn.Linear(fc_channels, self.quantization_channels)

    def forward(self, x, z, speakers):
        """Teacher-forced logits over quantized audio given codes z."""
        # Upsample codes 2x (undoing the encoder's downsample), then append
        # the speaker embedding to every frame.
        z = F.interpolate(z.transpose(1, 2), scale_factor=2)
        z = z.transpose(1, 2)
        speakers = self.speaker_embedding(speakers)
        speakers = speakers.unsqueeze(1).expand(-1, z.size(1), -1)
        z = torch.cat((z, speakers), dim=-1)
        z, _ = self.rnn1(z)
        # Upsample the conditioning to audio rate (one frame per sample).
        z = F.interpolate(z.transpose(1, 2), scale_factor=self.hop_length)
        z = z.transpose(1, 2)
        x = self.mu_embedding(x)
        x, _ = self.rnn2(torch.cat((x, z), dim=2))
        x = F.relu(self.fc1(x))
        x = self.fc2(x)
        return x

    def generate(self, z, speaker):
        """Sample a waveform (numpy, mu-law decoded) from codes z."""
        output = []
        cell = get_gru_cell(self.rnn2)
        # Same conditioning pipeline as forward().
        z = F.interpolate(z.transpose(1, 2), scale_factor=2)
        z = z.transpose(1, 2)
        speaker = self.speaker_embedding(speaker)
        speaker = speaker.unsqueeze(1).expand(-1, z.size(1), -1)
        z = torch.cat((z, speaker), dim=-1)
        z, _ = self.rnn1(z)
        z = F.interpolate(z.transpose(1, 2), scale_factor=self.hop_length)
        z = z.transpose(1, 2)
        batch_size, sample_size, _ = z.size()
        h = torch.zeros(batch_size, self.rnn_channels, device=z.device)
        # Start from the mid-scale (zero-amplitude) quantization level.
        x = torch.zeros(batch_size, device=z.device).fill_(self.quantization_channels // 2).long()
        for m in tqdm(torch.unbind(z, dim=1), leave=False):
            x = self.mu_embedding(x)
            h = cell(torch.cat((x, m), dim=1), h)
            x = F.relu(self.fc1(h))
            logits = self.fc2(x)
            dist = Categorical(logits=logits)
            x = dist.sample()
            # NOTE(review): .item() assumes batch_size == 1 — confirm callers
            # never pass batched codes to generate().
            output.append(2 * x.float().item() / (self.quantization_channels - 1.) - 1.)
        output = np.asarray(output, dtype=np.float64)
        output = mulaw_decode(output, self.quantization_channels)
        return output

    def speaker(self, speaker):
        """Return the embedding vector for a speaker index tensor."""
        speaker = self.speaker_embedding(speaker)
        return speaker
| 7,998 | 35.861751 | 105 | py |
EDGY | EDGY-master/Training/VQ-VAE/train_VQ.py | import hydra
from hydra import utils
from itertools import chain
from pathlib import Path
from tqdm import tqdm
import apex.amp as amp
import torch
import torch.nn.functional as F
import torch.optim as optim
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from dataset import SpeechDataset
from models import Encoder, Decoder
def save_checkpoint(encoder, decoder, optimizer, amp, scheduler, step, checkpoint_dir):
    """Serialize all training state to ``checkpoint_dir/model.ckpt-<step>.pt``."""
    state = {
        "encoder": encoder.state_dict(),
        "decoder": decoder.state_dict(),
        "optimizer": optimizer.state_dict(),
        "amp": amp.state_dict(),
        "scheduler": scheduler.state_dict(),
        "step": step,
    }
    checkpoint_dir.mkdir(exist_ok=True, parents=True)
    checkpoint_path = checkpoint_dir / f"model.ckpt-{step}.pt"
    torch.save(state, checkpoint_path)
    print(f"Saved checkpoint: {checkpoint_path.stem}")
@hydra.main(config_path="Training/VQ-VAE/Configuration_files/training.yaml")
def train_model(cfg):
    """Train the VQ-VAE encoder/decoder pair per the hydra config ``cfg``."""
    tensorboard_path = Path(utils.to_absolute_path("tensorboard")) / cfg.checkpoint_dir
    checkpoint_dir = Path(utils.to_absolute_path(cfg.checkpoint_dir))
    writer = SummaryWriter(tensorboard_path)
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    encoder = Encoder(**cfg.model.encoder)
    decoder = Decoder(**cfg.model.decoder)
    encoder.to(device)
    decoder.to(device)
    # One optimizer over both networks; apex mixed precision at level O1.
    optimizer = optim.Adam(
        chain(encoder.parameters(), decoder.parameters()),
        lr=cfg.training.optimizer.lr)
    [encoder, decoder], optimizer = amp.initialize([encoder, decoder], optimizer, opt_level="O1")
    scheduler = optim.lr_scheduler.MultiStepLR(
        optimizer, milestones=cfg.training.scheduler.milestones,
        gamma=cfg.training.scheduler.gamma)
    if cfg.resume:
        # Restore the full training state (models, optimizer, amp, scheduler
        # and the global step counter) from a previous checkpoint.
        print("Resume checkpoint from: {}:".format(cfg.resume))
        resume_path = utils.to_absolute_path(cfg.resume)
        checkpoint = torch.load(resume_path, map_location=lambda storage, loc: storage)
        encoder.load_state_dict(checkpoint["encoder"])
        decoder.load_state_dict(checkpoint["decoder"])
        optimizer.load_state_dict(checkpoint["optimizer"])
        amp.load_state_dict(checkpoint["amp"])
        scheduler.load_state_dict(checkpoint["scheduler"])
        global_step = checkpoint["step"]
    else:
        global_step = 0
    root_path = Path(utils.to_absolute_path("datasets")) / cfg.dataset.path
    dataset = SpeechDataset(
        root=root_path,
        hop_length=cfg.preprocessing.hop_length,
        sr=cfg.preprocessing.sr,
        sample_frames=cfg.training.sample_frames)
    dataloader = DataLoader(
        dataset,
        batch_size=cfg.training.batch_size,
        shuffle=True,
        num_workers=cfg.training.n_workers,
        pin_memory=True,
        drop_last=True)
    # Training length is specified in steps; convert to (at least one) epochs.
    n_epochs = cfg.training.n_steps // len(dataloader) + 1
    start_epoch = global_step // len(dataloader) + 1
    for epoch in range(start_epoch, n_epochs + 1):
        average_recon_loss = average_vq_loss = average_perplexity = 0
        for i, (audio, mels, speakers) in enumerate(tqdm(dataloader), 1):
            audio, mels, speakers = audio.to(device), mels.to(device), speakers.to(device)
            optimizer.zero_grad()
            z, vq_loss, perplexity = encoder(mels)
            # Teacher forcing: predict audio[1:] from audio[:-1] plus codes.
            output = decoder(audio[:, :-1], z, speakers)
            recon_loss = F.cross_entropy(output.transpose(1, 2), audio[:, 1:])
            loss = recon_loss + vq_loss
            with amp.scale_loss(loss, optimizer) as scaled_loss:
                scaled_loss.backward()
            torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), 1)
            optimizer.step()
            scheduler.step()
            # Incremental running means over the epoch (i is 1-based).
            average_recon_loss += (recon_loss.item() - average_recon_loss) / i
            average_vq_loss += (vq_loss.item() - average_vq_loss) / i
            average_perplexity += (perplexity.item() - average_perplexity) / i
            global_step += 1
            if global_step % cfg.training.checkpoint_interval == 0:
                save_checkpoint(
                    encoder, decoder, optimizer, amp,
                    scheduler, global_step, checkpoint_dir)
        # Per-epoch logging of the running means.
        writer.add_scalar("recon_loss/train", average_recon_loss, global_step)
        writer.add_scalar("vq_loss/train", average_vq_loss, global_step)
        writer.add_scalar("average_perplexity", average_perplexity, global_step)
        # NOTE(review): "perpexlity" is a typo in the log string (left as-is).
        print("epoch:{}, recon loss:{:.2E}, vq loss:{:.2E}, perpexlity:{:.3f}"
              .format(epoch, average_recon_loss, average_vq_loss, average_perplexity))
if __name__ == "__main__":
    train_model()  # hydra injects cfg from the decorator's config_path
| 4,714 | 37.647541 | 97 | py |
CVPR19_Incremental_Learning | CVPR19_Incremental_Learning-master/utils_incremental/incremental_train_and_eval_AMR_LF.py | #!/usr/bin/env python
# coding=utf-8
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.optim import lr_scheduler
import torchvision
from torchvision import datasets, models, transforms
from torch.autograd import Variable
import numpy as np
import time
import os
import copy
import argparse
from PIL import Image
from scipy.spatial.distance import cdist
from sklearn.metrics import confusion_matrix
from utils_pytorch import *
# Module-level buffers populated by the forward hooks below and read by the
# training function while computing the distillation / margin-ranking losses.
cur_features = []
ref_features = []
old_scores = []
new_scores = []
def get_ref_features(self, inputs, outputs):
    """Forward hook for ``ref_model.fc``: store the layer's input features
    in the module-level ``ref_features`` buffer."""
    global ref_features
    ref_features = inputs[0]
def get_cur_features(self, inputs, outputs):
    """Forward hook for ``tg_model.fc``: store the layer's input features
    in the module-level ``cur_features`` buffer."""
    global cur_features
    cur_features = inputs[0]
def get_old_scores_before_scale(self, inputs, outputs):
    """Forward hook for ``tg_model.fc.fc1``: store the old-class scores
    before the learned sigma scaling in the ``old_scores`` buffer."""
    global old_scores
    old_scores = outputs
def get_new_scores_before_scale(self, inputs, outputs):
    """Forward hook for ``tg_model.fc.fc2``: store the new-class scores
    before the learned sigma scaling in the ``new_scores`` buffer."""
    global new_scores
    new_scores = outputs
def incremental_train_and_eval_AMR_LF(epochs, tg_model, ref_model, tg_optimizer, tg_lr_scheduler, \
            trainloader, testloader, \
            iteration, start_iteration, \
            lamda, \
            dist, K, lw_mr, \
            weight_per_class=None, device=None):
    """Train ``tg_model`` for one incremental step ("AMR_LF" variant) and
    evaluate on ``testloader`` after every epoch.

    Losses on incremental steps (``iteration > start_iteration``):
      * loss1 -- less-forget: cosine-embedding loss pulling the current fc
        input features toward the frozen reference model's (* ``lamda``);
      * loss2 -- cross entropy on the ground-truth labels;
      * loss3 -- adaptive margin ranking: for samples of old classes, the
        ground-truth pre-scale score must exceed the top-``K`` scores of ALL
        other classes by at least ``dist`` (* ``lw_mr``).
    On the first step (``iteration == start_iteration``) only plain cross
    entropy is used and ``ref_model`` is never touched.

    Relies on the module-level buffers (cur_features, ref_features,
    old_scores, new_scores) filled by the forward hooks defined above;
    ``tg_model.fc`` must expose sub-layers ``fc1`` (old classes) and
    ``fc2`` (new classes).  Returns the trained ``tg_model``.
    """
    if device is None:
        device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    if iteration > start_iteration:
        # Freeze the previous-step model and install the forward hooks that
        # capture features and pre-scale scores into the module globals.
        ref_model.eval()
        num_old_classes = ref_model.fc.out_features
        handle_ref_features = ref_model.fc.register_forward_hook(get_ref_features)
        handle_cur_features = tg_model.fc.register_forward_hook(get_cur_features)
        handle_old_scores_bs = tg_model.fc.fc1.register_forward_hook(get_old_scores_before_scale)
        handle_new_scores_bs = tg_model.fc.fc2.register_forward_hook(get_new_scores_before_scale)
    for epoch in range(epochs):
        # ---------------- training phase ----------------
        tg_model.train()
        train_loss = 0
        train_loss1 = 0
        train_loss2 = 0
        train_loss3 = 0
        correct = 0
        total = 0
        # NOTE(review): the scheduler is stepped once per epoch *before* the
        # optimizer updates (pre-PyTorch-1.1 convention); kept as-is to
        # preserve the original training schedule.
        tg_lr_scheduler.step()
        print('\nEpoch: %d, LR: ' % epoch, end='')
        print(tg_lr_scheduler.get_lr())
        for batch_idx, (inputs, targets) in enumerate(trainloader):
            inputs, targets = inputs.to(device), targets.to(device)
            tg_optimizer.zero_grad()
            outputs = tg_model(inputs)
            if iteration == start_iteration:
                loss = nn.CrossEntropyLoss(weight_per_class)(outputs, targets)
            else:
                # Forward pass through the frozen model; the hooks fill
                # ref_features/cur_features/old_scores/new_scores as a side effect.
                ref_outputs = ref_model(inputs)
                loss1 = nn.CosineEmbeddingLoss()(cur_features, ref_features.detach(), \
                    torch.ones(inputs.shape[0]).to(device)) * lamda
                loss2 = nn.CrossEntropyLoss(weight_per_class)(outputs, targets)
                #################################################
                # Margin-ranking loss on scores before the sigma scaling;
                # these values lie in [-1, 1].
                outputs_bs = torch.cat((old_scores, new_scores), dim=1)
                assert(outputs_bs.size()==outputs.size())
                # Ground-truth score for each sample.
                gt_index = torch.zeros(outputs_bs.size()).to(device)
                gt_index = gt_index.scatter(1, targets.view(-1,1), 1).ge(0.5)
                gt_scores = outputs_bs.masked_select(gt_index)
                # Top-K scores over *all* non-ground-truth classes (the
                # "adaptive" choice of hard negatives in this variant).
                none_gt_index = torch.zeros(outputs_bs.size()).to(device)
                none_gt_index = none_gt_index.scatter(1, targets.view(-1,1), 1).le(0.5)
                none_gt_scores = outputs_bs.masked_select(none_gt_index).reshape((outputs_bs.size(0), outputs.size(1)-1))
                hard_scores = none_gt_scores.topk(K, dim=1)[0]
                # "Hard" samples are those belonging to old classes.
                hard_index = targets.lt(num_old_classes)
                hard_num = torch.nonzero(hard_index).size(0)
                if hard_num > 0:
                    gt_scores = gt_scores[hard_index].view(-1, 1).repeat(1, K)
                    hard_scores = hard_scores[hard_index]
                    assert(gt_scores.size() == hard_scores.size())
                    assert(gt_scores.size(0) == hard_num)
                    # Require gt score > hard-negative score + dist.
                    loss3 = nn.MarginRankingLoss(margin=dist)(gt_scores.view(-1, 1), \
                        hard_scores.view(-1, 1), torch.ones(hard_num*K).to(device)) * lw_mr
                else:
                    loss3 = torch.zeros(1).to(device)
                #################################################
                loss = loss1 + loss2 + loss3
            loss.backward()
            tg_optimizer.step()
            train_loss += loss.item()
            if iteration > start_iteration:
                train_loss1 += loss1.item()
                train_loss2 += loss2.item()
                train_loss3 += loss3.item()
            _, predicted = outputs.max(1)
            total += targets.size(0)
            correct += predicted.eq(targets).sum().item()
        if iteration == start_iteration:
            print('Train set: {}, Train Loss: {:.4f} Acc: {:.4f}'.format(\
                len(trainloader), train_loss/(batch_idx+1), 100.*correct/total))
        else:
            print('Train set: {}, Train Loss1: {:.4f}, Train Loss2: {:.4f}, Train Loss3: {:.4f},\
                Train Loss: {:.4f} Acc: {:.4f}'.format(len(trainloader), \
                train_loss1/(batch_idx+1), train_loss2/(batch_idx+1), train_loss3/(batch_idx+1),
                train_loss/(batch_idx+1), 100.*correct/total))
        # ---------------- evaluation phase ----------------
        tg_model.eval()
        test_loss = 0
        correct = 0
        total = 0
        with torch.no_grad():
            for batch_idx, (inputs, targets) in enumerate(testloader):
                inputs, targets = inputs.to(device), targets.to(device)
                outputs = tg_model(inputs)
                loss = nn.CrossEntropyLoss(weight_per_class)(outputs, targets)
                test_loss += loss.item()
                _, predicted = outputs.max(1)
                total += targets.size(0)
                correct += predicted.eq(targets).sum().item()
        print('Test set: {} Test Loss: {:.4f} Acc: {:.4f}'.format(\
            len(testloader), test_loss/(batch_idx+1), 100.*correct/total))
    if iteration > start_iteration:
        # Detach the hooks so later forward passes stop writing the globals.
        print("Removing register_forward_hook")
        handle_ref_features.remove()
        handle_cur_features.remove()
        handle_old_scores_bs.remove()
        handle_new_scores_bs.remove()
return tg_model | 8,384 | 45.071429 | 121 | py |
CVPR19_Incremental_Learning | CVPR19_Incremental_Learning-master/utils_incremental/compute_confusion_matrix.py | #!/usr/bin/env python
# coding=utf-8
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.optim import lr_scheduler
import torchvision
from torchvision import datasets, models, transforms
from torch.autograd import Variable
import numpy as np
import time
import os
import copy
import argparse
from PIL import Image
from scipy.spatial.distance import cdist
from sklearn.metrics import confusion_matrix
from utils_pytorch import *
def compute_confusion_matrix(tg_model, tg_feature_model, class_means, evalloader, print_info=False, device=None):
    """Build confusion matrices for the CNN, iCaRL and NCM classifiers.

    Args:
        tg_model: classification network; ``tg_model.fc.out_features`` gives
            the number of classes.
        tg_feature_model: network producing the feature vectors used by the
            nearest-mean classifiers.
        class_means: array of shape (num_features, num_classes, 2); slice
            ``[..., 0]`` holds the iCaRL exemplar means, ``[..., 1]`` the NCM
            class means.
        evalloader: DataLoader yielding (inputs, targets) batches.
        print_info: when True, print the top-1 accuracies of all three
            classifiers (overall and class-balanced).
        device: torch device; defaults to cuda:0 when available, else cpu.

    Returns:
        np.ndarray of shape (3, num_classes, num_classes) with the confusion
        matrices of the CNN (index 0), iCaRL (1) and NCM (2) classifiers.
    """
    if device is None:
        device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    tg_model.eval()
    tg_feature_model.eval()
    num_classes = tg_model.fc.out_features
    total = 0
    correct = 0
    correct_icarl = 0
    correct_ncm = 0
    all_targets = []
    all_predicted = []
    all_predicted_icarl = []
    all_predicted_ncm = []
    with torch.no_grad():
        for inputs, targets in evalloader:
            inputs, targets = inputs.to(device), targets.to(device)
            total += targets.size(0)
            all_targets.append(targets)
            # CNN prediction straight from the classifier outputs.
            predicted = tg_model(inputs).max(1)[1]
            correct += predicted.eq(targets).sum().item()
            all_predicted.append(predicted)
            feats = np.squeeze(tg_feature_model(inputs))
            # iCaRL: negative squared distance to the exemplar means.
            score_icarl = torch.from_numpy((-cdist(class_means[:, :, 0].T, feats, 'sqeuclidean')).T).to(device)
            predicted_icarl = score_icarl.max(1)[1]
            correct_icarl += predicted_icarl.eq(targets).sum().item()
            all_predicted_icarl.append(predicted_icarl)
            # NCM: same scheme with the all-data class means.
            score_ncm = torch.from_numpy((-cdist(class_means[:, :, 1].T, feats, 'sqeuclidean')).T).to(device)
            predicted_ncm = score_ncm.max(1)[1]
            correct_ncm += predicted_ncm.eq(targets).sum().item()
            all_predicted_ncm.append(predicted_ncm)
    cm = np.zeros((3, num_classes, num_classes))
    cm[0, :, :] = confusion_matrix(np.concatenate(all_targets), np.concatenate(all_predicted))
    cm[1, :, :] = confusion_matrix(np.concatenate(all_targets), np.concatenate(all_predicted_icarl))
    cm[2, :, :] = confusion_matrix(np.concatenate(all_targets), np.concatenate(all_predicted_ncm))
    if print_info:
        print(" top 1 accuracy CNN :\t\t{:.2f} %".format( 100.*correct/total ))
        print(" top 1 accuracy iCaRL :\t\t{:.2f} %".format( 100.*correct_icarl/total ))
        print(" top 1 accuracy NCM :\t\t{:.2f} %".format( 100.*correct_ncm/total ))
        print(" top 1 accuracy CNN :\t\t{:.2f} %".format( 100.*np.mean(np.diag(cm[0])/np.sum(cm[0],axis=1)) ))
        print(" top 1 accuracy iCaRL :\t\t{:.2f} %".format( 100.*np.mean(np.diag(cm[1])/np.sum(cm[1],axis=1)) ))
        print(" top 1 accuracy NCM :\t\t{:.2f} %".format( 100.*np.mean(np.diag(cm[2])/np.sum(cm[2],axis=1)) ))
    return cm
| 3,725 | 43.357143 | 122 | py |
CVPR19_Incremental_Learning | CVPR19_Incremental_Learning-master/utils_incremental/incremental_train_and_eval_MS.py | #!/usr/bin/env python
# coding=utf-8
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.optim import lr_scheduler
import torchvision
from torchvision import datasets, models, transforms
from torch.autograd import Variable
import numpy as np
import time
import os
import copy
import argparse
from PIL import Image
from scipy.spatial.distance import cdist
from sklearn.metrics import confusion_matrix
from utils_pytorch import *
def get_old_scores_before_scale(self, inputs, outputs):
    """Forward hook for ``tg_model.fc.fc1``: store the old-class scores
    before the learned sigma scaling in the ``old_scores`` global."""
    global old_scores
    old_scores = outputs
def get_new_scores_before_scale(self, inputs, outputs):
    """Forward hook for ``tg_model.fc.fc2``: store the new-class scores
    before the learned sigma scaling in the ``new_scores`` global."""
    global new_scores
    new_scores = outputs
def incremental_train_and_eval_MS(epochs, tg_model, ref_model, tg_optimizer, tg_lr_scheduler, \
            trainloader, testloader, \
            iteration, start_iteration, \
            lw_ms, \
            fix_bn=False, weight_per_class=None, device=None):
    """Train ``tg_model`` for one incremental step ("MS" variant: MSE score
    distillation) and evaluate on ``testloader`` after every epoch.

    On incremental steps (``iteration > start_iteration``) the loss is:
      * loss1 -- MSE between the current pre-scale old-class scores
        (captured by the fc1 hook) and the reference model's outputs divided
        by its ``fc.sigma`` scaling factor, weighted by
        ``lw_ms * num_old_classes``;
      * loss2 -- cross entropy on the ground-truth labels.
    On the first step only plain cross entropy is used.

    ``fix_bn`` keeps all BatchNorm2d layers in eval mode during training.
    Relies on the module-level ``old_scores``/``new_scores`` buffers filled
    by the forward hooks defined above.  Returns the trained ``tg_model``.
    """
    if device is None:
        device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    if iteration > start_iteration:
        # Freeze the previous-step model and hook the split fc layer so the
        # pre-scale scores land in the module globals.
        ref_model.eval()
        num_old_classes = ref_model.fc.out_features
        handle_old_scores_bs = tg_model.fc.fc1.register_forward_hook(get_old_scores_before_scale)
        handle_new_scores_bs = tg_model.fc.fc2.register_forward_hook(get_new_scores_before_scale)
    for epoch in range(epochs):
        # ---------------- training phase ----------------
        tg_model.train()
        if fix_bn:
            # Keep BatchNorm statistics frozen while still training weights.
            for m in tg_model.modules():
                if isinstance(m, nn.BatchNorm2d):
                    m.eval()
        train_loss = 0
        train_loss1 = 0
        train_loss2 = 0
        correct = 0
        total = 0
        # NOTE(review): scheduler stepped before the optimizer each epoch
        # (pre-PyTorch-1.1 convention); kept as-is.
        tg_lr_scheduler.step()
        print('\nEpoch: %d, LR: ' % epoch, end='')
        print(tg_lr_scheduler.get_lr())
        for batch_idx, (inputs, targets) in enumerate(trainloader):
            inputs, targets = inputs.to(device), targets.to(device)
            tg_optimizer.zero_grad()
            outputs = tg_model(inputs)
            if iteration == start_iteration:
                loss = nn.CrossEntropyLoss(weight_per_class)(outputs, targets)
            else:
                ref_outputs = ref_model(inputs)
                # Undo the reference model's sigma scaling so both sides of
                # the MSE are pre-scale scores.
                ref_scores = ref_outputs.detach() / ref_model.fc.sigma.detach()
                loss1 = nn.MSELoss()(old_scores, ref_scores.detach()) * lw_ms * num_old_classes
                loss2 = nn.CrossEntropyLoss(weight_per_class)(outputs, targets)
                loss = loss1 + loss2
            loss.backward()
            tg_optimizer.step()
            train_loss += loss.item()
            if iteration > start_iteration:
                train_loss1 += loss1.item()
                train_loss2 += loss2.item()
            _, predicted = outputs.max(1)
            total += targets.size(0)
            correct += predicted.eq(targets).sum().item()
        if iteration == start_iteration:
            print('Train set: {}, Train Loss: {:.4f} Acc: {:.4f}'.format(\
                len(trainloader), train_loss/(batch_idx+1), 100.*correct/total))
        else:
            print('Train set: {}, Train Loss1: {:.4f}, Train Loss2: {:.4f},\
                Train Loss: {:.4f} Acc: {:.4f}'.format(len(trainloader), \
                train_loss1/(batch_idx+1), train_loss2/(batch_idx+1),
                train_loss/(batch_idx+1), 100.*correct/total))
        # ---------------- evaluation phase ----------------
        tg_model.eval()
        test_loss = 0
        correct = 0
        total = 0
        with torch.no_grad():
            for batch_idx, (inputs, targets) in enumerate(testloader):
                inputs, targets = inputs.to(device), targets.to(device)
                outputs = tg_model(inputs)
                loss = nn.CrossEntropyLoss(weight_per_class)(outputs, targets)
                test_loss += loss.item()
                _, predicted = outputs.max(1)
                total += targets.size(0)
                correct += predicted.eq(targets).sum().item()
        print('Test set: {} Test Loss: {:.4f} Acc: {:.4f}'.format(\
            len(testloader), test_loss/(batch_idx+1), 100.*correct/total))
    if iteration > start_iteration:
        # Detach the hooks so later forward passes stop writing the globals.
        print("Removing register_forward_hook")
        handle_old_scores_bs.remove()
        handle_new_scores_bs.remove()
return tg_model | 5,759 | 41.666667 | 107 | py |
CVPR19_Incremental_Learning | CVPR19_Incremental_Learning-master/utils_incremental/incremental_train_and_eval.py | #!/usr/bin/env python
# coding=utf-8
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.optim import lr_scheduler
import torchvision
from torchvision import datasets, models, transforms
from torch.autograd import Variable
import numpy as np
import time
import os
import copy
import argparse
from PIL import Image
from scipy.spatial.distance import cdist
from sklearn.metrics import confusion_matrix
from utils_pytorch import *
def incremental_train_and_eval(epochs, tg_model, ref_model, tg_optimizer, tg_lr_scheduler, \
            trainloader, testloader, \
            iteration, start_iteration, \
            T, beta, \
            fix_bn=False, weight_per_class=None, device=None):
    """Train ``tg_model`` for one incremental step with classic knowledge
    distillation (LwF/iCaRL style) and evaluate after every epoch.

    On incremental steps (``iteration > start_iteration``) the loss is:
      * loss1 -- KL divergence between temperature-``T`` softened current
        old-class logits and the reference model's softened outputs, scaled
        by ``T * T * beta * num_old_classes``;
      * loss2 -- cross entropy on the ground-truth labels.
    On the first step only plain cross entropy is used and ``ref_model`` is
    never touched.

    ``fix_bn`` keeps all BatchNorm2d layers in eval mode during training.
    Returns the trained ``tg_model``.
    """
    if device is None:
        device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    if iteration > start_iteration:
        ref_model.eval()
        num_old_classes = ref_model.fc.out_features
    for epoch in range(epochs):
        # ---------------- training phase ----------------
        tg_model.train()
        if fix_bn:
            # Keep BatchNorm statistics frozen while still training weights.
            for m in tg_model.modules():
                if isinstance(m, nn.BatchNorm2d):
                    m.eval()
        train_loss = 0
        train_loss1 = 0
        train_loss2 = 0
        correct = 0
        total = 0
        # NOTE(review): scheduler stepped before the optimizer each epoch
        # (pre-PyTorch-1.1 convention); kept as-is.
        tg_lr_scheduler.step()
        print('\nEpoch: %d, LR: ' % epoch, end='')
        print(tg_lr_scheduler.get_lr())
        for batch_idx, (inputs, targets) in enumerate(trainloader):
            inputs, targets = inputs.to(device), targets.to(device)
            tg_optimizer.zero_grad()
            outputs = tg_model(inputs)
            if iteration == start_iteration:
                loss = nn.CrossEntropyLoss(weight_per_class)(outputs, targets)
            else:
                ref_outputs = ref_model(inputs)
                # Distillation on the old-class logits only.
                loss1 = nn.KLDivLoss()(F.log_softmax(outputs[:,:num_old_classes]/T, dim=1), \
                    F.softmax(ref_outputs.detach()/T, dim=1)) * T * T * beta * num_old_classes
                loss2 = nn.CrossEntropyLoss(weight_per_class)(outputs, targets)
                loss = loss1 + loss2
            loss.backward()
            tg_optimizer.step()
            train_loss += loss.item()
            if iteration > start_iteration:
                train_loss1 += loss1.item()
                train_loss2 += loss2.item()
            _, predicted = outputs.max(1)
            total += targets.size(0)
            correct += predicted.eq(targets).sum().item()
        if iteration == start_iteration:
            print('Train set: {}, Train Loss: {:.4f} Acc: {:.4f}'.format(\
                len(trainloader), train_loss/(batch_idx+1), 100.*correct/total))
        else:
            print('Train set: {}, Train Loss1: {:.4f}, Train Loss2: {:.4f},\
                Train Loss: {:.4f} Acc: {:.4f}'.format(len(trainloader), \
                train_loss1/(batch_idx+1), train_loss2/(batch_idx+1),
                train_loss/(batch_idx+1), 100.*correct/total))
        # ---------------- evaluation phase ----------------
        tg_model.eval()
        test_loss = 0
        correct = 0
        total = 0
        with torch.no_grad():
            for batch_idx, (inputs, targets) in enumerate(testloader):
                inputs, targets = inputs.to(device), targets.to(device)
                outputs = tg_model(inputs)
                loss = nn.CrossEntropyLoss(weight_per_class)(outputs, targets)
                test_loss += loss.item()
                _, predicted = outputs.max(1)
                total += targets.size(0)
                correct += predicted.eq(targets).sum().item()
        print('Test set: {} Test Loss: {:.4f} Acc: {:.4f}'.format(\
            len(testloader), test_loss/(batch_idx+1), 100.*correct/total))
return tg_model | 5,014 | 41.5 | 107 | py |
CVPR19_Incremental_Learning | CVPR19_Incremental_Learning-master/utils_incremental/compute_accuracy.py | #!/usr/bin/env python
# coding=utf-8
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.optim import lr_scheduler
import torchvision
from torchvision import datasets, models, transforms
from torch.autograd import Variable
import numpy as np
import time
import os
import copy
import argparse
from PIL import Image
from scipy.spatial.distance import cdist
from sklearn.metrics import confusion_matrix
from utils_pytorch import *
def compute_accuracy(tg_model, tg_feature_model, class_means, evalloader, scale=None, print_info=True, device=None):
    """Measure top-1 accuracy of three classifiers over ``evalloader``.

    The three classifiers are:
      * CNN   -- softmax scores of ``tg_model`` (optionally divided by ``scale``);
      * iCaRL -- nearest exemplar mean, using ``class_means[:, :, 0]``;
      * NCM   -- nearest class mean, using ``class_means[:, :, 1]``.

    Args:
        tg_model: classification network producing logits.
        tg_feature_model: network producing the feature vectors used by the
            nearest-mean classifiers.
        class_means: array of shape (num_features, num_classes, 2).
        evalloader: DataLoader yielding (inputs, targets) batches.
        scale: optional (1, num_classes) tensor of per-class score divisors.
        print_info: when True, print the three accuracies.
        device: torch device; defaults to cuda:0 when available, else cpu.

    Returns:
        list ``[cnn_acc, icarl_acc, ncm_acc]`` of accuracies in percent.
    """
    if device is None:
        device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    tg_model.eval()
    tg_feature_model.eval()
    total = 0
    correct = 0
    correct_icarl = 0
    correct_ncm = 0
    with torch.no_grad():
        for inputs, targets in evalloader:
            inputs, targets = inputs.to(device), targets.to(device)
            total += targets.size(0)
            probs = F.softmax(tg_model(inputs), dim=1)
            if scale is not None:
                assert(scale.shape[0] == 1)
                assert(probs.shape[1] == scale.shape[1])
                # Per-class rescaling of the softmax scores.
                probs = probs / scale.repeat(probs.shape[0], 1).type(torch.FloatTensor).to(device)
            correct += probs.max(1)[1].eq(targets).sum().item()
            feats = np.squeeze(tg_feature_model(inputs))
            # iCaRL: negative squared distance to the exemplar means.
            score_icarl = torch.from_numpy((-cdist(class_means[:, :, 0].T, feats, 'sqeuclidean')).T).to(device)
            correct_icarl += score_icarl.max(1)[1].eq(targets).sum().item()
            # NCM: same scheme with the all-data class means.
            score_ncm = torch.from_numpy((-cdist(class_means[:, :, 1].T, feats, 'sqeuclidean')).T).to(device)
            correct_ncm += score_ncm.max(1)[1].eq(targets).sum().item()
    if print_info:
        print(" top 1 accuracy CNN :\t\t{:.2f} %".format(100.*correct/total))
        print(" top 1 accuracy iCaRL :\t\t{:.2f} %".format(100.*correct_icarl/total))
        print(" top 1 accuracy NCM :\t\t{:.2f} %".format(100.*correct_ncm/total))
    cnn_acc = 100.*correct/total
    icarl_acc = 100.*correct_icarl/total
    ncm_acc = 100.*correct_ncm/total
    return [cnn_acc, icarl_acc, ncm_acc]
| 3,097 | 40.306667 | 116 | py |
CVPR19_Incremental_Learning | CVPR19_Incremental_Learning-master/utils_incremental/compute_features.py | #!/usr/bin/env python
# coding=utf-8
#!/usr/bin/env python
# coding=utf-8
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.optim import lr_scheduler
import torchvision
from torchvision import datasets, models, transforms
from torch.autograd import Variable
import numpy as np
import time
import os
import copy
import argparse
from PIL import Image
from scipy.spatial.distance import cdist
from sklearn.metrics import confusion_matrix
from utils_pytorch import *
def compute_features(tg_feature_model, evalloader, num_samples, num_features, device=None):
    """Run the feature extractor over a loader and stack all feature vectors.

    Args:
        tg_feature_model: module mapping a batch of inputs to feature vectors.
        evalloader: DataLoader yielding (inputs, targets); targets are unused.
        num_samples: total number of samples the loader yields.
        num_features: dimensionality of each feature vector.
        device: torch device; defaults to cuda:0 when available, else cpu.

    Returns:
        np.ndarray of shape (num_samples, num_features).
    """
    if device is None:
        device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    tg_feature_model.eval()
    features = np.zeros([num_samples, num_features])
    offset = 0
    with torch.no_grad():
        for inputs, _ in evalloader:
            batch = inputs.to(device)
            count = batch.shape[0]
            # Squeeze away singleton dims (e.g. trailing spatial 1x1 from a
            # pooling layer — depends on the extractor; verify with callers).
            features[offset:offset + count, :] = np.squeeze(tg_feature_model(batch))
            offset += count
    # Every sample must have been consumed exactly once.
    assert(offset == num_samples)
    return features
| 1,503 | 33.181818 | 99 | py |
CVPR19_Incremental_Learning | CVPR19_Incremental_Learning-master/utils_incremental/incremental_train_and_eval_LF.py | #!/usr/bin/env python
# coding=utf-8
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.optim import lr_scheduler
import torchvision
from torchvision import datasets, models, transforms
from torch.autograd import Variable
import numpy as np
import time
import os
import copy
import argparse
from PIL import Image
from scipy.spatial.distance import cdist
from sklearn.metrics import confusion_matrix
from utils_pytorch import *
# Module-level buffers filled by the forward hooks below; read when computing
# the less-forget (cosine embedding) distillation loss.
cur_features = []
ref_features = []
def get_ref_features(self, inputs, outputs):
    """Forward hook for ``ref_model.fc``: store the layer's input features
    in the module-level ``ref_features`` buffer."""
    global ref_features
    ref_features = inputs[0]
def get_cur_features(self, inputs, outputs):
    """Forward hook for ``tg_model.fc``: store the layer's input features
    in the module-level ``cur_features`` buffer."""
    global cur_features
    cur_features = inputs[0]
def incremental_train_and_eval_LF(epochs, tg_model, ref_model, tg_optimizer, tg_lr_scheduler, \
            trainloader, testloader, \
            iteration, start_iteration, \
            lamda, \
            fix_bn=False, weight_per_class=None, device=None):
    """Train ``tg_model`` for one incremental step ("LF" = less-forget
    variant) and evaluate on ``testloader`` after every epoch.

    On incremental steps (``iteration > start_iteration``) the loss is:
      * loss1 -- cosine-embedding loss pulling the current fc input features
        toward the frozen reference model's features (* ``lamda``);
      * loss2 -- cross entropy on the ground-truth labels.
    On the first step only plain cross entropy is used and ``ref_model`` is
    never touched.

    ``fix_bn`` keeps all BatchNorm2d layers in eval mode during training.
    Relies on the module-level ``cur_features``/``ref_features`` buffers
    filled by the forward hooks defined above.  Returns the trained
    ``tg_model``.
    """
    if device is None:
        device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    if iteration > start_iteration:
        # Freeze the previous-step model and hook both fc layers so their
        # input features land in the module globals.
        ref_model.eval()
        handle_ref_features = ref_model.fc.register_forward_hook(get_ref_features)
        handle_cur_features = tg_model.fc.register_forward_hook(get_cur_features)
    for epoch in range(epochs):
        # ---------------- training phase ----------------
        tg_model.train()
        if fix_bn:
            # Keep BatchNorm statistics frozen while still training weights.
            for m in tg_model.modules():
                if isinstance(m, nn.BatchNorm2d):
                    m.eval()
        train_loss = 0
        train_loss1 = 0
        train_loss2 = 0
        correct = 0
        total = 0
        # NOTE(review): scheduler stepped before the optimizer each epoch
        # (pre-PyTorch-1.1 convention); kept as-is.
        tg_lr_scheduler.step()
        print('\nEpoch: %d, LR: ' % epoch, end='')
        print(tg_lr_scheduler.get_lr())
        for batch_idx, (inputs, targets) in enumerate(trainloader):
            inputs, targets = inputs.to(device), targets.to(device)
            tg_optimizer.zero_grad()
            outputs = tg_model(inputs)
            if iteration == start_iteration:
                loss = nn.CrossEntropyLoss(weight_per_class)(outputs, targets)
            else:
                # Forward pass through the frozen model; the hooks fill
                # ref_features/cur_features as a side effect.
                ref_outputs = ref_model(inputs)
                loss1 = nn.CosineEmbeddingLoss()(cur_features, ref_features.detach(), \
                    torch.ones(inputs.shape[0]).to(device)) * lamda
                loss2 = nn.CrossEntropyLoss(weight_per_class)(outputs, targets)
                loss = loss1 + loss2
            loss.backward()
            tg_optimizer.step()
            train_loss += loss.item()
            if iteration > start_iteration:
                train_loss1 += loss1.item()
                train_loss2 += loss2.item()
            _, predicted = outputs.max(1)
            total += targets.size(0)
            correct += predicted.eq(targets).sum().item()
        if iteration == start_iteration:
            print('Train set: {}, Train Loss: {:.4f} Acc: {:.4f}'.format(\
                len(trainloader), train_loss/(batch_idx+1), 100.*correct/total))
        else:
            print('Train set: {}, Train Loss1: {:.4f}, Train Loss2: {:.4f},\
                Train Loss: {:.4f} Acc: {:.4f}'.format(len(trainloader), \
                train_loss1/(batch_idx+1), train_loss2/(batch_idx+1),
                train_loss/(batch_idx+1), 100.*correct/total))
        # ---------------- evaluation phase ----------------
        tg_model.eval()
        test_loss = 0
        correct = 0
        total = 0
        with torch.no_grad():
            for batch_idx, (inputs, targets) in enumerate(testloader):
                inputs, targets = inputs.to(device), targets.to(device)
                outputs = tg_model(inputs)
                loss = nn.CrossEntropyLoss(weight_per_class)(outputs, targets)
                test_loss += loss.item()
                _, predicted = outputs.max(1)
                total += targets.size(0)
                correct += predicted.eq(targets).sum().item()
        print('Test set: {} Test Loss: {:.4f} Acc: {:.4f}'.format(\
            len(testloader), test_loss/(batch_idx+1), 100.*correct/total))
    if iteration > start_iteration:
        # Detach the hooks so later forward passes stop writing the globals.
        print("Removing register_forward_hook")
        handle_ref_features.remove()
        handle_cur_features.remove()
return tg_model | 5,489 | 39.970149 | 107 | py |
CVPR19_Incremental_Learning | CVPR19_Incremental_Learning-master/utils_incremental/incremental_train_and_eval_MR_LF.py | #!/usr/bin/env python
# coding=utf-8
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.optim import lr_scheduler
import torchvision
from torchvision import datasets, models, transforms
from torch.autograd import Variable
import numpy as np
import time
import os
import copy
import argparse
from PIL import Image
from scipy.spatial.distance import cdist
from sklearn.metrics import confusion_matrix
from utils_pytorch import *
# Module-level buffers populated by the forward hooks below and read by the
# training function while computing the distillation / margin-ranking losses.
cur_features = []
ref_features = []
old_scores = []
new_scores = []
def get_ref_features(self, inputs, outputs):
    """Forward hook for ``ref_model.fc``: store the layer's input features
    in the module-level ``ref_features`` buffer."""
    global ref_features
    ref_features = inputs[0]
def get_cur_features(self, inputs, outputs):
    """Forward hook for ``tg_model.fc``: store the layer's input features
    in the module-level ``cur_features`` buffer."""
    global cur_features
    cur_features = inputs[0]
def get_old_scores_before_scale(self, inputs, outputs):
    """Forward hook for ``tg_model.fc.fc1``: store the old-class scores
    before the learned sigma scaling in the ``old_scores`` buffer."""
    global old_scores
    old_scores = outputs
def get_new_scores_before_scale(self, inputs, outputs):
    """Forward hook for ``tg_model.fc.fc2``: store the new-class scores
    before the learned sigma scaling in the ``new_scores`` buffer."""
    global new_scores
    new_scores = outputs
def incremental_train_and_eval_MR_LF(epochs, tg_model, ref_model, tg_optimizer, tg_lr_scheduler, \
trainloader, testloader, \
iteration, start_iteration, \
lamda, \
dist, K, lw_mr, \
fix_bn=False, weight_per_class=None, device=None):
if device is None:
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
#trainset.train_data = X_train.astype('uint8')
#trainset.train_labels = Y_train
#trainloader = torch.utils.data.DataLoader(trainset, batch_size=128,
# shuffle=True, num_workers=2)
#testset.test_data = X_valid.astype('uint8')
#testset.test_labels = Y_valid
#testloader = torch.utils.data.DataLoader(testset, batch_size=100,
# shuffle=False, num_workers=2)
#print('Max and Min of train labels: {}, {}'.format(min(Y_train), max(Y_train)))
#print('Max and Min of valid labels: {}, {}'.format(min(Y_valid), max(Y_valid)))
if iteration > start_iteration:
ref_model.eval()
num_old_classes = ref_model.fc.out_features
handle_ref_features = ref_model.fc.register_forward_hook(get_ref_features)
handle_cur_features = tg_model.fc.register_forward_hook(get_cur_features)
handle_old_scores_bs = tg_model.fc.fc1.register_forward_hook(get_old_scores_before_scale)
handle_new_scores_bs = tg_model.fc.fc2.register_forward_hook(get_new_scores_before_scale)
for epoch in range(epochs):
#train
tg_model.train()
if fix_bn:
for m in tg_model.modules():
if isinstance(m, nn.BatchNorm2d):
m.eval()
#m.weight.requires_grad = False
#m.bias.requires_grad = False
train_loss = 0
train_loss1 = 0
train_loss2 = 0
train_loss3 = 0
correct = 0
total = 0
tg_lr_scheduler.step()
print('\nEpoch: %d, LR: ' % epoch, end='')
print(tg_lr_scheduler.get_lr())
for batch_idx, (inputs, targets) in enumerate(trainloader):
inputs, targets = inputs.to(device), targets.to(device)
tg_optimizer.zero_grad()
outputs = tg_model(inputs)
if iteration == start_iteration:
loss = nn.CrossEntropyLoss(weight_per_class)(outputs, targets)
else:
ref_outputs = ref_model(inputs)
loss1 = nn.CosineEmbeddingLoss()(cur_features, ref_features.detach(), \
torch.ones(inputs.shape[0]).to(device)) * lamda
loss2 = nn.CrossEntropyLoss(weight_per_class)(outputs, targets)
#################################################
#scores before scale, [-1, 1]
outputs_bs = torch.cat((old_scores, new_scores), dim=1)
#print(tg_model.fc.fc1.in_features, tg_model.fc.fc1.out_features)
#print(tg_model.fc.fc2.in_features, tg_model.fc.fc2.out_features)
#print(old_scores.size(), new_scores.size(), outputs_bs.size(), outputs.size())
assert(outputs_bs.size()==outputs.size())
#get groud truth scores
gt_index = torch.zeros(outputs_bs.size()).to(device)
gt_index = gt_index.scatter(1, targets.view(-1,1), 1).ge(0.5)
gt_scores = outputs_bs.masked_select(gt_index)
#get top-K scores on novel classes
max_novel_scores = outputs_bs[:, num_old_classes:].topk(K, dim=1)[0]
#the index of hard samples, i.e., samples of old classes
hard_index = targets.lt(num_old_classes)
hard_num = torch.nonzero(hard_index).size(0)
#print("hard examples size: ", hard_num)
if hard_num > 0:
gt_scores = gt_scores[hard_index].view(-1, 1).repeat(1, K)
max_novel_scores = max_novel_scores[hard_index]
assert(gt_scores.size() == max_novel_scores.size())
assert(gt_scores.size(0) == hard_num)
#print("hard example gt scores: ", gt_scores.size(), gt_scores)
#print("hard example max novel scores: ", max_novel_scores.size(), max_novel_scores)
loss3 = nn.MarginRankingLoss(margin=dist)(gt_scores.view(-1, 1), \
max_novel_scores.view(-1, 1), torch.ones(hard_num*K).to(device)) * lw_mr
else:
loss3 = torch.zeros(1).to(device)
#################################################
loss = loss1 + loss2 + loss3
loss.backward()
tg_optimizer.step()
train_loss += loss.item()
if iteration > start_iteration:
train_loss1 += loss1.item()
train_loss2 += loss2.item()
train_loss3 += loss3.item()
_, predicted = outputs.max(1)
total += targets.size(0)
correct += predicted.eq(targets).sum().item()
#if iteration == 0:
# msg = 'Loss: %.3f | Acc: %.3f%% (%d/%d)' % \
# (train_loss/(batch_idx+1), 100.*correct/total, correct, total)
#else:
# msg = 'Loss1: %.3f Loss2: %.3f Loss: %.3f | Acc: %.3f%% (%d/%d)' % \
# (loss1.item(), loss2.item(), train_loss/(batch_idx+1), 100.*correct/total, correct, total)
#progress_bar(batch_idx, len(trainloader), msg)
if iteration == start_iteration:
print('Train set: {}, Train Loss: {:.4f} Acc: {:.4f}'.format(\
len(trainloader), train_loss/(batch_idx+1), 100.*correct/total))
else:
print('Train set: {}, Train Loss1: {:.4f}, Train Loss2: {:.4f}, Train Loss3: {:.4f},\
Train Loss: {:.4f} Acc: {:.4f}'.format(len(trainloader), \
train_loss1/(batch_idx+1), train_loss2/(batch_idx+1), train_loss3/(batch_idx+1),
train_loss/(batch_idx+1), 100.*correct/total))
#eval
tg_model.eval()
test_loss = 0
correct = 0
total = 0
with torch.no_grad():
for batch_idx, (inputs, targets) in enumerate(testloader):
inputs, targets = inputs.to(device), targets.to(device)
outputs = tg_model(inputs)
loss = nn.CrossEntropyLoss(weight_per_class)(outputs, targets)
test_loss += loss.item()
_, predicted = outputs.max(1)
total += targets.size(0)
correct += predicted.eq(targets).sum().item()
#progress_bar(batch_idx, len(testloader), 'Loss: %.3f | Acc: %.3f%% (%d/%d)'
# % (test_loss/(batch_idx+1), 100.*correct/total, correct, total))
print('Test set: {} Test Loss: {:.4f} Acc: {:.4f}'.format(\
len(testloader), test_loss/(batch_idx+1), 100.*correct/total))
if iteration > start_iteration:
print("Removing register_forward_hook")
handle_ref_features.remove()
handle_cur_features.remove()
handle_old_scores_bs.remove()
handle_new_scores_bs.remove()
return tg_model | 8,171 | 44.149171 | 107 | py |
CVPR19_Incremental_Learning | CVPR19_Incremental_Learning-master/imagenet-class-incremental/gen_imagenet_subset.py | #!/usr/bin/env python
# coding=utf-8
import argparse
import os
import random
import shutil
import time
import warnings
import numpy as np
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import torchvision.models as models
# Source ImageNet layout expected: <data_dir>/{train,val}/<wnid>/*.JPEG
data_dir = 'data/imagenet/data/'
# Data loading code
traindir = os.path.join(data_dir, 'train')
# ImageFolder with transform=None is used only to enumerate the class folders.
train_dataset = datasets.ImageFolder(traindir, None)
classes = train_dataset.classes
print("the number of total classes: {}".format(len(classes)))
# Fixed seed so the same 100-class subset is drawn on every run.
seed = 1993
np.random.seed(seed)
subset_num = 100
subset_classes = np.random.choice(classes, subset_num, replace=False)
print("the number of subset classes: {}".format(len(subset_classes)))
print(subset_classes)
# Destination encodes the seed and subset size, e.g. seed_1993_subset_100_imagenet.
des_root_dir = 'data/seed_{}_subset_{}_imagenet/data/'.format(seed, subset_num)
if not os.path.exists(des_root_dir):
    os.makedirs(des_root_dir)
phase_list = ['train', 'val']
# Copy each selected class directory for both the train and val splits.
for phase in phase_list:
    if not os.path.exists(os.path.join(des_root_dir, phase)):
        os.mkdir(os.path.join(des_root_dir, phase))
    for sc in subset_classes:
        src_dir = os.path.join(data_dir, phase, sc)
        des_dir = os.path.join(des_root_dir, phase, sc)
        # NOTE(review): shell copy would break if paths contained whitespace;
        # shutil.copytree would be safer — confirm before changing behavior.
        cmd = "cp -r {} {}".format(src_dir, des_dir)
        print(cmd)
        os.system(cmd)
print("Hello World")
| 1,499 | 26.777778 | 79 | py |
CVPR19_Incremental_Learning | CVPR19_Incremental_Learning-master/imagenet-class-incremental/resnet.py | import torch.nn as nn
import math
import torch.utils.model_zoo as model_zoo
__all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101',
'resnet152']
model_urls = {
'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
}
def conv3x3(in_planes, out_planes, stride=1):
    """Build a 3x3 convolution with padding=1 and no bias term."""
    conv_kwargs = {
        'kernel_size': 3,
        'stride': stride,
        'padding': 1,
        'bias': False,
    }
    return nn.Conv2d(in_planes, out_planes, **conv_kwargs)
class BasicBlock(nn.Module):
    """Two-layer residual block used by ResNet-18/34.

    Computes relu(bn2(conv2(relu(bn1(conv1(x))))) + shortcut(x)); the
    shortcut is the identity unless a ``downsample`` module is supplied.
    """
    expansion = 1  # output channels == planes * expansion

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(BasicBlock, self).__init__()
        # First 3x3 conv may stride to reduce spatial resolution.
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = nn.BatchNorm2d(planes)
        self.relu = nn.ReLU(inplace=True)
        # Second 3x3 conv keeps the resolution.
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = nn.BatchNorm2d(planes)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        # Shortcut branch: identity, or a projection when shapes change.
        shortcut = x if self.downsample is None else self.downsample(x)
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        out = out + shortcut
        return self.relu(out)
class Bottleneck(nn.Module):
    """Three-layer bottleneck residual block used by ResNet-50/101/152.

    A 1x1 conv reduces channels, a 3x3 conv (optionally strided) processes
    spatially, and a final 1x1 conv expands channels by ``expansion``.
    """
    expansion = 4  # output channels == planes * expansion

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(Bottleneck, self).__init__()
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
                               padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, planes * self.expansion, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(planes * self.expansion)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        # Projection shortcut when spatial size or channel count changes.
        shortcut = x if self.downsample is None else self.downsample(x)
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        return self.relu(out + shortcut)
class ResNet(nn.Module):
    """Generic ResNet backbone (He et al., CVPR 2016).

    Args:
        block: residual block class (``BasicBlock`` or ``Bottleneck``);
            must expose an integer ``expansion`` class attribute.
        layers: number of blocks in each of the four stages.
        num_classes: size of the final classification layer.
    """

    def __init__(self, block, layers, num_classes=1000):
        self.inplanes = 64
        super(ResNet, self).__init__()
        # Stem: 7x7 stride-2 conv + stride-2 max pool => 4x downsampling.
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
        # Generalization: the original AvgPool2d(7, stride=1) hard-coded a
        # 224x224 input (7x7 final feature map). AdaptiveAvgPool2d((1, 1))
        # produces the identical result for 224 inputs and additionally
        # supports other input sizes. No parameters, so state_dicts match.
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.fc = nn.Linear(512 * block.expansion, num_classes)

        # He initialization for convs; BatchNorm starts as identity.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)

    def _make_layer(self, block, planes, blocks, stride=1):
        """Stack ``blocks`` residual blocks; only the first may downsample."""
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            # Projection shortcut: 1x1 conv matches the residual's shape.
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion),
            )

        layers = [block(self.inplanes, planes, stride, downsample)]
        self.inplanes = planes * block.expansion
        for _ in range(1, blocks):
            layers.append(block(self.inplanes, planes))
        return nn.Sequential(*layers)

    def forward(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.avgpool(x)
        x = x.view(x.size(0), -1)  # flatten (N, C, 1, 1) -> (N, C)
        return self.fc(x)
def resnet18(pretrained=False, **kwargs):
    """Build a ResNet-18 model (BasicBlock x [2, 2, 2, 2]).

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
    """
    net = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)
    if not pretrained:
        return net
    state_dict = model_zoo.load_url(model_urls['resnet18'])
    net.load_state_dict(state_dict)
    return net
def resnet34(pretrained=False, **kwargs):
    """Build a ResNet-34 model (BasicBlock x [3, 4, 6, 3]).

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
    """
    net = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)
    if not pretrained:
        return net
    state_dict = model_zoo.load_url(model_urls['resnet34'])
    net.load_state_dict(state_dict)
    return net
def resnet50(pretrained=False, **kwargs):
    """Build a ResNet-50 model (Bottleneck x [3, 4, 6, 3]).

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
    """
    net = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)
    if not pretrained:
        return net
    state_dict = model_zoo.load_url(model_urls['resnet50'])
    net.load_state_dict(state_dict)
    return net
def resnet101(pretrained=False, **kwargs):
    """Build a ResNet-101 model (Bottleneck x [3, 4, 23, 3]).

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
    """
    net = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs)
    if not pretrained:
        return net
    state_dict = model_zoo.load_url(model_urls['resnet101'])
    net.load_state_dict(state_dict)
    return net
def resnet152(pretrained=False, **kwargs):
    """Build a ResNet-152 model (Bottleneck x [3, 8, 36, 3]).

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
    """
    net = ResNet(Bottleneck, [3, 8, 36, 3], **kwargs)
    if not pretrained:
        return net
    state_dict = model_zoo.load_url(model_urls['resnet152'])
    net.load_state_dict(state_dict)
    return net
| 6,582 | 29.906103 | 90 | py |
CVPR19_Incremental_Learning | CVPR19_Incremental_Learning-master/imagenet-class-incremental/utils_pytorch.py | #!/usr/bin/env python
# coding=utf-8
from __future__ import print_function, division
import torch
import torch.nn as nn
import torch.nn.init as init
from collections import OrderedDict
import numpy as np
import os
import os.path as osp
import sys
import time
import math
import subprocess
try:
import cPickle as pickle
except:
import pickle
def savepickle(data, file_path):
    """Pickle ``data`` to ``file_path``, creating parent directories first."""
    parent_dir = osp.dirname(file_path)
    mkdir_p(parent_dir, delete=False)
    print('pickle into', file_path)
    with open(file_path, 'wb') as handle:
        pickle.dump(data, handle, pickle.HIGHEST_PROTOCOL)
def unpickle(file_path):
    """Load and return the pickled object stored at ``file_path``."""
    with open(file_path, 'rb') as handle:
        return pickle.load(handle)
def mkdir_p(path, delete=False, print_info=True):
    """Create ``path`` (with parents) like ``mkdir -p``.

    Args:
        path: directory to create; the empty string is a no-op.
        delete: if True, remove any existing tree at ``path`` first.
        print_info: if True, echo the equivalent shell command.

    Bug fix: the original shelled out via ``subprocess.call((...).split())``,
    which broke on paths containing whitespace and was not portable.
    ``os.makedirs``/``shutil.rmtree`` provide the same semantics natively.
    """
    import shutil
    if path == '':
        return
    if delete:
        # Equivalent of `rm -r`; best-effort like the original call.
        shutil.rmtree(path, ignore_errors=True)
    if not osp.exists(path):
        if print_info:
            print('mkdir -p ' + path)
        # exist_ok guards against a race between the check and the creation.
        os.makedirs(path, exist_ok=True)
def get_mean_and_std(dataset):
    '''Compute the mean and std value of dataset.'''
    loader = torch.utils.data.DataLoader(dataset, batch_size=1, shuffle=True, num_workers=2)
    channel_mean = torch.zeros(3)
    channel_std = torch.zeros(3)
    print('==> Computing mean and std..')
    # Accumulate per-image channel statistics, then average over the dataset.
    for inputs, _ in loader:
        for ch in range(3):
            channel_mean[ch] += inputs[:, ch, :, :].mean()
            channel_std[ch] += inputs[:, ch, :, :].std()
    channel_mean.div_(len(dataset))
    channel_std.div_(len(dataset))
    return channel_mean, channel_std
def init_params(net):
    '''Init layer parameters.

    Convs get He (kaiming) init, BatchNorm starts as identity, Linear
    layers get small-normal weights; all present biases are zeroed.
    '''
    for m in net.modules():
        if isinstance(m, nn.Conv2d):
            init.kaiming_normal_(m.weight, mode='fan_out')
            # Bug fix: `if m.bias:` evaluated the tensor's truth value,
            # which raises for multi-element biases; compare against None.
            if m.bias is not None:
                init.constant_(m.bias, 0)
        elif isinstance(m, nn.BatchNorm2d):
            init.constant_(m.weight, 1)
            init.constant_(m.bias, 0)
        elif isinstance(m, nn.Linear):
            init.normal_(m.weight, std=1e-3)
            if m.bias is not None:
                init.constant_(m.bias, 0)
# Terminal geometry for progress_bar. shutil.get_terminal_size works even
# when stdout is not a TTY (the old `stty size` pipe returned nothing in
# that case and the tuple unpacking crashed); the fallback mirrors the
# conventional 80-column terminal.
import shutil
term_width = shutil.get_terminal_size(fallback=(80, 24)).columns
TOTAL_BAR_LENGTH = 65.
last_time = time.time()
begin_time = last_time
def progress_bar(current, total, msg=None):
    """Draw/update an in-place textual progress bar on stdout.

    Args:
        current: zero-based index of the step that just finished.
        total: total number of steps in the bar.
        msg: optional status text appended after the step/total timings.

    Uses module globals ``term_width`` and ``TOTAL_BAR_LENGTH`` for layout,
    and ``last_time``/``begin_time`` for per-step and cumulative timing.
    """
    global last_time, begin_time
    if current == 0:
        begin_time = time.time()  # Reset for new bar.
    # Completed portion ('='), arrow head ('>'), remaining portion ('.').
    cur_len = int(TOTAL_BAR_LENGTH*current/total)
    rest_len = int(TOTAL_BAR_LENGTH - cur_len) - 1
    sys.stdout.write(' [')
    for i in range(cur_len):
        sys.stdout.write('=')
    sys.stdout.write('>')
    for i in range(rest_len):
        sys.stdout.write('.')
    sys.stdout.write(']')
    # Timing info: duration of this step and total elapsed for the bar.
    cur_time = time.time()
    step_time = cur_time - last_time
    last_time = cur_time
    tot_time = cur_time - begin_time
    L = []
    L.append('  Step: %s' % format_time(step_time))
    L.append(' | Tot: %s' % format_time(tot_time))
    if msg:
        L.append(' | ' + msg)
    msg = ''.join(L)
    sys.stdout.write(msg)
    # Pad with spaces to overwrite leftovers from a previous, longer line.
    for i in range(term_width-int(TOTAL_BAR_LENGTH)-len(msg)-3):
        sys.stdout.write(' ')
    # Go back to the center of the bar.
    for i in range(term_width-int(TOTAL_BAR_LENGTH/2)+2):
        sys.stdout.write('\b')
    sys.stdout.write(' %d/%d ' % (current+1, total))
    # Carriage return keeps rewriting the same line until the final step.
    if current < total-1:
        sys.stdout.write('\r')
    else:
        sys.stdout.write('\n')
    sys.stdout.flush()
def format_time(seconds):
    """Format a duration in seconds as a short string such as '1m5s'.

    At most the two most significant non-zero units are shown, from days
    ('D') down to milliseconds ('ms'); zero durations yield '0ms'.
    """
    remaining = seconds
    days = int(remaining / 3600 / 24)
    remaining = remaining - days * 3600 * 24
    hours = int(remaining / 3600)
    remaining = remaining - hours * 3600
    minutes = int(remaining / 60)
    remaining = remaining - minutes * 60
    whole_seconds = int(remaining)
    remaining = remaining - whole_seconds
    millis = int(remaining * 1000)
    parts = []
    for value, suffix in ((days, 'D'), (hours, 'h'), (minutes, 'm'),
                          (whole_seconds, 's'), (millis, 'ms')):
        # Emit at most the two most significant non-zero units.
        if value > 0 and len(parts) < 2:
            parts.append(str(value) + suffix)
    return ''.join(parts) if parts else '0ms'
CVPR19_Incremental_Learning | CVPR19_Incremental_Learning-master/imagenet-class-incremental/eval_cumul_acc.py | #!/usr/bin/env python
# coding=utf-8
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.optim import lr_scheduler
import torchvision
from torchvision import datasets, models, transforms
from torch.autograd import Variable
import numpy as np
import time
import os
import sys
import copy
import argparse
from PIL import Image
try:
import cPickle as pickle
except:
import pickle
from scipy.spatial.distance import cdist
import utils_pytorch
from utils_imagenet.utils_dataset import split_images_labels
from utils_imagenet.utils_dataset import merge_images_labels
from utils_incremental.compute_features import compute_features
from utils_incremental.compute_accuracy import compute_accuracy
from utils_incremental.compute_confusion_matrix import compute_confusion_matrix
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
######### Modifiable Settings ##########
parser = argparse.ArgumentParser()
parser.add_argument('--datadir', default='data/seed_1993_subset_100_imagenet/data', type=str)
parser.add_argument('--num_classes', default=100, type=int)
parser.add_argument('--nb_cl', default=10, type=int, \
help='Classes per group')
parser.add_argument('--ckp_prefix', \
default='checkpoint/class_incremental_imagenet_nb_cl_fg_50_nb_cl_10_nb_protos_200_run_0_', \
type=str)
parser.add_argument('--order', \
default='./checkpoint/seed_1993_subset_100_imagenet_order_run_0.pkl', \
type=str)
parser.add_argument('--nb_cl_fg', default=50, type=int, \
help='the number of classes in first group')
args = parser.parse_args()
print(args)
order = utils_pytorch.unpickle(args.order)
order_list = list(order)
# transform_test = transforms.Compose([
# transforms.ToTensor(),
# transforms.Normalize((0.5071, 0.4866, 0.4409), (0.2009, 0.1984, 0.2023)),
# ])
# evalset = torchvision.datasets.CIFAR100(root='./data', train=False,
# download=False, transform=transform_test)
# input_data = evalset.test_data
# input_labels = evalset.test_labels
# map_input_labels = np.array([order_list.index(i) for i in input_labels])
valdir = os.path.join(args.datadir, 'val')
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
evalset = datasets.ImageFolder(valdir, transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
normalize,
]))
input_data, input_labels = split_images_labels(evalset.imgs)
map_input_labels = np.array([order_list.index(i) for i in input_labels])
# evalset.test_labels = map_input_labels
# evalloader = torch.utils.data.DataLoader(evalset, batch_size=128,
# shuffle=False, num_workers=2)
# Accumulators for per-phase accuracy under the three evaluation heads
# (CNN logits, iCaRL exemplar-mean, NCM class-mean) plus the number of
# classes evaluated in each incremental phase.
cnn_cumul_acc = []
icarl_cumul_acc = []
ncm_cumul_acc = []
num_classes = []
nb_cl = args.nb_cl
# The first phase trains nb_cl_fg classes at once, hence the shifted start.
start_iter = int(args.nb_cl_fg/nb_cl)-1
for iteration in range(start_iter, int(args.num_classes/nb_cl)):
    # print("###########################################################")
    # print("For iteration {}".format(iteration))
    # print("###########################################################")
    ckp_name = '{}iteration_{}_model.pth'.format(args.ckp_prefix, iteration)
    class_means_name = '{}iteration_{}_class_means.pth'.format(args.ckp_prefix, iteration)
    # Stop at the first phase that has no saved checkpoint.
    if not os.path.exists(ckp_name):
        break
    tg_model = torch.load(ckp_name)
    # Drop the final fc layer to obtain the feature extractor.
    tg_feature_model = nn.Sequential(*list(tg_model.children())[:-1])
    class_means = torch.load(class_means_name)
    # Keep only the means of classes seen so far, in the learned order.
    current_means = class_means[:, order[:(iteration+1)*nb_cl]]
    # Evaluate on every class encountered up to and including this phase.
    indices = np.array([i in range(0, (iteration+1)*nb_cl) for i in map_input_labels])
    # evalset.test_data = input_data[indices]
    # evalset.test_labels = map_input_labels[indices]
    # print('Max and Min of valid labels: {}, {}'.format(min(evalset.test_labels), max(evalset.test_labels)))
    current_eval_set = merge_images_labels(input_data[indices], map_input_labels[indices])
    evalset.imgs = evalset.samples = current_eval_set
    evalloader = torch.utils.data.DataLoader(evalset, batch_size=128,
        shuffle=False, num_workers=8, pin_memory=True)
    print("###########################################################")
    acc = compute_accuracy(tg_model, tg_feature_model, current_means, evalloader, print_info=True)
    print("###########################################################")
    cnn_cumul_acc.append(acc[0])
    icarl_cumul_acc.append(acc[1])
    ncm_cumul_acc.append(acc[2])
    num_classes.append((iteration+1)*nb_cl)
# Summary table: per-phase accuracies followed by [last] [mean] [weighted mean].
print("###########################################################")
print('  CNN acc: \t iCaRL acc \t NCM acc')
print("###########################################################")
for i in range(len(cnn_cumul_acc)):
    print("{:.2f} ".format(cnn_cumul_acc[i]), end='')
print("[{:.2f}] ".format(np.mean(cnn_cumul_acc[-1])), end='')
print("[{:.2f}] ".format(np.mean(cnn_cumul_acc)), end='')
print("[{:.2f}] ".format(np.sum(np.array(cnn_cumul_acc)*np.array(num_classes)) / np.sum(num_classes)), end='')
print("")
for i in range(len(icarl_cumul_acc)):
    print("{:.2f} ".format(icarl_cumul_acc[i]), end='')
print("[{:.2f}] ".format(np.mean(icarl_cumul_acc[-1])), end='')
print("[{:.2f}] ".format(np.mean(icarl_cumul_acc)), end='')
print("[{:.2f}] ".format(np.sum(np.array(icarl_cumul_acc)*np.array(num_classes)) / np.sum(num_classes)), end='')
print("")
for i in range(len(cnn_cumul_acc)):
    print("{:.2f} ".format(ncm_cumul_acc[i]), end='')
print("[{:.2f}] ".format(np.mean(ncm_cumul_acc[-1])), end='')
print("[{:.2f}] ".format(np.mean(ncm_cumul_acc)), end='')
print("[{:.2f}] ".format(np.sum(np.array(ncm_cumul_acc)*np.array(num_classes)) / np.sum(num_classes)), end='')
print("")
print("###########################################################")
print("")
print('Number of classes', num_classes)
print("###########################################################")
print("Final acc on all classes")
print("CNN:{:.2f}\t iCaRL:{:.2f}\t NCM:{:.2f}".format(cnn_cumul_acc[-1], icarl_cumul_acc[-1], ncm_cumul_acc[-1]))
print("###########################################################")
print("Average acc in each phase")
print("CNN:{:.2f}\t iCaRL:{:.2f}\t NCM:{:.2f}".format(np.mean(cnn_cumul_acc), np.mean(icarl_cumul_acc), np.mean(ncm_cumul_acc)))
print("###########################################################")
print("Weighted average acc in each phase")
print("CNN:{:.2f}\t iCaRL:{:.2f}\t NCM:{:.2f}".format(
    np.sum(np.array(cnn_cumul_acc)*np.array(num_classes)) / np.sum(num_classes),
    np.sum(np.array(icarl_cumul_acc)*np.array(num_classes)) / np.sum(num_classes),
    np.sum(np.array(ncm_cumul_acc)*np.array(num_classes)) / np.sum(num_classes)
))
print("###########################################################")
CVPR19_Incremental_Learning | CVPR19_Incremental_Learning-master/imagenet-class-incremental/class_incremental_imagenet.py | #!/usr/bin/env python
# coding=utf-8
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.optim import lr_scheduler
import torchvision
from torchvision import datasets, models, transforms
from torch.autograd import Variable
import numpy as np
import time
import os
import sys
import copy
import argparse
from PIL import Image
try:
import cPickle as pickle
except:
import pickle
import utils_pytorch
from utils_imagenet.utils_dataset import split_images_labels
from utils_imagenet.utils_dataset import merge_images_labels
from utils_incremental.compute_features import compute_features
from utils_incremental.compute_accuracy import compute_accuracy
from utils_incremental.compute_confusion_matrix import compute_confusion_matrix
from utils_incremental.incremental_train_and_eval import incremental_train_and_eval
######### Modifiable Settings ##########
parser = argparse.ArgumentParser()
parser.add_argument('--dataset', default='seed_1993_subset_100_imagenet', type=str)
parser.add_argument('--datadir', default='data/seed_1993_subset_100_imagenet/data', type=str)
parser.add_argument('--num_classes', default=100, type=int)
parser.add_argument('--num_workers', default=16, type=int, \
help='the number of workers for loading data')
parser.add_argument('--nb_cl_fg', default=50, type=int, \
help='the number of classes in first group')
parser.add_argument('--nb_cl', default=10, type=int, \
help='Classes per group')
parser.add_argument('--nb_protos', default=20, type=int, \
help='Number of prototypes per class at the end')
parser.add_argument('--nb_runs', default=1, type=int, \
help='Number of runs (random ordering of classes at each run)')
parser.add_argument('--ckp_prefix', default=os.path.basename(sys.argv[0])[:-3], type=str, \
help='Checkpoint prefix')
parser.add_argument('--epochs', default=90, type=int, \
help='Epochs')
parser.add_argument('--T', default=2, type=float, \
help='Temporature for distialltion')
parser.add_argument('--beta', default=0.25, type=float, \
help='Beta for distialltion')
parser.add_argument('--resume', action='store_true', \
help='resume from checkpoint')
parser.add_argument('--fix_budget', action='store_true', \
help='fix budget')
parser.add_argument('--rs_ratio', default=0, type=float, \
help='The ratio for resample')
parser.add_argument('--random_seed', default=1993, type=int, \
help='random seed')
args = parser.parse_args()
########################################
assert(args.nb_cl_fg % args.nb_cl == 0)
assert(args.nb_cl_fg >= args.nb_cl)
train_batch_size = 128 # Batch size for train
test_batch_size = 50 # Batch size for test
eval_batch_size = 128 # Batch size for eval
base_lr = 0.1 # Initial learning rate
lr_strat = [30, 60] # Epochs where learning rate gets decreased
lr_factor = 0.1 # Learning rate decrease factor
custom_weight_decay = 1e-4 # Weight Decay
custom_momentum = 0.9 # Momentum
args.ckp_prefix = '{}_nb_cl_fg_{}_nb_cl_{}_nb_protos_{}'.format(args.ckp_prefix, args.nb_cl_fg, args.nb_cl, args.nb_protos)
np.random.seed(args.random_seed) # Fix the random seed
print(args)
########################################
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
#transform_train = transforms.Compose([
# transforms.RandomCrop(32, padding=4),
# transforms.RandomHorizontalFlip(),
# transforms.ToTensor(),
# transforms.Normalize((0.5071, 0.4866, 0.4409), (0.2009, 0.1984, 0.2023)),
#])
#transform_test = transforms.Compose([
# transforms.ToTensor(),
# transforms.Normalize((0.5071, 0.4866, 0.4409), (0.2009, 0.1984, 0.2023)),
#])
#trainset = torchvision.datasets.CIFAR100(root='./data', train=True,
# download=True, transform=transform_train)
#testset = torchvision.datasets.CIFAR100(root='./data', train=False,
# download=True, transform=transform_test)
#evalset = torchvision.datasets.CIFAR100(root='./data', train=False,
# download=False, transform=transform_test)
# Data loading code
traindir = os.path.join(args.datadir, 'train')
valdir = os.path.join(args.datadir, 'val')
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
trainset = datasets.ImageFolder(
traindir,
transforms.Compose([
transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize,
]))
testset = datasets.ImageFolder(valdir, transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
normalize,
]))
evalset = datasets.ImageFolder(valdir, transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
normalize,
]))
# Initialization
dictionary_size = 1500
top1_acc_list_cumul = np.zeros((int(args.num_classes/args.nb_cl),3,args.nb_runs))
top1_acc_list_ori = np.zeros((int(args.num_classes/args.nb_cl),3,args.nb_runs))
#X_train_total = np.array(trainset.train_data)
#Y_train_total = np.array(trainset.train_labels)
#X_valid_total = np.array(testset.test_data)
#Y_valid_total = np.array(testset.test_labels)
X_train_total, Y_train_total = split_images_labels(trainset.imgs)
X_valid_total, Y_valid_total = split_images_labels(testset.imgs)
# Launch the different runs
for iteration_total in range(args.nb_runs):
# Select the order for the class learning
order_name = "./checkpoint/seed_{}_{}_order_run_{}.pkl".format(args.random_seed, args.dataset, iteration_total)
print("Order name:{}".format(order_name))
if os.path.exists(order_name):
print("Loading orders")
order = utils_pytorch.unpickle(order_name)
else:
print("Generating orders")
order = np.arange(args.num_classes)
np.random.shuffle(order)
utils_pytorch.savepickle(order, order_name)
order_list = list(order)
print(order_list)
# Initialization of the variables for this run
X_valid_cumuls = []
X_protoset_cumuls = []
X_train_cumuls = []
Y_valid_cumuls = []
Y_protoset_cumuls = []
Y_train_cumuls = []
alpha_dr_herding = np.zeros((int(args.num_classes/args.nb_cl),dictionary_size,args.nb_cl),np.float32)
# The following contains all the training samples of the different classes
# because we want to compare our method with the theoretical case where all the training samples are stored
# prototypes = np.zeros((args.num_classes,dictionary_size,X_train_total.shape[1],X_train_total.shape[2],X_train_total.shape[3]))
prototypes = [[] for i in range(args.num_classes)]
for orde in range(args.num_classes):
prototypes[orde] = X_train_total[np.where(Y_train_total==order[orde])]
prototypes = np.array(prototypes)
start_iter = int(args.nb_cl_fg/args.nb_cl)-1
for iteration in range(start_iter, int(args.num_classes/args.nb_cl)):
#init model
if iteration == start_iter:
############################################################
last_iter = 0
############################################################
tg_model = models.resnet18(num_classes=args.nb_cl_fg)
ref_model = None
else:
############################################################
last_iter = iteration
############################################################
#increment classes
ref_model = copy.deepcopy(tg_model)
in_features = tg_model.fc.in_features
out_features = tg_model.fc.out_features
new_fc = nn.Linear(in_features, out_features+args.nb_cl)
new_fc.weight.data[:out_features] = tg_model.fc.weight.data
new_fc.bias.data[:out_features] = tg_model.fc.bias.data
tg_model.fc = new_fc
# Prepare the training data for the current batch of classes
actual_cl = order[range(last_iter*args.nb_cl,(iteration+1)*args.nb_cl)]
indices_train_10 = np.array([i in order[range(last_iter*args.nb_cl,(iteration+1)*args.nb_cl)] for i in Y_train_total])
indices_test_10 = np.array([i in order[range(last_iter*args.nb_cl,(iteration+1)*args.nb_cl)] for i in Y_valid_total])
X_train = X_train_total[indices_train_10]
X_valid = X_valid_total[indices_test_10]
X_valid_cumuls.append(X_valid)
X_train_cumuls.append(X_train)
X_valid_cumul = np.concatenate(X_valid_cumuls)
X_train_cumul = np.concatenate(X_train_cumuls)
Y_train = Y_train_total[indices_train_10]
Y_valid = Y_valid_total[indices_test_10]
Y_valid_cumuls.append(Y_valid)
Y_train_cumuls.append(Y_train)
Y_valid_cumul = np.concatenate(Y_valid_cumuls)
Y_train_cumul = np.concatenate(Y_train_cumuls)
# Add the stored exemplars to the training data
if iteration == start_iter:
X_valid_ori = X_valid
Y_valid_ori = Y_valid
else:
X_protoset = np.concatenate(X_protoset_cumuls)
Y_protoset = np.concatenate(Y_protoset_cumuls)
if args.rs_ratio > 0:
#1/rs_ratio = (len(X_train)+len(X_protoset)*scale_factor)/(len(X_protoset)*scale_factor)
scale_factor = (len(X_train) * args.rs_ratio) / (len(X_protoset) * (1 - args.rs_ratio))
rs_sample_weights = np.concatenate((np.ones(len(X_train)), np.ones(len(X_protoset))*scale_factor))
#number of samples per epoch
#rs_num_samples = len(X_train) + len(X_protoset)
rs_num_samples = int(len(X_train) / (1 - args.rs_ratio))
print("X_train:{}, X_protoset:{}, rs_num_samples:{}".format(len(X_train), len(X_protoset), rs_num_samples))
X_train = np.concatenate((X_train,X_protoset),axis=0)
Y_train = np.concatenate((Y_train,Y_protoset))
# Launch the training loop
print('Batch of classes number {0} arrives ...'.format(iteration+1))
map_Y_train = np.array([order_list.index(i) for i in Y_train])
map_Y_valid_cumul = np.array([order_list.index(i) for i in Y_valid_cumul])
############################################################
#trainset.train_data = X_train.astype('uint8')
#trainset.train_labels = map_Y_train
current_train_imgs = merge_images_labels(X_train, map_Y_train)
trainset.imgs = trainset.samples = current_train_imgs
if iteration > start_iter and args.rs_ratio > 0 and scale_factor > 1:
print("Weights from sampling:", rs_sample_weights)
index1 = np.where(rs_sample_weights>1)[0]
index2 = np.where(map_Y_train<iteration*args.nb_cl)[0]
assert((index1==index2).all())
train_sampler = torch.utils.data.sampler.WeightedRandomSampler(rs_sample_weights, rs_num_samples)
#trainloader = torch.utils.data.DataLoader(trainset, batch_size=train_batch_size, \
# shuffle=False, sampler=train_sampler, num_workers=2)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=train_batch_size, \
shuffle=False, sampler=train_sampler, num_workers=args.num_workers, pin_memory=True)
else:
#trainloader = torch.utils.data.DataLoader(trainset, batch_size=train_batch_size,
# shuffle=True, num_workers=2)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=train_batch_size,
shuffle=True, num_workers=args.num_workers, pin_memory=True)
#testset.test_data = X_valid_cumul.astype('uint8')
#testset.test_labels = map_Y_valid_cumul
current_test_images = merge_images_labels(X_valid_cumul, map_Y_valid_cumul)
testset.imgs = testset.samples = current_test_images
testloader = torch.utils.data.DataLoader(testset, batch_size=test_batch_size,
shuffle=False, num_workers=2)
print('Max and Min of train labels: {}, {}'.format(min(map_Y_train), max(map_Y_train)))
print('Max and Min of valid labels: {}, {}'.format(min(map_Y_valid_cumul), max(map_Y_valid_cumul)))
##############################################################
ckp_name = './checkpoint/{}_run_{}_iteration_{}_model.pth'.format(args.ckp_prefix, iteration_total, iteration)
print('ckp_name', ckp_name)
if args.resume and os.path.exists(ckp_name):
print("###############################")
print("Loading models from checkpoint")
tg_model = torch.load(ckp_name)
print("###############################")
else:
tg_params = tg_model.parameters()
tg_model = tg_model.to(device)
if iteration > start_iter:
ref_model = ref_model.to(device)
tg_optimizer = optim.SGD(tg_params, lr=base_lr, momentum=custom_momentum, weight_decay=custom_weight_decay)
tg_lr_scheduler = lr_scheduler.MultiStepLR(tg_optimizer, milestones=lr_strat, gamma=lr_factor)
tg_model = incremental_train_and_eval(args.epochs, tg_model, ref_model, tg_optimizer, tg_lr_scheduler, \
trainloader, testloader, \
iteration, start_iter, \
args.T, args.beta)
if not os.path.isdir('checkpoint'):
os.mkdir('checkpoint')
torch.save(tg_model, ckp_name)
### Exemplars
if args.fix_budget:
nb_protos_cl = int(np.ceil(args.nb_protos*args.num_classes*1.0/args.nb_cl/(iteration+1)))
else:
nb_protos_cl = args.nb_protos
tg_feature_model = nn.Sequential(*list(tg_model.children())[:-1])
num_features = tg_model.fc.in_features
# Herding
print('Updating exemplar set...')
for iter_dico in range(last_iter*args.nb_cl, (iteration+1)*args.nb_cl):
# Possible exemplars in the feature space and projected on the L2 sphere
# evalset.test_data = prototypes[iter_dico].astype('uint8')
# evalset.test_labels = np.zeros(evalset.test_data.shape[0]) #zero labels
current_eval_set = merge_images_labels(prototypes[iter_dico], np.zeros(len(prototypes[iter_dico])))
evalset.imgs = evalset.samples = current_eval_set
evalloader = torch.utils.data.DataLoader(evalset, batch_size=eval_batch_size,
shuffle=False, num_workers=args.num_workers, pin_memory=True)
num_samples = len(prototypes[iter_dico])
mapped_prototypes = compute_features(tg_feature_model, evalloader, num_samples, num_features)
D = mapped_prototypes.T
D = D/np.linalg.norm(D,axis=0)
# Herding procedure : ranking of the potential exemplars
mu = np.mean(D,axis=1)
index1 = int(iter_dico/args.nb_cl)
index2 = iter_dico % args.nb_cl
alpha_dr_herding[index1,:,index2] = alpha_dr_herding[index1,:,index2]*0
w_t = mu
iter_herding = 0
iter_herding_eff = 0
while not(np.sum(alpha_dr_herding[index1,:,index2]!=0)==min(nb_protos_cl,500)) and iter_herding_eff<1000:
tmp_t = np.dot(w_t,D)
ind_max = np.argmax(tmp_t)
iter_herding_eff += 1
if alpha_dr_herding[index1,ind_max,index2] == 0:
alpha_dr_herding[index1,ind_max,index2] = 1+iter_herding
iter_herding += 1
w_t = w_t+mu-D[:,ind_max]
# Prepare the protoset
X_protoset_cumuls = []
Y_protoset_cumuls = []
# Class means for iCaRL and NCM + Storing the selected exemplars in the protoset
print('Computing mean-of_exemplars and theoretical mean...')
# class_means = np.zeros((64,100,2))
class_means = np.zeros((num_features, args.num_classes, 2))
for iteration2 in range(iteration+1):
for iter_dico in range(args.nb_cl):
current_cl = order[range(iteration2*args.nb_cl,(iteration2+1)*args.nb_cl)]
# Collect data in the feature space for each class
# evalset.test_data = prototypes[iteration2*args.nb_cl+iter_dico].astype('uint8')
# evalset.test_labels = np.zeros(evalset.test_data.shape[0]) #zero labels
current_eval_set = merge_images_labels(prototypes[iteration2*args.nb_cl+iter_dico], \
np.zeros(len(prototypes[iteration2*args.nb_cl+iter_dico])))
evalset.imgs = evalset.samples = current_eval_set
evalloader = torch.utils.data.DataLoader(evalset, batch_size=eval_batch_size,
shuffle=False, num_workers=args.num_workers, pin_memory=True)
num_samples = len(prototypes[iteration2*args.nb_cl+iter_dico])
mapped_prototypes = compute_features(tg_feature_model, evalloader, num_samples, num_features)
D = mapped_prototypes.T
D = D/np.linalg.norm(D,axis=0)
# Flipped version also
# evalset.test_data = prototypes[iteration2*args.nb_cl+iter_dico][:,:,:,::-1].astype('uint8')
# evalloader = torch.utils.data.DataLoader(evalset, batch_size=eval_batch_size,
# shuffle=False, num_workers=2)
# mapped_prototypes2 = compute_features(tg_feature_model, evalloader, num_samples, num_features)
# D2 = mapped_prototypes2.T
# D2 = D2/np.linalg.norm(D2,axis=0)
D2 = D
# iCaRL
alph = alpha_dr_herding[iteration2,:,iter_dico]
assert((alph[num_samples:]==0).all())
alph = alph[:num_samples]
alph = (alph>0)*(alph<nb_protos_cl+1)*1.
# X_protoset_cumuls.append(prototypes[iteration2*args.nb_cl+iter_dico,np.where(alph==1)[0]])
X_protoset_cumuls.append(prototypes[iteration2*args.nb_cl+iter_dico][np.where(alph==1)[0]])
Y_protoset_cumuls.append(order[iteration2*args.nb_cl+iter_dico]*np.ones(len(np.where(alph==1)[0])))
alph = alph/np.sum(alph)
class_means[:,current_cl[iter_dico],0] = (np.dot(D,alph)+np.dot(D2,alph))/2
class_means[:,current_cl[iter_dico],0] /= np.linalg.norm(class_means[:,current_cl[iter_dico],0])
# Normal NCM
# alph = np.ones(dictionary_size)/dictionary_size
alph = np.ones(num_samples)/num_samples
class_means[:,current_cl[iter_dico],1] = (np.dot(D,alph)+np.dot(D2,alph))/2
class_means[:,current_cl[iter_dico],1] /= np.linalg.norm(class_means[:,current_cl[iter_dico],1])
torch.save(class_means, \
'./checkpoint/{}_run_{}_iteration_{}_class_means.pth'.format(args.ckp_prefix,iteration_total, iteration))
current_means = class_means[:, order[range(0,(iteration+1)*args.nb_cl)]]
##############################################################
# Calculate validation error of model on the first nb_cl classes:
map_Y_valid_ori = np.array([order_list.index(i) for i in Y_valid_ori])
print('Computing accuracy on the original batch of classes...')
# evalset.test_data = X_valid_ori.astype('uint8')
# evalset.test_labels = map_Y_valid_ori
current_eval_set = merge_images_labels(X_valid_ori, map_Y_valid_ori)
evalset.imgs = evalset.samples = current_eval_set
evalloader = torch.utils.data.DataLoader(evalset, batch_size=eval_batch_size,
shuffle=False, num_workers=args.num_workers, pin_memory=True)
ori_acc = compute_accuracy(tg_model, tg_feature_model, current_means, evalloader)
top1_acc_list_ori[iteration, :, iteration_total] = np.array(ori_acc).T
##############################################################
# Calculate validation error of model on the cumul of classes:
map_Y_valid_cumul = np.array([order_list.index(i) for i in Y_valid_cumul])
print('Computing cumulative accuracy...')
# evalset.test_data = X_valid_cumul.astype('uint8')
# evalset.test_labels = map_Y_valid_cumul
current_eval_set = merge_images_labels(X_valid_cumul, map_Y_valid_cumul)
evalset.imgs = evalset.samples = current_eval_set
evalloader = torch.utils.data.DataLoader(evalset, batch_size=eval_batch_size,
shuffle=False, num_workers=args.num_workers, pin_memory=True)
cumul_acc = compute_accuracy(tg_model, tg_feature_model, current_means, evalloader)
top1_acc_list_cumul[iteration, :, iteration_total] = np.array(cumul_acc).T
##############################################################
# Calculate confusion matrix
print('Computing confusion matrix...')
cm = compute_confusion_matrix(tg_model, tg_feature_model, current_means, evalloader)
cm_name = './checkpoint/{}_run_{}_iteration_{}_confusion_matrix.pth'.format(args.ckp_prefix,iteration_total, iteration)
with open(cm_name, 'wb') as f:
pickle.dump(cm, f, 2) #for reading with Python 2
##############################################################
# Final save of the data
torch.save(top1_acc_list_ori, \
'./checkpoint/{}_run_{}_top1_acc_list_ori.pth'.format(args.ckp_prefix, iteration_total))
torch.save(top1_acc_list_cumul, \
'./checkpoint/{}_run_{}_top1_acc_list_cumul.pth'.format(args.ckp_prefix, iteration_total))
| 22,015 | 52.307506 | 132 | py |
CVPR19_Incremental_Learning | CVPR19_Incremental_Learning-master/imagenet-class-incremental/modified_resnet.py | import torch.nn as nn
import math
import torch.utils.model_zoo as model_zoo
import modified_linear
def conv3x3(in_planes, out_planes, stride=1):
    """Return a 3x3 convolution with padding 1 and no bias.

    Padding 1 preserves spatial size at stride 1; the bias is omitted
    because every conv in this network is followed by a BatchNorm layer.
    """
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=1,
        bias=False,
    )
class BasicBlock(nn.Module):
    """Two-conv residual block in the ResNet-18/34 style.

    When ``last`` is True the trailing ReLU after the residual addition is
    skipped, so the block's output keeps negative values (used for the
    final block feeding the cosine classifier).
    """

    expansion = 1

    def __init__(self, inplanes, planes, stride=1, downsample=None, last=False):
        super(BasicBlock, self).__init__()
        # NOTE: submodules are created in the same order as the reference
        # implementation so parameter registration (and hence seeded weight
        # initialisation) is unchanged.
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = nn.BatchNorm2d(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = nn.BatchNorm2d(planes)
        self.downsample = downsample
        self.stride = stride
        self.last = last

    def forward(self, x):
        # Shortcut branch: identity, or a projection when shapes change.
        shortcut = x if self.downsample is None else self.downsample(x)

        # Main branch: conv-bn-relu, conv-bn.
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))

        out = out + shortcut
        # The ReLU is dropped in the network's last block so the features
        # stay signed for the cosine classifier.
        if not self.last:
            out = self.relu(out)
        return out
class ResNet(nn.Module):
    """ResNet backbone whose classifier head is a cosine-similarity layer.

    Structurally a standard torchvision-style ResNet, except that:
      * the final classifier is ``modified_linear.CosineLinear`` rather
        than a plain ``nn.Linear``;
      * the very last residual block of ``layer4`` is built with
        ``last=True`` so its trailing ReLU is removed (see ``BasicBlock``),
        keeping signed features for the cosine classifier.
    """

    def __init__(self, block, layers, num_classes=1000):
        # block: residual block class (e.g. BasicBlock)
        # layers: number of blocks in each of the four stages
        # num_classes: output size of the cosine classifier
        self.inplanes = 64
        super(ResNet, self).__init__()
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        # last_phase=True: the final block of layer4 omits its closing ReLU.
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2, last_phase=True)
        # AvgPool2d(7) presumably assumes 224x224 inputs (7x7 feature map
        # at this depth) — TODO confirm against the data pipeline.
        self.avgpool = nn.AvgPool2d(7, stride=1)
        # Cosine-similarity classifier instead of a plain linear layer.
        self.fc = modified_linear.CosineLinear(512 * block.expansion, num_classes)
        # He initialisation for convs; BatchNorm starts as identity.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)

    def _make_layer(self, block, planes, blocks, stride=1, last_phase=False):
        """Stack ``blocks`` residual blocks for one stage.

        The first block gets a 1x1-conv downsample shortcut whenever the
        stride or the channel count changes.  With ``last_phase=True`` the
        final block is created with ``last=True`` (no closing ReLU).
        """
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion),
            )
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = planes * block.expansion
        if last_phase:
            # All but the final block are normal; the final one drops ReLU.
            for i in range(1, blocks-1):
                layers.append(block(self.inplanes, planes))
            layers.append(block(self.inplanes, planes, last=True))
        else:
            for i in range(1, blocks):
                layers.append(block(self.inplanes, planes))
        return nn.Sequential(*layers)

    def forward(self, x):
        """Return classifier scores for a batch of images ``x``."""
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.avgpool(x)
        x = x.view(x.size(0), -1)  # flatten to (batch, features)
        x = self.fc(x)
        return x
def resnet18(pretrained=False, **kwargs):
    """Construct a ResNet-18 model with a cosine-similarity classifier.

    Args:
        pretrained (bool): kept for API compatibility with torchvision;
            no pretrained weights exist for this modified architecture,
            so the flag is currently ignored.
        **kwargs: forwarded to ``ResNet`` (e.g. ``num_classes``).

    Returns:
        ResNet: a randomly initialised model.
    """
    # Fix: the original return line had dataset metadata fused onto it
    # ("return model | 3,850 | ..."), which is a syntax error.
    model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)
    return model
CVPR19_Incremental_Learning | CVPR19_Incremental_Learning-master/imagenet-class-incremental/cbf_class_incremental_cosine_imagenet.py | #!/usr/bin/env python
# coding=utf-8
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.optim import lr_scheduler
import torchvision
from torchvision import datasets, models, transforms
from torch.autograd import Variable
import numpy as np
import time
import os
import sys
import copy
import argparse
from PIL import Image
try:
import cPickle as pickle
except:
import pickle
import math
import modified_resnet
import modified_linear
import utils_pytorch
from utils_imagenet.utils_dataset import split_images_labels
from utils_imagenet.utils_dataset import merge_images_labels
from utils_incremental.compute_features import compute_features
from utils_incremental.compute_accuracy import compute_accuracy
from utils_incremental.compute_confusion_matrix import compute_confusion_matrix
from utils_incremental.incremental_train_and_eval import incremental_train_and_eval
from utils_incremental.incremental_train_and_eval_MS import incremental_train_and_eval_MS
from utils_incremental.incremental_train_and_eval_LF import incremental_train_and_eval_LF
from utils_incremental.incremental_train_and_eval_MR_LF import incremental_train_and_eval_MR_LF
from utils_incremental.incremental_train_and_eval_AMR_LF import incremental_train_and_eval_AMR_LF
######### Modifiable Settings ##########
parser = argparse.ArgumentParser()
parser.add_argument('--dataset', default='seed_1993_subset_100_imagenet', type=str)
parser.add_argument('--datadir', default='data/seed_1993_subset_100_imagenet/data', type=str)
parser.add_argument('--num_classes', default=100, type=int)
parser.add_argument('--num_workers', default=16, type=int, \
help='the number of workers for loading data')
parser.add_argument('--nb_cl_fg', default=50, type=int, \
help='the number of classes in first group')
parser.add_argument('--nb_cl', default=10, type=int, \
help='Classes per group')
parser.add_argument('--nb_protos', default=20, type=int, \
help='Number of prototypes per class at the end')
parser.add_argument('--nb_runs', default=1, type=int, \
help='Number of runs (random ordering of classes at each run)')
parser.add_argument('--ckp_prefix', default=os.path.basename(sys.argv[0])[:-3], type=str, \
help='Checkpoint prefix')
parser.add_argument('--epochs', default=90, type=int, \
help='Epochs')
parser.add_argument('--T', default=2, type=float, \
help='Temporature for distialltion')
parser.add_argument('--beta', default=0.25, type=float, \
help='Beta for distialltion')
parser.add_argument('--resume', action='store_true', \
help='resume from checkpoint')
parser.add_argument('--fix_budget', action='store_true', \
help='fix budget')
########################################
parser.add_argument('--mimic_score', action='store_true', \
help='To mimic scores for cosine embedding')
parser.add_argument('--lw_ms', default=1, type=float, \
help='loss weight for mimicking score')
########################################
#improved class incremental learning
parser.add_argument('--rs_ratio', default=0, type=float, \
help='The ratio for resample')
parser.add_argument('--imprint_weights', action='store_true', \
help='Imprint the weights for novel classes')
parser.add_argument('--less_forget', action='store_true', \
help='Less forgetful')
parser.add_argument('--lamda', default=5, type=float, \
help='Lamda for LF')
parser.add_argument('--adapt_lamda', action='store_true', \
help='Adaptively change lamda')
parser.add_argument('--mr_loss', action='store_true', \
help='Margin ranking loss v1')
parser.add_argument('--amr_loss', action='store_true', \
help='Margin ranking loss v2')
parser.add_argument('--dist', default=0.5, type=float, \
help='Dist for MarginRankingLoss')
parser.add_argument('--K', default=2, type=int, \
help='K for MarginRankingLoss')
parser.add_argument('--lw_mr', default=1, type=float, \
help='loss weight for margin ranking loss')
########################################
parser.add_argument('--random_seed', default=1993, type=int, \
help='random seed')
########################################
parser.add_argument('--cb_finetune', action='store_true', \
help='class balance finetune')
parser.add_argument('--ft_epochs', default=20, type=int, \
help='Epochs for class balance finetune')
parser.add_argument('--ft_base_lr', default=0.01, type=float, \
help='Base learning rate for class balance finetune')
parser.add_argument('--ft_lr_strat', default=[10], type=int, nargs='+', \
help='Lr_strat for class balance finetune')
parser.add_argument('--ft_flag', default=2, type=int, \
help='Flag for class balance finetune')
args = parser.parse_args()
########################################
# Sanity checks: the first group of classes must be a whole number of
# increments and at least one increment large.
assert(args.nb_cl_fg % args.nb_cl == 0)
assert(args.nb_cl_fg >= args.nb_cl)
# Fixed training hyperparameters (not exposed on the command line).
train_batch_size = 128 # Batch size for train
test_batch_size = 50 # Batch size for test
eval_batch_size = 128 # Batch size for eval
base_lr = 0.1 # Initial learning rate
lr_strat = [30, 60] # Epochs where learning rate gets decreased
lr_factor = 0.1 # Learning rate decrease factor
custom_weight_decay = 1e-4 # Weight Decay
custom_momentum = 0.9 # Momentum
# Encode the incremental setup into the checkpoint prefix so runs with
# different settings don't overwrite each other.
args.ckp_prefix = '{}_nb_cl_fg_{}_nb_cl_{}_nb_protos_{}'.format(args.ckp_prefix, args.nb_cl_fg, args.nb_cl, args.nb_protos)
np.random.seed(args.random_seed) # Fix the random seed
print(args)
########################################
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
#transform_train = transforms.Compose([
# transforms.RandomCrop(32, padding=4),
# transforms.RandomHorizontalFlip(),
# transforms.ToTensor(),
# transforms.Normalize((0.5071, 0.4866, 0.4409), (0.2009, 0.1984, 0.2023)),
#])
#transform_test = transforms.Compose([
# transforms.ToTensor(),
# transforms.Normalize((0.5071, 0.4866, 0.4409), (0.2009, 0.1984, 0.2023)),
#])
#trainset = torchvision.datasets.CIFAR100(root='./data', train=True,
# download=True, transform=transform_train)
#testset = torchvision.datasets.CIFAR100(root='./data', train=False,
# download=True, transform=transform_test)
#evalset = torchvision.datasets.CIFAR100(root='./data', train=False,
# download=False, transform=transform_test)
# Data loading code
traindir = os.path.join(args.datadir, 'train')
valdir = os.path.join(args.datadir, 'val')
# Standard ImageNet channel statistics.
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225])
# Training pipeline with augmentation (random crop + horizontal flip).
trainset = datasets.ImageFolder(
    traindir,
    transforms.Compose([
        transforms.RandomResizedCrop(224),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        normalize,
    ]))
# Deterministic evaluation pipeline (resize + center crop).
testset = datasets.ImageFolder(valdir, transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        normalize,
    ]))
# Separate ImageFolder instance: its .imgs/.samples are repeatedly
# overwritten later to evaluate on different subsets.
evalset = datasets.ImageFolder(valdir, transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        normalize,
    ]))
# Initialization
# Upper bound on training samples per class (used to size the herding matrix).
dictionary_size = 1500
# Accuracy logs: (incremental step, {3 eval protocols}, run index).
top1_acc_list_cumul = np.zeros((int(args.num_classes/args.nb_cl),3,args.nb_runs))
top1_acc_list_ori = np.zeros((int(args.num_classes/args.nb_cl),3,args.nb_runs))
#X_train_total = np.array(trainset.train_data)
#Y_train_total = np.array(trainset.train_labels)
#X_valid_total = np.array(testset.test_data)
#Y_valid_total = np.array(testset.test_labels)
# Split (path, label) pairs into parallel arrays of paths and labels.
X_train_total, Y_train_total = split_images_labels(trainset.imgs)
X_valid_total, Y_valid_total = split_images_labels(testset.imgs)
# Launch the different runs
for iteration_total in range(args.nb_runs):
# Select the order for the class learning
order_name = "./checkpoint/seed_{}_{}_order_run_{}.pkl".format(args.random_seed, args.dataset, iteration_total)
print("Order name:{}".format(order_name))
if os.path.exists(order_name):
print("Loading orders")
order = utils_pytorch.unpickle(order_name)
else:
print("Generating orders")
order = np.arange(args.num_classes)
np.random.shuffle(order)
utils_pytorch.savepickle(order, order_name)
order_list = list(order)
print(order_list)
# Initialization of the variables for this run
X_valid_cumuls = []
X_protoset_cumuls = []
X_train_cumuls = []
Y_valid_cumuls = []
Y_protoset_cumuls = []
Y_train_cumuls = []
alpha_dr_herding = np.zeros((int(args.num_classes/args.nb_cl),dictionary_size,args.nb_cl),np.float32)
# The following contains all the training samples of the different classes
# because we want to compare our method with the theoretical case where all the training samples are stored
# prototypes = np.zeros((args.num_classes,dictionary_size,X_train_total.shape[1],X_train_total.shape[2],X_train_total.shape[3]))
prototypes = [[] for i in range(args.num_classes)]
for orde in range(args.num_classes):
prototypes[orde] = X_train_total[np.where(Y_train_total==order[orde])]
prototypes = np.array(prototypes)
start_iter = int(args.nb_cl_fg/args.nb_cl)-1
for iteration in range(start_iter, int(args.num_classes/args.nb_cl)):
#init model
if iteration == start_iter:
############################################################
last_iter = 0
############################################################
tg_model = modified_resnet.resnet18(num_classes=args.nb_cl_fg)
in_features = tg_model.fc.in_features
out_features = tg_model.fc.out_features
print("in_features:", in_features, "out_features:", out_features)
ref_model = None
elif iteration == start_iter+1:
############################################################
last_iter = iteration
############################################################
#increment classes
ref_model = copy.deepcopy(tg_model)
in_features = tg_model.fc.in_features
out_features = tg_model.fc.out_features
print("in_features:", in_features, "out_features:", out_features)
new_fc = modified_linear.SplitCosineLinear(in_features, out_features, args.nb_cl)
new_fc.fc1.weight.data = tg_model.fc.weight.data
new_fc.sigma.data = tg_model.fc.sigma.data
tg_model.fc = new_fc
lamda_mult = out_features*1.0 / args.nb_cl
else:
############################################################
last_iter = iteration
############################################################
ref_model = copy.deepcopy(tg_model)
in_features = tg_model.fc.in_features
out_features1 = tg_model.fc.fc1.out_features
out_features2 = tg_model.fc.fc2.out_features
print("in_features:", in_features, "out_features1:", \
out_features1, "out_features2:", out_features2)
new_fc = modified_linear.SplitCosineLinear(in_features, out_features1+out_features2, args.nb_cl)
new_fc.fc1.weight.data[:out_features1] = tg_model.fc.fc1.weight.data
new_fc.fc1.weight.data[out_features1:] = tg_model.fc.fc2.weight.data
new_fc.sigma.data = tg_model.fc.sigma.data
tg_model.fc = new_fc
lamda_mult = (out_features1+out_features2)*1.0 / (args.nb_cl)
if iteration > start_iter and args.less_forget and args.adapt_lamda:
#cur_lamda = lamda_base * sqrt(num_old_classes/num_new_classes)
cur_lamda = args.lamda * math.sqrt(lamda_mult)
else:
cur_lamda = args.lamda
if iteration > start_iter and args.less_forget:
print("###############################")
print("Lamda for less forget is set to ", cur_lamda)
print("###############################")
# Prepare the training data for the current batch of classes
actual_cl = order[range(last_iter*args.nb_cl,(iteration+1)*args.nb_cl)]
indices_train_10 = np.array([i in order[range(last_iter*args.nb_cl,(iteration+1)*args.nb_cl)] for i in Y_train_total])
indices_test_10 = np.array([i in order[range(last_iter*args.nb_cl,(iteration+1)*args.nb_cl)] for i in Y_valid_total])
X_train = X_train_total[indices_train_10]
X_valid = X_valid_total[indices_test_10]
X_valid_cumuls.append(X_valid)
X_train_cumuls.append(X_train)
X_valid_cumul = np.concatenate(X_valid_cumuls)
X_train_cumul = np.concatenate(X_train_cumuls)
Y_train = Y_train_total[indices_train_10]
Y_valid = Y_valid_total[indices_test_10]
Y_valid_cumuls.append(Y_valid)
Y_train_cumuls.append(Y_train)
Y_valid_cumul = np.concatenate(Y_valid_cumuls)
Y_train_cumul = np.concatenate(Y_train_cumuls)
# Add the stored exemplars to the training data
if iteration == start_iter:
X_valid_ori = X_valid
Y_valid_ori = Y_valid
else:
X_protoset = np.concatenate(X_protoset_cumuls)
Y_protoset = np.concatenate(Y_protoset_cumuls)
if args.rs_ratio > 0:
#1/rs_ratio = (len(X_train)+len(X_protoset)*scale_factor)/(len(X_protoset)*scale_factor)
scale_factor = (len(X_train) * args.rs_ratio) / (len(X_protoset) * (1 - args.rs_ratio))
rs_sample_weights = np.concatenate((np.ones(len(X_train)), np.ones(len(X_protoset))*scale_factor))
#number of samples per epoch
#rs_num_samples = len(X_train) + len(X_protoset)
rs_num_samples = int(len(X_train) / (1 - args.rs_ratio))
print("X_train:{}, X_protoset:{}, rs_num_samples:{}".format(len(X_train), len(X_protoset), rs_num_samples))
X_train = np.concatenate((X_train,X_protoset),axis=0)
Y_train = np.concatenate((Y_train,Y_protoset))
# Launch the training loop
print('Batch of classes number {0} arrives ...'.format(iteration+1))
map_Y_train = np.array([order_list.index(i) for i in Y_train])
map_Y_valid_cumul = np.array([order_list.index(i) for i in Y_valid_cumul])
#imprint weights
if iteration > start_iter and args.imprint_weights:
#input: tg_model, X_train, map_Y_train
#class_start = iteration*nb_cl class_end = (iteration+1)*nb_cl
print("Imprint weights")
#########################################
#compute the average norm of old embdding
old_embedding_norm = tg_model.fc.fc1.weight.data.norm(dim=1, keepdim=True)
average_old_embedding_norm = torch.mean(old_embedding_norm, dim=0).to('cpu').type(torch.DoubleTensor)
#########################################
tg_feature_model = nn.Sequential(*list(tg_model.children())[:-1])
num_features = tg_model.fc.in_features
novel_embedding = torch.zeros((args.nb_cl, num_features))
for cls_idx in range(iteration*args.nb_cl, (iteration+1)*args.nb_cl):
cls_indices = np.array([i == cls_idx for i in map_Y_train])
assert(len(np.where(cls_indices==1)[0])<=dictionary_size)
#evalset.test_data = X_train[cls_indices].astype('uint8')
#evalset.test_labels = np.zeros(evalset.test_data.shape[0]) #zero labels
current_eval_set = merge_images_labels(X_train[cls_indices], np.zeros(len(X_train[cls_indices])))
evalset.imgs = evalset.samples = current_eval_set
evalloader = torch.utils.data.DataLoader(evalset, batch_size=eval_batch_size,
shuffle=False, num_workers=2)
num_samples = len(X_train[cls_indices])
cls_features = compute_features(tg_feature_model, evalloader, num_samples, num_features)
#cls_features = cls_features.T
#cls_features = cls_features / np.linalg.norm(cls_features,axis=0)
#cls_embedding = np.mean(cls_features, axis=1)
norm_features = F.normalize(torch.from_numpy(cls_features), p=2, dim=1)
cls_embedding = torch.mean(norm_features, dim=0)
#novel_embedding[cls_idx-iteration*args.nb_cl] = cls_embedding
novel_embedding[cls_idx-iteration*args.nb_cl] = F.normalize(cls_embedding, p=2, dim=0) * average_old_embedding_norm
tg_model.to(device)
#torch.save(tg_model, "tg_model_before_imprint_weights.pth")
tg_model.fc.fc2.weight.data = novel_embedding.to(device)
#torch.save(tg_model, "tg_model_after_imprint_weights.pth")
############################################################
#trainset.train_data = X_train.astype('uint8')
#trainset.train_labels = map_Y_train
current_train_imgs = merge_images_labels(X_train, map_Y_train)
trainset.imgs = trainset.samples = current_train_imgs
if iteration > start_iter and args.rs_ratio > 0 and scale_factor > 1:
print("Weights from sampling:", rs_sample_weights)
index1 = np.where(rs_sample_weights>1)[0]
index2 = np.where(map_Y_train<iteration*args.nb_cl)[0]
assert((index1==index2).all())
train_sampler = torch.utils.data.sampler.WeightedRandomSampler(rs_sample_weights, rs_num_samples)
#trainloader = torch.utils.data.DataLoader(trainset, batch_size=train_batch_size, \
# shuffle=False, sampler=train_sampler, num_workers=2)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=train_batch_size, \
shuffle=False, sampler=train_sampler, num_workers=args.num_workers, pin_memory=True)
else:
#trainloader = torch.utils.data.DataLoader(trainset, batch_size=train_batch_size,
# shuffle=True, num_workers=2)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=train_batch_size,
shuffle=True, num_workers=args.num_workers, pin_memory=True)
#testset.test_data = X_valid_cumul.astype('uint8')
#testset.test_labels = map_Y_valid_cumul
current_test_imgs = merge_images_labels(X_valid_cumul, map_Y_valid_cumul)
testset.imgs = testset.samples = current_test_imgs
testloader = torch.utils.data.DataLoader(testset, batch_size=test_batch_size,
shuffle=False, num_workers=2)
print('Max and Min of train labels: {}, {}'.format(min(map_Y_train), max(map_Y_train)))
print('Max and Min of valid labels: {}, {}'.format(min(map_Y_valid_cumul), max(map_Y_valid_cumul)))
##############################################################
ckp_name = './checkpoint/{}_run_{}_iteration_{}_model.pth'.format(args.ckp_prefix, iteration_total, iteration)
print('ckp_name', ckp_name)
if args.resume and os.path.exists(ckp_name):
print("###############################")
print("Loading models from checkpoint")
tg_model = torch.load(ckp_name)
print("###############################")
else:
###############################
if iteration > start_iter and args.less_forget:
#fix the embedding of old classes
ignored_params = list(map(id, tg_model.fc.fc1.parameters()))
base_params = filter(lambda p: id(p) not in ignored_params, \
tg_model.parameters())
tg_params =[{'params': base_params, 'lr': base_lr, 'weight_decay': custom_weight_decay}, \
{'params': tg_model.fc.fc1.parameters(), 'lr': 0, 'weight_decay': 0}]
else:
tg_params = tg_model.parameters()
###############################
tg_model = tg_model.to(device)
if iteration > start_iter:
ref_model = ref_model.to(device)
tg_optimizer = optim.SGD(tg_params, lr=base_lr, momentum=custom_momentum, weight_decay=custom_weight_decay)
tg_lr_scheduler = lr_scheduler.MultiStepLR(tg_optimizer, milestones=lr_strat, gamma=lr_factor)
###############################
if args.less_forget and args.mr_loss:
print("incremental_train_and_eval_MR_LF")
tg_model = incremental_train_and_eval_MR_LF(args.epochs, tg_model, ref_model, tg_optimizer, tg_lr_scheduler, \
trainloader, testloader, \
iteration, start_iter, \
cur_lamda, \
args.dist, args.K, args.lw_mr)
elif args.less_forget and args.amr_loss:
print("incremental_train_and_eval_AMR_LF")
tg_model = incremental_train_and_eval_AMR_LF(args.epochs, tg_model, ref_model, tg_optimizer, tg_lr_scheduler, \
trainloader, testloader, \
iteration, start_iter, \
cur_lamda, \
args.dist, args.K, args.lw_mr)
else:
if args.less_forget:
print("incremental_train_and_eval_LF")
tg_model = incremental_train_and_eval_LF(args.epochs, tg_model, ref_model, tg_optimizer, tg_lr_scheduler, \
trainloader, testloader, \
iteration, start_iter, \
cur_lamda)
else:
if args.mimic_score:
print("incremental_train_and_eval_MS")
tg_model = incremental_train_and_eval_MS(args.epochs, tg_model, ref_model, tg_optimizer, tg_lr_scheduler, \
trainloader, testloader, \
iteration, start_iter,
args.lw_ms)
else:
print("incremental_train_and_eval")
tg_model = incremental_train_and_eval(args.epochs, tg_model, ref_model, tg_optimizer, tg_lr_scheduler, \
trainloader, testloader, \
iteration, start_iter,
args.T, args.beta)
if not os.path.isdir('checkpoint'):
os.mkdir('checkpoint')
torch.save(tg_model, ckp_name)
        ### Exemplars
        # Per-class exemplar budget: with a fixed total memory the per-class
        # budget shrinks as more class groups arrive; otherwise it is constant.
        if args.fix_budget:
            nb_protos_cl = int(np.ceil(args.nb_protos*args.num_classes*1.0/args.nb_cl/(iteration+1)))
        else:
            nb_protos_cl = args.nb_protos
        # Feature extractor = the full network minus its last child (the
        # classifier head), used below for herding and class means.
        tg_feature_model = nn.Sequential(*list(tg_model.children())[:-1])
        num_features = tg_model.fc.in_features
        # Herding
        print('Updating exemplar set...')
for iter_dico in range(last_iter*args.nb_cl, (iteration+1)*args.nb_cl):
# Possible exemplars in the feature space and projected on the L2 sphere
# evalset.test_data = prototypes[iter_dico].astype('uint8')
# evalset.test_labels = np.zeros(evalset.test_data.shape[0]) #zero labels
current_eval_set = merge_images_labels(prototypes[iter_dico], np.zeros(len(prototypes[iter_dico])))
evalset.imgs = evalset.samples = current_eval_set
evalloader = torch.utils.data.DataLoader(evalset, batch_size=eval_batch_size,
shuffle=False, num_workers=args.num_workers, pin_memory=True)
num_samples = len(prototypes[iter_dico])
mapped_prototypes = compute_features(tg_feature_model, evalloader, num_samples, num_features)
D = mapped_prototypes.T
D = D/np.linalg.norm(D,axis=0)
# Herding procedure : ranking of the potential exemplars
mu = np.mean(D,axis=1)
index1 = int(iter_dico/args.nb_cl)
index2 = iter_dico % args.nb_cl
alpha_dr_herding[index1,:,index2] = alpha_dr_herding[index1,:,index2]*0
w_t = mu
iter_herding = 0
iter_herding_eff = 0
while not(np.sum(alpha_dr_herding[index1,:,index2]!=0)==min(nb_protos_cl,500)) and iter_herding_eff<1000:
tmp_t = np.dot(w_t,D)
ind_max = np.argmax(tmp_t)
iter_herding_eff += 1
if alpha_dr_herding[index1,ind_max,index2] == 0:
alpha_dr_herding[index1,ind_max,index2] = 1+iter_herding
iter_herding += 1
w_t = w_t+mu-D[:,ind_max]
# Prepare the protoset
X_protoset_cumuls = []
Y_protoset_cumuls = []
# Class means for iCaRL and NCM + Storing the selected exemplars in the protoset
print('Computing mean-of_exemplars and theoretical mean...')
# class_means = np.zeros((64,100,2))
class_means = np.zeros((num_features, args.num_classes, 2))
for iteration2 in range(iteration+1):
for iter_dico in range(args.nb_cl):
current_cl = order[range(iteration2*args.nb_cl,(iteration2+1)*args.nb_cl)]
# Collect data in the feature space for each class
# evalset.test_data = prototypes[iteration2*args.nb_cl+iter_dico].astype('uint8')
# evalset.test_labels = np.zeros(evalset.test_data.shape[0]) #zero labels
current_eval_set = merge_images_labels(prototypes[iteration2*args.nb_cl+iter_dico], \
np.zeros(len(prototypes[iteration2*args.nb_cl+iter_dico])))
evalset.imgs = evalset.samples = current_eval_set
evalloader = torch.utils.data.DataLoader(evalset, batch_size=eval_batch_size,
shuffle=False, num_workers=args.num_workers, pin_memory=True)
num_samples = len(prototypes[iteration2*args.nb_cl+iter_dico])
mapped_prototypes = compute_features(tg_feature_model, evalloader, num_samples, num_features)
D = mapped_prototypes.T
D = D/np.linalg.norm(D,axis=0)
# Flipped version also
# evalset.test_data = prototypes[iteration2*args.nb_cl+iter_dico][:,:,:,::-1].astype('uint8')
# evalloader = torch.utils.data.DataLoader(evalset, batch_size=eval_batch_size,
# shuffle=False, num_workers=2)
# mapped_prototypes2 = compute_features(tg_feature_model, evalloader, num_samples, num_features)
# D2 = mapped_prototypes2.T
# D2 = D2/np.linalg.norm(D2,axis=0)
D2 = D
# iCaRL
alph = alpha_dr_herding[iteration2,:,iter_dico]
assert((alph[num_samples:]==0).all())
alph = alph[:num_samples]
alph = (alph>0)*(alph<nb_protos_cl+1)*1.
# X_protoset_cumuls.append(prototypes[iteration2*args.nb_cl+iter_dico,np.where(alph==1)[0]])
X_protoset_cumuls.append(prototypes[iteration2*args.nb_cl+iter_dico][np.where(alph==1)[0]])
Y_protoset_cumuls.append(order[iteration2*args.nb_cl+iter_dico]*np.ones(len(np.where(alph==1)[0])))
alph = alph/np.sum(alph)
class_means[:,current_cl[iter_dico],0] = (np.dot(D,alph)+np.dot(D2,alph))/2
class_means[:,current_cl[iter_dico],0] /= np.linalg.norm(class_means[:,current_cl[iter_dico],0])
# Normal NCM
# alph = np.ones(dictionary_size)/dictionary_size
alph = np.ones(num_samples)/num_samples
class_means[:,current_cl[iter_dico],1] = (np.dot(D,alph)+np.dot(D2,alph))/2
class_means[:,current_cl[iter_dico],1] /= np.linalg.norm(class_means[:,current_cl[iter_dico],1])
# torch.save(class_means, \
# './checkpoint/{}_run_{}_iteration_{}_class_means.pth'.format(args.ckp_prefix,iteration_total, iteration))
class_means_name = './checkpoint/{}_run_{}_iteration_{}_class_means.pth'.format(args.ckp_prefix,iteration_total, iteration)
torch.save(class_means, class_means_name)
current_means = class_means[:, order[range(0,(iteration+1)*args.nb_cl)]]
##############################################################
# Calculate validation error of model on the first nb_cl classes:
map_Y_valid_ori = np.array([order_list.index(i) for i in Y_valid_ori])
print('Computing accuracy on the original batch of classes...')
# evalset.test_data = X_valid_ori.astype('uint8')
# evalset.test_labels = map_Y_valid_ori
current_eval_set = merge_images_labels(X_valid_ori, map_Y_valid_ori)
evalset.imgs = evalset.samples = current_eval_set
evalloader = torch.utils.data.DataLoader(evalset, batch_size=eval_batch_size,
shuffle=False, num_workers=args.num_workers, pin_memory=True)
ori_acc = compute_accuracy(tg_model, tg_feature_model, current_means, evalloader)
top1_acc_list_ori[iteration, :, iteration_total] = np.array(ori_acc).T
##############################################################
# Calculate validation error of model on the cumul of classes:
map_Y_valid_cumul = np.array([order_list.index(i) for i in Y_valid_cumul])
print('Computing cumulative accuracy...')
# evalset.test_data = X_valid_cumul.astype('uint8')
# evalset.test_labels = map_Y_valid_cumul
current_eval_set = merge_images_labels(X_valid_cumul, map_Y_valid_cumul)
evalset.imgs = evalset.samples = current_eval_set
evalloader = torch.utils.data.DataLoader(evalset, batch_size=eval_batch_size,
shuffle=False, num_workers=args.num_workers, pin_memory=True)
cumul_acc = compute_accuracy(tg_model, tg_feature_model, current_means, evalloader)
top1_acc_list_cumul[iteration, :, iteration_total] = np.array(cumul_acc).T
##############################################################
# Calculate confusion matrix
# print('Computing confusion matrix...')
# cm = compute_confusion_matrix(tg_model, tg_feature_model, current_means, evalloader)
# cm_name = './checkpoint/{}_run_{}_iteration_{}_confusion_matrix.pth'.format(args.ckp_prefix,iteration_total, iteration)
# with open(cm_name, 'wb') as f:
# pickle.dump(cm, f, 2) #for reading with Python 2
##############################################################
        # At the first step there are no old classes, so no class-balance
        # finetuning is run; still save copies under the AFTER_CBF_ prefix so
        # downstream evaluation can find a "post-CBF" checkpoint for every step.
        if iteration == start_iter and args.cb_finetune: #for the convenience of evaluation
            torch.save(tg_model, ckp_name.replace("/checkpoint/", "/checkpoint/AFTER_CBF_"))
            torch.save(class_means, class_means_name.replace("/checkpoint/", "/checkpoint/AFTER_CBF_"))
if iteration > start_iter and args.cb_finetune:
# Class balance finetuning on the protoset
print("###############################")
print("Class balance finetuning on the protoset")
print("###############################")
map_Y_protoset_cumuls = np.array([order_list.index(i) for i in np.concatenate(Y_protoset_cumuls)])
current_train_imgs = merge_images_labels(np.concatenate(X_protoset_cumuls), map_Y_protoset_cumuls)
trainset.imgs = trainset.samples = current_train_imgs
trainloader = torch.utils.data.DataLoader(trainset, batch_size=train_batch_size,
shuffle=True, num_workers=args.num_workers, pin_memory=True)
print('Min and Max of train labels: {}, {}'.format(min(map_Y_protoset_cumuls), max(map_Y_protoset_cumuls)))
###############################
print('Computing accuracy on the protoset...')
current_eval_set = merge_images_labels(np.concatenate(X_protoset_cumuls), map_Y_protoset_cumuls)
evalset.imgs = evalset.samples = current_eval_set
evalloader = torch.utils.data.DataLoader(evalset, batch_size=eval_batch_size,
shuffle=False, num_workers=args.num_workers, pin_memory=True)
tg_feature_model = nn.Sequential(*list(tg_model.children())[:-1])
cbf_proto_acc = compute_accuracy(tg_model, tg_feature_model, current_means, evalloader)
###############################
print('Computing accuracy on the old protoset...')
indices = np.array([i in range(0, iteration*args.nb_cl) for i in map_Y_protoset_cumuls])
current_eval_set = merge_images_labels(np.concatenate(X_protoset_cumuls)[indices], map_Y_protoset_cumuls[indices])
evalset.imgs = evalset.samples = current_eval_set
evalloader = torch.utils.data.DataLoader(evalset, batch_size=eval_batch_size,
shuffle=False, num_workers=args.num_workers, pin_memory=True)
print('Min and Max of eval labels: {}, {}'.format(min(map_Y_protoset_cumuls[indices]), max(map_Y_protoset_cumuls[indices])))
tg_feature_model = nn.Sequential(*list(tg_model.children())[:-1])
cbf_proto_acc = compute_accuracy(tg_model, tg_feature_model, current_means, evalloader)
###############################
print('Computing accuracy on the new protoset...')
indices = np.array([i in range(iteration*args.nb_cl, (iteration+1)*args.nb_cl) for i in map_Y_protoset_cumuls])
current_eval_set = merge_images_labels(np.concatenate(X_protoset_cumuls)[indices], map_Y_protoset_cumuls[indices])
evalset.imgs = evalset.samples = current_eval_set
evalloader = torch.utils.data.DataLoader(evalset, batch_size=eval_batch_size,
shuffle=False, num_workers=args.num_workers, pin_memory=True)
print('Min and Max of eval labels: {}, {}'.format(min(map_Y_protoset_cumuls[indices]), max(map_Y_protoset_cumuls[indices])))
tg_feature_model = nn.Sequential(*list(tg_model.children())[:-1])
cbf_proto_acc = compute_accuracy(tg_model, tg_feature_model, current_means, evalloader)
##############################################################
#tg_params = tg_model.parameters()
if args.ft_flag == 0: #everything is not updated
ignored_params = list(map(id, tg_model.fc.parameters()))
base_params = filter(lambda p: id(p) not in ignored_params,
tg_model.parameters())
tg_params =[{'params': base_params, 'lr': 0, 'weight_decay': 0}, \
{'params': tg_model.fc.fc1.parameters(), 'lr': 0, 'weight_decay': 0}, \
{'params': tg_model.fc.fc2.parameters(), 'lr': 0, 'weight_decay': 0}]
fix_bn_flag = True
tg_model = tg_model.to(device)
ref_model = ref_model.to(device)
tg_ft_optimizer = optim.SGD(tg_params, lr=args.ft_base_lr, momentum=custom_momentum, weight_decay=custom_weight_decay)
tg_ft_lr_scheduler = lr_scheduler.MultiStepLR(tg_ft_optimizer, milestones=args.ft_lr_strat, gamma=lr_factor)
tg_model = incremental_train_and_eval_MR_LF(args.ft_epochs, tg_model, ref_model, tg_ft_optimizer, tg_ft_lr_scheduler, \
trainloader, testloader, \
iteration, start_iter, \
cur_lamda, \
args.dist, args.K, args.lw_mr, \
fix_bn=fix_bn_flag)
elif args.ft_flag == 1: #only the novel embeddings are updated with the feature extractor fixed
ignored_params = list(map(id, tg_model.fc.parameters()))
base_params = filter(lambda p: id(p) not in ignored_params,
tg_model.parameters())
tg_params =[{'params': base_params, 'lr': 0, 'weight_decay': 0}, \
{'params': tg_model.fc.fc1.parameters(), 'lr': 0, 'weight_decay': 0}, \
{'params': tg_model.fc.fc2.parameters(), 'lr': args.ft_base_lr, 'weight_decay': custom_weight_decay}]
fix_bn_flag = True
tg_model = tg_model.to(device)
ref_model = ref_model.to(device)
tg_ft_optimizer = optim.SGD(tg_params, lr=args.ft_base_lr, momentum=custom_momentum, weight_decay=custom_weight_decay)
tg_ft_lr_scheduler = lr_scheduler.MultiStepLR(tg_ft_optimizer, milestones=args.ft_lr_strat, gamma=lr_factor)
tg_model = incremental_train_and_eval_MR_LF(args.ft_epochs, tg_model, ref_model, tg_ft_optimizer, tg_ft_lr_scheduler, \
trainloader, testloader, \
iteration, start_iter, \
cur_lamda, \
args.dist, args.K, args.lw_mr, \
fix_bn=fix_bn_flag)
elif args.ft_flag == 2: #both the old and novel embeddings are updated with the feature extractor fixed
ignored_params = list(map(id, tg_model.fc.parameters()))
base_params = filter(lambda p: id(p) not in ignored_params,
tg_model.parameters())
tg_params =[{'params': base_params, 'lr': 0, 'weight_decay': 0}, \
{'params': tg_model.fc.fc1.parameters(), 'lr': args.ft_base_lr, 'weight_decay': custom_weight_decay}, \
{'params': tg_model.fc.fc2.parameters(), 'lr': args.ft_base_lr, 'weight_decay': custom_weight_decay}]
fix_bn_flag = True
tg_model = tg_model.to(device)
ref_model = ref_model.to(device)
tg_ft_optimizer = optim.SGD(tg_params, lr=args.ft_base_lr, momentum=custom_momentum, weight_decay=custom_weight_decay)
tg_ft_lr_scheduler = lr_scheduler.MultiStepLR(tg_ft_optimizer, milestones=args.ft_lr_strat, gamma=lr_factor)
tg_model = incremental_train_and_eval_MR_LF(args.ft_epochs, tg_model, ref_model, tg_ft_optimizer, tg_ft_lr_scheduler, \
trainloader, testloader, \
iteration, start_iter, \
cur_lamda, \
args.dist, args.K, args.lw_mr, \
fix_bn=fix_bn_flag)
elif args.ft_flag == 3: #everything is updated
ignored_params = list(map(id, tg_model.fc.parameters()))
base_params = filter(lambda p: id(p) not in ignored_params,
tg_model.parameters())
tg_params =[{'params': base_params, 'lr': args.ft_base_lr, 'weight_decay': custom_weight_decay}, \
{'params': tg_model.fc.fc1.parameters(), 'lr': args.ft_base_lr, 'weight_decay': custom_weight_decay}, \
{'params': tg_model.fc.fc2.parameters(), 'lr': args.ft_base_lr, 'weight_decay': custom_weight_decay}]
fix_bn_flag = False
tg_model = tg_model.to(device)
ref_model = ref_model.to(device)
tg_ft_optimizer = optim.SGD(tg_params, lr=args.ft_base_lr, momentum=custom_momentum, weight_decay=custom_weight_decay)
tg_ft_lr_scheduler = lr_scheduler.MultiStepLR(tg_ft_optimizer, milestones=args.ft_lr_strat, gamma=lr_factor)
tg_model = incremental_train_and_eval_MR_LF(args.ft_epochs, tg_model, ref_model, tg_ft_optimizer, tg_ft_lr_scheduler, \
trainloader, testloader, \
iteration, start_iter, \
cur_lamda, \
args.dist, args.K, args.lw_mr, \
fix_bn=fix_bn_flag)
#both the old and novel embeddings are updated with the feature extractor fixed
#the MR loss is removed in the CBF (ft_flag=4) and removed in all the training (ft_flag=5)
#the differences lie in the models for CBF
elif args.ft_flag == 4 or args.ft_flag == 5:
ignored_params = list(map(id, tg_model.fc.parameters()))
base_params = filter(lambda p: id(p) not in ignored_params,
tg_model.parameters())
tg_params =[{'params': base_params, 'lr': 0, 'weight_decay': 0}, \
{'params': tg_model.fc.fc1.parameters(), 'lr': args.ft_base_lr, 'weight_decay': custom_weight_decay}, \
{'params': tg_model.fc.fc2.parameters(), 'lr': args.ft_base_lr, 'weight_decay': custom_weight_decay}]
fix_bn_flag = True
tg_model = tg_model.to(device)
ref_model = ref_model.to(device)
tg_ft_optimizer = optim.SGD(tg_params, lr=args.ft_base_lr, momentum=custom_momentum, weight_decay=custom_weight_decay)
tg_ft_lr_scheduler = lr_scheduler.MultiStepLR(tg_ft_optimizer, milestones=args.ft_lr_strat, gamma=lr_factor)
tg_model = incremental_train_and_eval_LF(args.ft_epochs, tg_model, ref_model, tg_ft_optimizer, tg_ft_lr_scheduler, \
trainloader, testloader, \
iteration, start_iter, \
cur_lamda, \
fix_bn=fix_bn_flag)
else:
print("Unknown ft_flag")
sys.exit()
###############################
# tg_model = tg_model.to(device)
# ref_model = ref_model.to(device)
# tg_ft_optimizer = optim.SGD(tg_params, lr=args.ft_base_lr, momentum=custom_momentum, weight_decay=custom_weight_decay)
# tg_ft_lr_scheduler = lr_scheduler.MultiStepLR(tg_ft_optimizer, milestones=args.ft_lr_strat, gamma=lr_factor)
# tg_model = incremental_train_and_eval_MR_LF(args.ft_epochs, tg_model, ref_model, tg_ft_optimizer, tg_ft_lr_scheduler, \
# trainloader, testloader, \
# iteration, start_iter, \
# cur_lamda, \
# args.dist, args.K, args.lw_mr, \
# fix_bn=fix_bn_flag)
torch.save(tg_model, ckp_name.replace("/checkpoint/", "/checkpoint/AFTER_CBF_"))
torch.save(class_means, class_means_name.replace("/checkpoint/", "/checkpoint/AFTER_CBF_"))
###############################################################
###############################
print('Computing accuracy on the protoset...')
current_eval_set = merge_images_labels(np.concatenate(X_protoset_cumuls), map_Y_protoset_cumuls)
evalset.imgs = evalset.samples = current_eval_set
evalloader = torch.utils.data.DataLoader(evalset, batch_size=eval_batch_size,
shuffle=False, num_workers=args.num_workers, pin_memory=True)
tg_feature_model = nn.Sequential(*list(tg_model.children())[:-1])
cbf_proto_acc = compute_accuracy(tg_model, tg_feature_model, current_means, evalloader)
###############################
print('Computing accuracy on the old protoset...')
indices = np.array([i in range(0, iteration*args.nb_cl) for i in map_Y_protoset_cumuls])
current_eval_set = merge_images_labels(np.concatenate(X_protoset_cumuls)[indices], map_Y_protoset_cumuls[indices])
evalset.imgs = evalset.samples = current_eval_set
evalloader = torch.utils.data.DataLoader(evalset, batch_size=eval_batch_size,
shuffle=False, num_workers=args.num_workers, pin_memory=True)
print('Min and Max of eval labels: {}, {}'.format(min(map_Y_protoset_cumuls[indices]), max(map_Y_protoset_cumuls[indices])))
tg_feature_model = nn.Sequential(*list(tg_model.children())[:-1])
cbf_proto_acc = compute_accuracy(tg_model, tg_feature_model, current_means, evalloader)
###############################
print('Computing accuracy on the new protoset...')
indices = np.array([i in range(iteration*args.nb_cl, (iteration+1)*args.nb_cl) for i in map_Y_protoset_cumuls])
current_eval_set = merge_images_labels(np.concatenate(X_protoset_cumuls)[indices], map_Y_protoset_cumuls[indices])
evalset.imgs = evalset.samples = current_eval_set
evalloader = torch.utils.data.DataLoader(evalset, batch_size=eval_batch_size,
shuffle=False, num_workers=args.num_workers, pin_memory=True)
print('Min and Max of eval labels: {}, {}'.format(min(map_Y_protoset_cumuls[indices]), max(map_Y_protoset_cumuls[indices])))
tg_feature_model = nn.Sequential(*list(tg_model.children())[:-1])
cbf_proto_acc = compute_accuracy(tg_model, tg_feature_model, current_means, evalloader)
###############################
##############################################################
# Calculate validation error of model on the first nb_cl classes:
map_Y_valid_ori = np.array([order_list.index(i) for i in Y_valid_ori])
print('Computing accuracy on the original batch of classes...')
# evalset.test_data = X_valid_ori.astype('uint8')
# evalset.test_labels = map_Y_valid_ori
current_eval_set = merge_images_labels(X_valid_ori, map_Y_valid_ori)
evalset.imgs = evalset.samples = current_eval_set
evalloader = torch.utils.data.DataLoader(evalset, batch_size=eval_batch_size,
shuffle=False, num_workers=args.num_workers, pin_memory=True)
ori_acc = compute_accuracy(tg_model, tg_feature_model, current_means, evalloader)
top1_acc_list_ori[iteration, :, iteration_total] = np.array(ori_acc).T
##############################################################
# Calculate validation error of model on the cumul of classes:
map_Y_valid_cumul = np.array([order_list.index(i) for i in Y_valid_cumul])
print('Computing cumulative accuracy...')
# evalset.test_data = X_valid_cumul.astype('uint8')
# evalset.test_labels = map_Y_valid_cumul
current_eval_set = merge_images_labels(X_valid_cumul, map_Y_valid_cumul)
evalset.imgs = evalset.samples = current_eval_set
evalloader = torch.utils.data.DataLoader(evalset, batch_size=eval_batch_size,
shuffle=False, num_workers=args.num_workers, pin_memory=True)
cumul_acc = compute_accuracy(tg_model, tg_feature_model, current_means, evalloader)
top1_acc_list_cumul[iteration, :, iteration_total] = np.array(cumul_acc).T
##############################################################
# Final save of the data
torch.save(top1_acc_list_ori, \
'./checkpoint/{}_run_{}_top1_acc_list_ori.pth'.format(args.ckp_prefix, iteration_total))
torch.save(top1_acc_list_cumul, \
'./checkpoint/{}_run_{}_top1_acc_list_cumul.pth'.format(args.ckp_prefix, iteration_total))
| 47,336 | 60.08 | 136 | py |
CVPR19_Incremental_Learning | CVPR19_Incremental_Learning-master/imagenet-class-incremental/class_incremental_cosine_imagenet.py | #!/usr/bin/env python
# coding=utf-8
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.optim import lr_scheduler
import torchvision
from torchvision import datasets, models, transforms
from torch.autograd import Variable
import numpy as np
import time
import os
import sys
import copy
import argparse
from PIL import Image
try:
import cPickle as pickle
except:
import pickle
import math
import modified_resnet
import modified_linear
import utils_pytorch
from utils_imagenet.utils_dataset import split_images_labels
from utils_imagenet.utils_dataset import merge_images_labels
from utils_incremental.compute_features import compute_features
from utils_incremental.compute_accuracy import compute_accuracy
from utils_incremental.compute_confusion_matrix import compute_confusion_matrix
from utils_incremental.incremental_train_and_eval import incremental_train_and_eval
from utils_incremental.incremental_train_and_eval_MS import incremental_train_and_eval_MS
from utils_incremental.incremental_train_and_eval_LF import incremental_train_and_eval_LF
from utils_incremental.incremental_train_and_eval_MR_LF import incremental_train_and_eval_MR_LF
from utils_incremental.incremental_train_and_eval_AMR_LF import incremental_train_and_eval_AMR_LF
######### Modifiable Settings ##########
parser = argparse.ArgumentParser()
# Dataset / data loading.
parser.add_argument('--dataset', default='seed_1993_subset_100_imagenet', type=str)
parser.add_argument('--datadir', default='data/seed_1993_subset_100_imagenet/data', type=str)
parser.add_argument('--num_classes', default=100, type=int)
parser.add_argument('--num_workers', default=16, type=int,
                    help='the number of workers for loading data')
# Incremental-learning schedule and exemplar memory.
parser.add_argument('--nb_cl_fg', default=50, type=int,
                    help='the number of classes in first group')
parser.add_argument('--nb_cl', default=10, type=int,
                    help='Classes per group')
parser.add_argument('--nb_protos', default=20, type=int,
                    help='Number of prototypes per class at the end')
parser.add_argument('--nb_runs', default=1, type=int,
                    help='Number of runs (random ordering of classes at each run)')
parser.add_argument('--ckp_prefix', default=os.path.basename(sys.argv[0])[:-3], type=str,
                    help='Checkpoint prefix')
parser.add_argument('--epochs', default=90, type=int,
                    help='Epochs')
parser.add_argument('--T', default=2, type=float,
                    help='Temporature for distialltion')
parser.add_argument('--beta', default=0.25, type=float,
                    help='Beta for distialltion')
parser.add_argument('--resume', action='store_true',
                    help='resume from checkpoint')
parser.add_argument('--fix_budget', action='store_true',
                    help='fix budget')
########################################
parser.add_argument('--mimic_score', action='store_true',
                    help='To mimic scores for cosine embedding')
parser.add_argument('--lw_ms', default=1, type=float,
                    help='loss weight for mimicking score')
########################################
# Improved class incremental learning options.
parser.add_argument('--rs_ratio', default=0, type=float,
                    help='The ratio for resample')
parser.add_argument('--imprint_weights', action='store_true',
                    help='Imprint the weights for novel classes')
parser.add_argument('--less_forget', action='store_true',
                    help='Less forgetful')
parser.add_argument('--lamda', default=5, type=float,
                    help='Lamda for LF')
parser.add_argument('--adapt_lamda', action='store_true',
                    help='Adaptively change lamda')
parser.add_argument('--mr_loss', action='store_true',
                    help='Margin ranking loss v1')
parser.add_argument('--amr_loss', action='store_true',
                    help='Margin ranking loss v2')
parser.add_argument('--dist', default=0.5, type=float,
                    help='Dist for MarginRankingLoss')
parser.add_argument('--K', default=2, type=int,
                    help='K for MarginRankingLoss')
parser.add_argument('--lw_mr', default=1, type=float,
                    help='loss weight for margin ranking loss')
########################################
parser.add_argument('--random_seed', default=1993, type=int,
                    help='random seed')
args = parser.parse_args()
########################################
# The first class group must consist of one or more whole incremental batches.
assert(args.nb_cl_fg % args.nb_cl == 0)
assert(args.nb_cl_fg >= args.nb_cl)
train_batch_size = 128 # Batch size for train
test_batch_size = 50 # Batch size for test
eval_batch_size = 128 # Batch size for eval (feature extraction / accuracy)
base_lr = 0.1 # Initial learning rate
lr_strat = [30, 60] # Epochs where learning rate gets decreased
lr_factor = 0.1 # Learning rate decrease factor
custom_weight_decay = 1e-4 # Weight Decay
custom_momentum = 0.9 # Momentum
# Encode the main hyper-parameters into the checkpoint prefix so runs with
# different settings do not overwrite each other's files.
args.ckp_prefix = '{}_nb_cl_fg_{}_nb_cl_{}_nb_protos_{}'.format(args.ckp_prefix, args.nb_cl_fg, args.nb_cl, args.nb_protos)
np.random.seed(args.random_seed) # Fix the random seed (class order is drawn from np.random)
print(args)
########################################
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
#transform_train = transforms.Compose([
# transforms.RandomCrop(32, padding=4),
# transforms.RandomHorizontalFlip(),
# transforms.ToTensor(),
# transforms.Normalize((0.5071, 0.4866, 0.4409), (0.2009, 0.1984, 0.2023)),
#])
#transform_test = transforms.Compose([
# transforms.ToTensor(),
# transforms.Normalize((0.5071, 0.4866, 0.4409), (0.2009, 0.1984, 0.2023)),
#])
#trainset = torchvision.datasets.CIFAR100(root='./data', train=True,
# download=True, transform=transform_train)
#testset = torchvision.datasets.CIFAR100(root='./data', train=False,
# download=True, transform=transform_test)
#evalset = torchvision.datasets.CIFAR100(root='./data', train=False,
# download=False, transform=transform_test)
# Data loading code
traindir = os.path.join(args.datadir, 'train')
valdir = os.path.join(args.datadir, 'val')
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
trainset = datasets.ImageFolder(
traindir,
transforms.Compose([
transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize,
]))
testset = datasets.ImageFolder(valdir, transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
normalize,
]))
evalset = datasets.ImageFolder(valdir, transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
normalize,
]))
# Initialization
# Capacity of the herding rank matrix per class; samples per class must not
# exceed this (the herding code asserts ranks beyond num_samples stay zero).
dictionary_size = 1500
# Accuracy tables indexed [incremental step, metric, run]; the 3 metrics are
# presumably the variants reported by compute_accuracy — confirm there.
top1_acc_list_cumul = np.zeros((int(args.num_classes/args.nb_cl),3,args.nb_runs))
top1_acc_list_ori = np.zeros((int(args.num_classes/args.nb_cl),3,args.nb_runs))
# ImageFolder stores (path, label) tuples; split into parallel arrays.
X_train_total, Y_train_total = split_images_labels(trainset.imgs)
X_valid_total, Y_valid_total = split_images_labels(testset.imgs)
# Launch the different runs
for iteration_total in range(args.nb_runs):
# Select the order for the class learning
order_name = "./checkpoint/seed_{}_{}_order_run_{}.pkl".format(args.random_seed, args.dataset, iteration_total)
print("Order name:{}".format(order_name))
if os.path.exists(order_name):
print("Loading orders")
order = utils_pytorch.unpickle(order_name)
else:
print("Generating orders")
order = np.arange(args.num_classes)
np.random.shuffle(order)
utils_pytorch.savepickle(order, order_name)
order_list = list(order)
print(order_list)
# Initialization of the variables for this run
X_valid_cumuls = []
X_protoset_cumuls = []
X_train_cumuls = []
Y_valid_cumuls = []
Y_protoset_cumuls = []
Y_train_cumuls = []
alpha_dr_herding = np.zeros((int(args.num_classes/args.nb_cl),dictionary_size,args.nb_cl),np.float32)
# The following contains all the training samples of the different classes
# because we want to compare our method with the theoretical case where all the training samples are stored
# prototypes = np.zeros((args.num_classes,dictionary_size,X_train_total.shape[1],X_train_total.shape[2],X_train_total.shape[3]))
prototypes = [[] for i in range(args.num_classes)]
for orde in range(args.num_classes):
prototypes[orde] = X_train_total[np.where(Y_train_total==order[orde])]
prototypes = np.array(prototypes)
start_iter = int(args.nb_cl_fg/args.nb_cl)-1
for iteration in range(start_iter, int(args.num_classes/args.nb_cl)):
#init model
if iteration == start_iter:
############################################################
last_iter = 0
############################################################
tg_model = modified_resnet.resnet18(num_classes=args.nb_cl_fg)
in_features = tg_model.fc.in_features
out_features = tg_model.fc.out_features
print("in_features:", in_features, "out_features:", out_features)
ref_model = None
elif iteration == start_iter+1:
############################################################
last_iter = iteration
############################################################
#increment classes
ref_model = copy.deepcopy(tg_model)
in_features = tg_model.fc.in_features
out_features = tg_model.fc.out_features
print("in_features:", in_features, "out_features:", out_features)
new_fc = modified_linear.SplitCosineLinear(in_features, out_features, args.nb_cl)
new_fc.fc1.weight.data = tg_model.fc.weight.data
new_fc.sigma.data = tg_model.fc.sigma.data
tg_model.fc = new_fc
lamda_mult = out_features*1.0 / args.nb_cl
else:
############################################################
last_iter = iteration
############################################################
ref_model = copy.deepcopy(tg_model)
in_features = tg_model.fc.in_features
out_features1 = tg_model.fc.fc1.out_features
out_features2 = tg_model.fc.fc2.out_features
print("in_features:", in_features, "out_features1:", \
out_features1, "out_features2:", out_features2)
new_fc = modified_linear.SplitCosineLinear(in_features, out_features1+out_features2, args.nb_cl)
new_fc.fc1.weight.data[:out_features1] = tg_model.fc.fc1.weight.data
new_fc.fc1.weight.data[out_features1:] = tg_model.fc.fc2.weight.data
new_fc.sigma.data = tg_model.fc.sigma.data
tg_model.fc = new_fc
lamda_mult = (out_features1+out_features2)*1.0 / (args.nb_cl)
if iteration > start_iter and args.less_forget and args.adapt_lamda:
#cur_lamda = lamda_base * sqrt(num_old_classes/num_new_classes)
cur_lamda = args.lamda * math.sqrt(lamda_mult)
else:
cur_lamda = args.lamda
if iteration > start_iter and args.less_forget:
print("###############################")
print("Lamda for less forget is set to ", cur_lamda)
print("###############################")
# Prepare the training data for the current batch of classes
actual_cl = order[range(last_iter*args.nb_cl,(iteration+1)*args.nb_cl)]
indices_train_10 = np.array([i in order[range(last_iter*args.nb_cl,(iteration+1)*args.nb_cl)] for i in Y_train_total])
indices_test_10 = np.array([i in order[range(last_iter*args.nb_cl,(iteration+1)*args.nb_cl)] for i in Y_valid_total])
X_train = X_train_total[indices_train_10]
X_valid = X_valid_total[indices_test_10]
X_valid_cumuls.append(X_valid)
X_train_cumuls.append(X_train)
X_valid_cumul = np.concatenate(X_valid_cumuls)
X_train_cumul = np.concatenate(X_train_cumuls)
Y_train = Y_train_total[indices_train_10]
Y_valid = Y_valid_total[indices_test_10]
Y_valid_cumuls.append(Y_valid)
Y_train_cumuls.append(Y_train)
Y_valid_cumul = np.concatenate(Y_valid_cumuls)
Y_train_cumul = np.concatenate(Y_train_cumuls)
# Add the stored exemplars to the training data
if iteration == start_iter:
X_valid_ori = X_valid
Y_valid_ori = Y_valid
else:
X_protoset = np.concatenate(X_protoset_cumuls)
Y_protoset = np.concatenate(Y_protoset_cumuls)
if args.rs_ratio > 0:
#1/rs_ratio = (len(X_train)+len(X_protoset)*scale_factor)/(len(X_protoset)*scale_factor)
scale_factor = (len(X_train) * args.rs_ratio) / (len(X_protoset) * (1 - args.rs_ratio))
rs_sample_weights = np.concatenate((np.ones(len(X_train)), np.ones(len(X_protoset))*scale_factor))
#number of samples per epoch
#rs_num_samples = len(X_train) + len(X_protoset)
rs_num_samples = int(len(X_train) / (1 - args.rs_ratio))
print("X_train:{}, X_protoset:{}, rs_num_samples:{}".format(len(X_train), len(X_protoset), rs_num_samples))
X_train = np.concatenate((X_train,X_protoset),axis=0)
Y_train = np.concatenate((Y_train,Y_protoset))
# Launch the training loop
print('Batch of classes number {0} arrives ...'.format(iteration+1))
map_Y_train = np.array([order_list.index(i) for i in Y_train])
map_Y_valid_cumul = np.array([order_list.index(i) for i in Y_valid_cumul])
#imprint weights
if iteration > start_iter and args.imprint_weights:
#input: tg_model, X_train, map_Y_train
#class_start = iteration*nb_cl class_end = (iteration+1)*nb_cl
print("Imprint weights")
#########################################
#compute the average norm of old embdding
old_embedding_norm = tg_model.fc.fc1.weight.data.norm(dim=1, keepdim=True)
average_old_embedding_norm = torch.mean(old_embedding_norm, dim=0).to('cpu').type(torch.DoubleTensor)
#########################################
tg_feature_model = nn.Sequential(*list(tg_model.children())[:-1])
num_features = tg_model.fc.in_features
novel_embedding = torch.zeros((args.nb_cl, num_features))
for cls_idx in range(iteration*args.nb_cl, (iteration+1)*args.nb_cl):
cls_indices = np.array([i == cls_idx for i in map_Y_train])
assert(len(np.where(cls_indices==1)[0])<=dictionary_size)
#evalset.test_data = X_train[cls_indices].astype('uint8')
#evalset.test_labels = np.zeros(evalset.test_data.shape[0]) #zero labels
current_eval_set = merge_images_labels(X_train[cls_indices], np.zeros(len(X_train[cls_indices])))
evalset.imgs = evalset.samples = current_eval_set
evalloader = torch.utils.data.DataLoader(evalset, batch_size=eval_batch_size,
shuffle=False, num_workers=2)
num_samples = len(X_train[cls_indices])
cls_features = compute_features(tg_feature_model, evalloader, num_samples, num_features)
#cls_features = cls_features.T
#cls_features = cls_features / np.linalg.norm(cls_features,axis=0)
#cls_embedding = np.mean(cls_features, axis=1)
norm_features = F.normalize(torch.from_numpy(cls_features), p=2, dim=1)
cls_embedding = torch.mean(norm_features, dim=0)
#novel_embedding[cls_idx-iteration*args.nb_cl] = cls_embedding
novel_embedding[cls_idx-iteration*args.nb_cl] = F.normalize(cls_embedding, p=2, dim=0) * average_old_embedding_norm
tg_model.to(device)
#torch.save(tg_model, "tg_model_before_imprint_weights.pth")
tg_model.fc.fc2.weight.data = novel_embedding.to(device)
#torch.save(tg_model, "tg_model_after_imprint_weights.pth")
############################################################
#trainset.train_data = X_train.astype('uint8')
#trainset.train_labels = map_Y_train
current_train_imgs = merge_images_labels(X_train, map_Y_train)
trainset.imgs = trainset.samples = current_train_imgs
if iteration > start_iter and args.rs_ratio > 0 and scale_factor > 1:
print("Weights from sampling:", rs_sample_weights)
index1 = np.where(rs_sample_weights>1)[0]
index2 = np.where(map_Y_train<iteration*args.nb_cl)[0]
assert((index1==index2).all())
train_sampler = torch.utils.data.sampler.WeightedRandomSampler(rs_sample_weights, rs_num_samples)
#trainloader = torch.utils.data.DataLoader(trainset, batch_size=train_batch_size, \
# shuffle=False, sampler=train_sampler, num_workers=2)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=train_batch_size, \
shuffle=False, sampler=train_sampler, num_workers=args.num_workers, pin_memory=True)
else:
#trainloader = torch.utils.data.DataLoader(trainset, batch_size=train_batch_size,
# shuffle=True, num_workers=2)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=train_batch_size,
shuffle=True, num_workers=args.num_workers, pin_memory=True)
#testset.test_data = X_valid_cumul.astype('uint8')
#testset.test_labels = map_Y_valid_cumul
current_test_imgs = merge_images_labels(X_valid_cumul, map_Y_valid_cumul)
testset.imgs = testset.samples = current_test_imgs
testloader = torch.utils.data.DataLoader(testset, batch_size=test_batch_size,
shuffle=False, num_workers=2)
print('Max and Min of train labels: {}, {}'.format(min(map_Y_train), max(map_Y_train)))
print('Max and Min of valid labels: {}, {}'.format(min(map_Y_valid_cumul), max(map_Y_valid_cumul)))
##############################################################
ckp_name = './checkpoint/{}_run_{}_iteration_{}_model.pth'.format(args.ckp_prefix, iteration_total, iteration)
print('ckp_name', ckp_name)
if args.resume and os.path.exists(ckp_name):
print("###############################")
print("Loading models from checkpoint")
tg_model = torch.load(ckp_name)
print("###############################")
else:
###############################
if iteration > start_iter and args.less_forget:
#fix the embedding of old classes
ignored_params = list(map(id, tg_model.fc.fc1.parameters()))
base_params = filter(lambda p: id(p) not in ignored_params, \
tg_model.parameters())
tg_params =[{'params': base_params, 'lr': base_lr, 'weight_decay': custom_weight_decay}, \
{'params': tg_model.fc.fc1.parameters(), 'lr': 0, 'weight_decay': 0}]
else:
tg_params = tg_model.parameters()
###############################
tg_model = tg_model.to(device)
if iteration > start_iter:
ref_model = ref_model.to(device)
tg_optimizer = optim.SGD(tg_params, lr=base_lr, momentum=custom_momentum, weight_decay=custom_weight_decay)
tg_lr_scheduler = lr_scheduler.MultiStepLR(tg_optimizer, milestones=lr_strat, gamma=lr_factor)
###############################
if args.less_forget and args.mr_loss:
print("incremental_train_and_eval_MR_LF")
tg_model = incremental_train_and_eval_MR_LF(args.epochs, tg_model, ref_model, tg_optimizer, tg_lr_scheduler, \
trainloader, testloader, \
iteration, start_iter, \
cur_lamda, \
args.dist, args.K, args.lw_mr)
elif args.less_forget and args.amr_loss:
print("incremental_train_and_eval_AMR_LF")
tg_model = incremental_train_and_eval_AMR_LF(args.epochs, tg_model, ref_model, tg_optimizer, tg_lr_scheduler, \
trainloader, testloader, \
iteration, start_iter, \
cur_lamda, \
args.dist, args.K, args.lw_mr)
else:
if args.less_forget:
print("incremental_train_and_eval_LF")
tg_model = incremental_train_and_eval_LF(args.epochs, tg_model, ref_model, tg_optimizer, tg_lr_scheduler, \
trainloader, testloader, \
iteration, start_iter, \
cur_lamda)
else:
if args.mimic_score:
print("incremental_train_and_eval_MS")
tg_model = incremental_train_and_eval_MS(args.epochs, tg_model, ref_model, tg_optimizer, tg_lr_scheduler, \
trainloader, testloader, \
iteration, start_iter,
args.lw_ms)
else:
print("incremental_train_and_eval")
tg_model = incremental_train_and_eval(args.epochs, tg_model, ref_model, tg_optimizer, tg_lr_scheduler, \
trainloader, testloader, \
iteration, start_iter,
args.T, args.beta)
if not os.path.isdir('checkpoint'):
os.mkdir('checkpoint')
torch.save(tg_model, ckp_name)
### Exemplars
if args.fix_budget:
nb_protos_cl = int(np.ceil(args.nb_protos*args.num_classes*1.0/args.nb_cl/(iteration+1)))
else:
nb_protos_cl = args.nb_protos
tg_feature_model = nn.Sequential(*list(tg_model.children())[:-1])
num_features = tg_model.fc.in_features
# Herding
print('Updating exemplar set...')
for iter_dico in range(last_iter*args.nb_cl, (iteration+1)*args.nb_cl):
# Possible exemplars in the feature space and projected on the L2 sphere
# evalset.test_data = prototypes[iter_dico].astype('uint8')
# evalset.test_labels = np.zeros(evalset.test_data.shape[0]) #zero labels
current_eval_set = merge_images_labels(prototypes[iter_dico], np.zeros(len(prototypes[iter_dico])))
evalset.imgs = evalset.samples = current_eval_set
evalloader = torch.utils.data.DataLoader(evalset, batch_size=eval_batch_size,
shuffle=False, num_workers=args.num_workers, pin_memory=True)
num_samples = len(prototypes[iter_dico])
mapped_prototypes = compute_features(tg_feature_model, evalloader, num_samples, num_features)
D = mapped_prototypes.T
D = D/np.linalg.norm(D,axis=0)
# Herding procedure : ranking of the potential exemplars
mu = np.mean(D,axis=1)
index1 = int(iter_dico/args.nb_cl)
index2 = iter_dico % args.nb_cl
alpha_dr_herding[index1,:,index2] = alpha_dr_herding[index1,:,index2]*0
w_t = mu
iter_herding = 0
iter_herding_eff = 0
while not(np.sum(alpha_dr_herding[index1,:,index2]!=0)==min(nb_protos_cl,500)) and iter_herding_eff<1000:
tmp_t = np.dot(w_t,D)
ind_max = np.argmax(tmp_t)
iter_herding_eff += 1
if alpha_dr_herding[index1,ind_max,index2] == 0:
alpha_dr_herding[index1,ind_max,index2] = 1+iter_herding
iter_herding += 1
w_t = w_t+mu-D[:,ind_max]
# Prepare the protoset
X_protoset_cumuls = []
Y_protoset_cumuls = []
# Class means for iCaRL and NCM + Storing the selected exemplars in the protoset
print('Computing mean-of_exemplars and theoretical mean...')
# class_means = np.zeros((64,100,2))
class_means = np.zeros((num_features, args.num_classes, 2))
for iteration2 in range(iteration+1):
for iter_dico in range(args.nb_cl):
current_cl = order[range(iteration2*args.nb_cl,(iteration2+1)*args.nb_cl)]
# Collect data in the feature space for each class
# evalset.test_data = prototypes[iteration2*args.nb_cl+iter_dico].astype('uint8')
# evalset.test_labels = np.zeros(evalset.test_data.shape[0]) #zero labels
current_eval_set = merge_images_labels(prototypes[iteration2*args.nb_cl+iter_dico], \
np.zeros(len(prototypes[iteration2*args.nb_cl+iter_dico])))
evalset.imgs = evalset.samples = current_eval_set
evalloader = torch.utils.data.DataLoader(evalset, batch_size=eval_batch_size,
shuffle=False, num_workers=args.num_workers, pin_memory=True)
num_samples = len(prototypes[iteration2*args.nb_cl+iter_dico])
mapped_prototypes = compute_features(tg_feature_model, evalloader, num_samples, num_features)
D = mapped_prototypes.T
D = D/np.linalg.norm(D,axis=0)
# Flipped version also
# evalset.test_data = prototypes[iteration2*args.nb_cl+iter_dico][:,:,:,::-1].astype('uint8')
# evalloader = torch.utils.data.DataLoader(evalset, batch_size=eval_batch_size,
# shuffle=False, num_workers=2)
# mapped_prototypes2 = compute_features(tg_feature_model, evalloader, num_samples, num_features)
# D2 = mapped_prototypes2.T
# D2 = D2/np.linalg.norm(D2,axis=0)
D2 = D
# iCaRL
alph = alpha_dr_herding[iteration2,:,iter_dico]
assert((alph[num_samples:]==0).all())
alph = alph[:num_samples]
alph = (alph>0)*(alph<nb_protos_cl+1)*1.
# X_protoset_cumuls.append(prototypes[iteration2*args.nb_cl+iter_dico,np.where(alph==1)[0]])
X_protoset_cumuls.append(prototypes[iteration2*args.nb_cl+iter_dico][np.where(alph==1)[0]])
Y_protoset_cumuls.append(order[iteration2*args.nb_cl+iter_dico]*np.ones(len(np.where(alph==1)[0])))
alph = alph/np.sum(alph)
class_means[:,current_cl[iter_dico],0] = (np.dot(D,alph)+np.dot(D2,alph))/2
class_means[:,current_cl[iter_dico],0] /= np.linalg.norm(class_means[:,current_cl[iter_dico],0])
# Normal NCM
# alph = np.ones(dictionary_size)/dictionary_size
alph = np.ones(num_samples)/num_samples
class_means[:,current_cl[iter_dico],1] = (np.dot(D,alph)+np.dot(D2,alph))/2
class_means[:,current_cl[iter_dico],1] /= np.linalg.norm(class_means[:,current_cl[iter_dico],1])
torch.save(class_means, \
'./checkpoint/{}_run_{}_iteration_{}_class_means.pth'.format(args.ckp_prefix,iteration_total, iteration))
current_means = class_means[:, order[range(0,(iteration+1)*args.nb_cl)]]
##############################################################
# Calculate validation error of model on the first nb_cl classes:
map_Y_valid_ori = np.array([order_list.index(i) for i in Y_valid_ori])
print('Computing accuracy on the original batch of classes...')
# evalset.test_data = X_valid_ori.astype('uint8')
# evalset.test_labels = map_Y_valid_ori
current_eval_set = merge_images_labels(X_valid_ori, map_Y_valid_ori)
evalset.imgs = evalset.samples = current_eval_set
evalloader = torch.utils.data.DataLoader(evalset, batch_size=eval_batch_size,
shuffle=False, num_workers=args.num_workers, pin_memory=True)
ori_acc = compute_accuracy(tg_model, tg_feature_model, current_means, evalloader)
top1_acc_list_ori[iteration, :, iteration_total] = np.array(ori_acc).T
##############################################################
# Calculate validation error of model on the cumul of classes:
map_Y_valid_cumul = np.array([order_list.index(i) for i in Y_valid_cumul])
print('Computing cumulative accuracy...')
# evalset.test_data = X_valid_cumul.astype('uint8')
# evalset.test_labels = map_Y_valid_cumul
current_eval_set = merge_images_labels(X_valid_cumul, map_Y_valid_cumul)
evalset.imgs = evalset.samples = current_eval_set
evalloader = torch.utils.data.DataLoader(evalset, batch_size=eval_batch_size,
shuffle=False, num_workers=args.num_workers, pin_memory=True)
cumul_acc = compute_accuracy(tg_model, tg_feature_model, current_means, evalloader)
top1_acc_list_cumul[iteration, :, iteration_total] = np.array(cumul_acc).T
##############################################################
# Calculate confusion matrix
# print('Computing confusion matrix...')
# cm = compute_confusion_matrix(tg_model, tg_feature_model, current_means, evalloader)
# cm_name = './checkpoint/{}_run_{}_iteration_{}_confusion_matrix.pth'.format(args.ckp_prefix,iteration_total, iteration)
# with open(cm_name, 'wb') as f:
# pickle.dump(cm, f, 2) #for reading with Python 2
##############################################################
# Final save of the data
torch.save(top1_acc_list_ori, \
'./checkpoint/{}_run_{}_top1_acc_list_ori.pth'.format(args.ckp_prefix, iteration_total))
torch.save(top1_acc_list_cumul, \
'./checkpoint/{}_run_{}_top1_acc_list_cumul.pth'.format(args.ckp_prefix, iteration_total))
| 30,418 | 53.809009 | 132 | py |
CVPR19_Incremental_Learning | CVPR19_Incremental_Learning-master/imagenet-class-incremental/gen_resized_imagenet.py | #!/usr/bin/env python
# coding=utf-8
import argparse
import os
import random
import shutil
import time
import warnings
import numpy as np
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import torchvision.models as models
from PIL import Image
# Source and destination roots; the destination mirrors the ImageNet folder
# layout but holds bilinearly resized 256x256 copies of every image.
src_root_dir = 'data/imagenet/data/'
des_root_dir = 'data/imagenet_resized_256/data/'
if not os.path.exists(des_root_dir):
    os.makedirs(des_root_dir)
phase_list = ['train', 'val']
for phase in phase_list:
    # Recreate the <phase>/<class> directory tree on the destination side.
    if not os.path.exists(os.path.join(des_root_dir, phase)):
        os.mkdir(os.path.join(des_root_dir, phase))
    data_dir = os.path.join(src_root_dir, phase)
    # ImageFolder is used only to enumerate classes and image paths here;
    # no transform is applied and no DataLoader is built.
    tg_dataset = datasets.ImageFolder(data_dir)
    for cls_name in tg_dataset.classes:
        if not os.path.exists(os.path.join(des_root_dir, phase, cls_name)):
            os.mkdir(os.path.join(des_root_dir, phase, cls_name))
    cnt = 0
    for item in tg_dataset.imgs:
        img_path = item[0]  # imgs yields (path, class_index); only the path is used
        img = Image.open(img_path)
        img = img.convert('RGB')  # normalize grayscale/palette/CMYK inputs to RGB
        # NOTE(review): str.replace substitutes every occurrence of 'imagenet'
        # in the path, not just the root component -- verify that no class or
        # file name contains that substring.
        save_path = img_path.replace('imagenet', 'imagenet_resized_256')
        resized_img = img.resize((256,256), Image.BILINEAR)
        resized_img.save(save_path)
        cnt = cnt+1
        if cnt % 1000 == 0:
            # Progress heartbeat every 1000 images.
            print(cnt, save_path)
print("Hello World")
| 1,507 | 28 | 75 | py |
CVPR19_Incremental_Learning | CVPR19_Incremental_Learning-master/imagenet-class-incremental/modified_linear.py | import math
import torch
from torch.nn.parameter import Parameter
from torch.nn import functional as F
from torch.nn import Module
class CosineLinear(Module):
    """Linear layer that scores inputs by cosine similarity.

    Both the input rows and the weight rows are L2-normalized before the
    matrix product, so each logit is the cosine of the angle between an
    input vector and a class embedding.  An optional learnable scalar
    ``sigma`` rescales the logits, since raw cosines live in [-1, 1].
    """
    def __init__(self, in_features, out_features, sigma=True):
        super(CosineLinear, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.weight = Parameter(torch.Tensor(out_features, in_features))
        # Even when disabled, register `sigma` (as None) so the attribute
        # always exists and state_dict handling stays uniform.
        if sigma:
            self.sigma = Parameter(torch.Tensor(1))
        else:
            self.register_parameter('sigma', None)
        self.reset_parameters()

    def reset_parameters(self):
        """Re-initialize weights uniformly in [-1/sqrt(fan_in), 1/sqrt(fan_in)]."""
        bound = 1. / math.sqrt(self.weight.size(1))
        self.weight.data.uniform_(-bound, bound)
        if self.sigma is not None:
            self.sigma.data.fill_(1)  # neutral scale until learned

    def forward(self, input):
        """Return (optionally sigma-scaled) cosine similarities, shape (N, out_features)."""
        cosine = F.linear(F.normalize(input, p=2, dim=1),
                          F.normalize(self.weight, p=2, dim=1))
        if self.sigma is None:
            return cosine
        return self.sigma * cosine
class SplitCosineLinear(Module):
    """Cosine classifier split into two heads (old vs. newly added classes).

    ``fc1`` scores the old classes and ``fc2`` the new ones; both are
    CosineLinear layers created without their own sigma, so the single
    shared ``sigma`` below rescales the concatenated logits uniformly.
    """
    #consists of two fc layers and concatenate their outputs
    def __init__(self, in_features, out_features1, out_features2, sigma=True):
        super(SplitCosineLinear, self).__init__()
        self.in_features = in_features
        # Total number of classes across both heads.
        self.out_features = out_features1 + out_features2
        self.fc1 = CosineLinear(in_features, out_features1, False)
        self.fc2 = CosineLinear(in_features, out_features2, False)
        if sigma:
            self.sigma = Parameter(torch.Tensor(1))
            self.sigma.data.fill_(1)
        else:
            self.register_parameter('sigma', None)
    def forward(self, x):
        """Return sigma-scaled cosine scores for all (old + new) classes."""
        out1 = self.fc1(x)
        out2 = self.fc2(x)
        out = torch.cat((out1, out2), dim=1) #concatenate along the channel
        if self.sigma is not None:
            out = self.sigma * out
        return out | 2,235 | 36.898305 | 78 | py |
CVPR19_Incremental_Learning | CVPR19_Incremental_Learning-master/imagenet-class-incremental/utils_imagenet/train_and_eval.py | import argparse
import os
import shutil
import time
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import torchvision.models as models
from .utils_train import *
def train_and_eval(epochs, start_epoch, model, optimizer, lr_scheduler, \
                   train_loader, val_loader, gpu=None):
    """Train `model` from `start_epoch` to `epochs`, validating after each epoch.

    Returns the trained model.  Prints the scheduler's learning rate at the
    start of every epoch.
    """
    for epoch in range(start_epoch, epochs):
        #adjust_learning_rate(optimizer, epoch)
        # NOTE(review): the scheduler is stepped *before* the epoch's training
        # (pre-1.1 PyTorch convention) -- confirm this is intended if upgrading.
        lr_scheduler.step()
        print('\nEpoch: %d, LR: ' % epoch, end='')
        print(lr_scheduler.get_lr())
        # train for one epoch
        train(train_loader, model, optimizer, epoch, gpu)
        # evaluate on validation set
        validate(val_loader, model, gpu)
    return model
def train(train_loader, model, optimizer, epoch, gpu=None):
    """Run one epoch of training, printing running metrics every 10 batches."""
    batch_time = AverageMeter()   # wall time per batch (compute + update)
    data_time = AverageMeter()    # time spent waiting on the dataloader
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()
    criterion = nn.CrossEntropyLoss().cuda(gpu)
    # Training mode: enable dropout and batch-norm statistic updates.
    model.train()
    tick = time.time()
    for i, (images, labels) in enumerate(train_loader):
        data_time.update(time.time() - tick)
        if gpu is not None:
            images = images.cuda(gpu, non_blocking=True)
            labels = labels.cuda(gpu, non_blocking=True)
        # Forward pass and loss.
        logits = model(images)
        loss = criterion(logits, labels)
        # Bookkeeping: loss and top-1/top-5 precision, weighted by batch size.
        acc1, acc5 = accuracy(logits, labels, topk=(1, 5))
        n = images.size(0)
        losses.update(loss.item(), n)
        top1.update(acc1[0], n)
        top5.update(acc5[0], n)
        # Backward pass and parameter update.
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        batch_time.update(time.time() - tick)
        tick = time.time()
        if i % 10 == 0:
            print('Epoch: [{0}][{1}/{2}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                  'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
                  'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
                   epoch, i, len(train_loader), batch_time=batch_time,
                   data_time=data_time, loss=losses, top1=top1, top5=top5))
| 2,629 | 30.309524 | 75 | py |
CVPR19_Incremental_Learning | CVPR19_Incremental_Learning-master/imagenet-class-incremental/utils_imagenet/utils_train.py | import argparse
import os
import shutil
import time
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import torchvision.models as models
def validate(val_loader, model, gpu=None):
    """Evaluate `model` on `val_loader`; returns the average top-1 precision."""
    batch_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()
    criterion = nn.CrossEntropyLoss().cuda(gpu)
    # Inference mode: freeze batch-norm statistics, disable dropout.
    model.eval()
    with torch.no_grad():
        tick = time.time()
        for i, (images, labels) in enumerate(val_loader):
            if gpu is not None:
                images = images.cuda(gpu, non_blocking=True)
                labels = labels.cuda(gpu, non_blocking=True)
            logits = model(images)
            loss = criterion(logits, labels)
            # Track loss and top-1/top-5 precision, weighted by batch size.
            acc1, acc5 = accuracy(logits, labels, topk=(1, 5))
            n = images.size(0)
            losses.update(loss.item(), n)
            top1.update(acc1[0], n)
            top5.update(acc5[0], n)
            batch_time.update(time.time() - tick)
            tick = time.time()
            if i % 10 == 0:
                print('Test: [{0}/{1}]\t'
                      'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                      'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                      'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
                      'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
                       i, len(val_loader), batch_time=batch_time, loss=losses,
                       top1=top1, top5=top5))
        print(' * Prec@1 {top1.avg:.3f} Prec@5 {top5.avg:.3f}'
              .format(top1=top1, top5=top5))
    return top1.avg
class AverageMeter(object):
    """Running-average tracker: remembers the latest value, sum, count, and mean."""
    def __init__(self):
        self.reset()

    def reset(self):
        # Start every statistic from a clean slate.
        self.val, self.avg, self.sum, self.count = 0, 0, 0, 0

    def update(self, val, n=1):
        """Record `val` observed `n` times and refresh the running mean."""
        self.val = val
        self.count += n
        self.sum += val * n
        self.avg = self.sum / self.count
def accuracy(output, target, topk=(1,)):
    """Computes the precision@k for the specified values of k.

    Args:
        output: (batch, num_classes) score tensor.
        target: (batch,) ground-truth class indices.
        topk: tuple of k values to evaluate.

    Returns:
        List of one-element tensors, each the percentage of samples whose
        target appears in the top-k predictions, in the order of `topk`.
    """
    with torch.no_grad():
        maxk = max(topk)
        batch_size = target.size(0)
        # pred: (maxk, batch) matrix of top-k predicted class indices.
        _, pred = output.topk(maxk, 1, True, True)
        pred = pred.t()
        correct = pred.eq(target.view(1, -1).expand_as(pred))
        res = []
        for k in topk:
            # Bug fix: use reshape(-1) rather than view(-1) -- the slice of
            # `correct` is not guaranteed contiguous on recent PyTorch, and
            # view() raises a RuntimeError there.
            correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
            res.append(correct_k.mul_(100.0 / batch_size))
        return res
| 2,897 | 29.505263 | 78 | py |
CVPR19_Incremental_Learning | CVPR19_Incremental_Learning-master/cifar100-class-incremental/utils_pytorch.py | #!/usr/bin/env python
# coding=utf-8
from __future__ import print_function, division
import torch
import torch.nn as nn
import torch.nn.init as init
from collections import OrderedDict
import numpy as np
import os
import os.path as osp
import sys
import time
import math
import subprocess
try:
import cPickle as pickle
except:
import pickle
def savepickle(data, file_path):
    """Pickle `data` to `file_path`, creating parent directories as needed."""
    mkdir_p(osp.dirname(file_path), delete=False)
    print('pickle into', file_path)
    with open(file_path, 'wb') as f:
        pickle.dump(data, f, pickle.HIGHEST_PROTOCOL)
def unpickle(file_path):
    """Deserialize and return the object pickled at `file_path`."""
    with open(file_path, 'rb') as fh:
        return pickle.load(fh)
def mkdir_p(path, delete=False, print_info=True):
    """Create `path` (including parents), like ``mkdir -p``.

    Args:
        path: directory to create; the empty string is a no-op.
        delete: if True, remove any existing tree at `path` first.
        print_info: if True, log the mkdir being performed.

    Bug fix: the original shelled out to ``rm -r``/``mkdir -p`` with naive
    whitespace splitting, which breaks on paths containing spaces and is not
    portable; os/shutil calls avoid both problems.
    """
    import shutil  # local import keeps the module's top-level imports unchanged
    if path == '': return
    if delete and osp.exists(path):
        shutil.rmtree(path)
    if not osp.exists(path):
        if print_info:
            print('mkdir -p ' + path)
        os.makedirs(path)
def get_mean_and_std(dataset):
    '''Compute the mean and std value of dataset.'''
    # One sample per batch: accumulate per-sample channel statistics, then
    # average them over the whole dataset.
    loader = torch.utils.data.DataLoader(dataset, batch_size=1, shuffle=True, num_workers=2)
    mean = torch.zeros(3)
    std = torch.zeros(3)
    print('==> Computing mean and std..')
    for inputs, _ in loader:
        for channel in range(3):
            mean[channel] += inputs[:, channel, :, :].mean()
            std[channel] += inputs[:, channel, :, :].std()
    mean.div_(len(dataset))
    std.div_(len(dataset))
    return mean, std
def init_params(net):
    '''Init layer parameters.

    Conv2d: Kaiming-normal weights, zero bias.  BatchNorm2d: unit weight,
    zero bias.  Linear: small-normal weights, zero bias.
    '''
    for m in net.modules():
        if isinstance(m, nn.Conv2d):
            init.kaiming_normal_(m.weight, mode='fan_out')
            # Bug fix: `if m.bias:` evaluated the truthiness of a multi-element
            # tensor, which raises a RuntimeError whenever the conv has a bias.
            if m.bias is not None:
                init.constant_(m.bias, 0)
        elif isinstance(m, nn.BatchNorm2d):
            init.constant_(m.weight, 1)
            init.constant_(m.bias, 0)
        elif isinstance(m, nn.Linear):
            init.normal_(m.weight, std=1e-3)
            if m.bias is not None:
                init.constant_(m.bias, 0)
# Query the terminal width for the progress bar.  `stty size` produces empty
# output when there is no controlling terminal (CI, nohup, redirected output),
# which makes the tuple unpack raise ValueError -- fall back to 80 columns.
try:
    _, term_width = os.popen('stty size', 'r').read().split()
    term_width = int(term_width)
except ValueError:
    term_width = 80
TOTAL_BAR_LENGTH = 65.
last_time = time.time()
begin_time = last_time
def progress_bar(current, total, msg=None):
    """Render an in-place console progress bar for step `current` of `total`.

    Writes directly to stdout: an ASCII bar sized by TOTAL_BAR_LENGTH, the
    per-step and cumulative elapsed times, an optional `msg`, and a
    "current/total" counter positioned under the bar.  A carriage return
    redraws the line in place until the final step, which ends with a newline.
    Relies on the module globals `term_width`, `last_time`, `begin_time`.
    """
    global last_time, begin_time
    if current == 0:
        begin_time = time.time()  # Reset for new bar.
    # Split the bar into a filled '=' segment, a '>' head, and '.' padding.
    cur_len = int(TOTAL_BAR_LENGTH*current/total)
    rest_len = int(TOTAL_BAR_LENGTH - cur_len) - 1
    sys.stdout.write(' [')
    for i in range(cur_len):
        sys.stdout.write('=')
    sys.stdout.write('>')
    for i in range(rest_len):
        sys.stdout.write('.')
    sys.stdout.write(']')
    cur_time = time.time()
    step_time = cur_time - last_time
    last_time = cur_time
    tot_time = cur_time - begin_time
    L = []
    L.append('  Step: %s' % format_time(step_time))
    L.append(' | Tot: %s' % format_time(tot_time))
    if msg:
        L.append(' | ' + msg)
    msg = ''.join(L)
    sys.stdout.write(msg)
    # Pad with spaces so leftovers from a longer previous line are erased.
    for i in range(term_width-int(TOTAL_BAR_LENGTH)-len(msg)-3):
        sys.stdout.write(' ')
    # Go back to the center of the bar.
    for i in range(term_width-int(TOTAL_BAR_LENGTH/2)+2):
        sys.stdout.write('\b')
    sys.stdout.write(' %d/%d ' % (current+1, total))
    if current < total-1:
        sys.stdout.write('\r')
    else:
        sys.stdout.write('\n')
    sys.stdout.flush()
def format_time(seconds):
    """Format a duration in seconds as a compact string, e.g. '1h2m' or '3s420ms'.

    At most the two most significant nonzero units are emitted (days, hours,
    minutes, seconds, milliseconds); a zero duration renders as '0ms'.
    """
    # Peel off each unit from largest to smallest.
    days = int(seconds / 3600/24)
    seconds = seconds - days*3600*24
    hours = int(seconds / 3600)
    seconds = seconds - hours*3600
    minutes = int(seconds / 60)
    seconds = seconds - minutes*60
    secondsf = int(seconds)
    seconds = seconds - secondsf
    millis = int(seconds*1000)
    f = ''
    i = 1
    # `i` counts emitted units; stop after two for compactness.
    if days > 0:
        f += str(days) + 'D'
        i += 1
    if hours > 0 and i <= 2:
        f += str(hours) + 'h'
        i += 1
    if minutes > 0 and i <= 2:
        f += str(minutes) + 'm'
        i += 1
    if secondsf > 0 and i <= 2:
        f += str(secondsf) + 's'
        i += 1
    if millis > 0 and i <= 2:
        f += str(millis) + 'ms'
        i += 1
    if f == '':
        f = '0ms'
    return f | 4,102 | 26.172185 | 96 | py |
CVPR19_Incremental_Learning | CVPR19_Incremental_Learning-master/cifar100-class-incremental/eval_cumul_acc.py | #!/usr/bin/env python
# coding=utf-8
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.optim import lr_scheduler
import torchvision
from torchvision import datasets, models, transforms
from torch.autograd import Variable
import numpy as np
import time
import os
import sys
import copy
import argparse
from PIL import Image
try:
import cPickle as pickle
except:
import pickle
from scipy.spatial.distance import cdist
import modified_resnet_cifar
import modified_linear
import utils_pytorch
from utils_incremental.compute_features import compute_features
from utils_incremental.compute_accuracy import compute_accuracy
from utils_incremental.compute_confusion_matrix import compute_confusion_matrix
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
######### Modifiable Settings ##########
parser = argparse.ArgumentParser()
parser.add_argument('--nb_cl', default=10, type=int, \
help='Classes per group')
parser.add_argument('--ckp_prefix', \
default='checkpoint/class_incremental_cifar100_nb_cl_10_nb_protos_20_run_0_', \
type=str)
parser.add_argument('--order', \
default='./checkpoint/cifar100_order_run_0.pkl', \
type=str)
parser.add_argument('--nb_cl_fg', default=50, type=int, \
help='the number of classes in first group')
args = parser.parse_args()
print(args)
order = utils_pytorch.unpickle(args.order)
order_list = list(order)
transform_test = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.5071, 0.4866, 0.4409), (0.2009, 0.1984, 0.2023)),
])
evalset = torchvision.datasets.CIFAR100(root='./data', train=False,
download=False, transform=transform_test)
input_data = evalset.test_data
input_labels = evalset.test_labels
map_input_labels = np.array([order_list.index(i) for i in input_labels])
#evalset.test_labels = map_input_labels
#evalloader = torch.utils.data.DataLoader(evalset, batch_size=128,
# shuffle=False, num_workers=2)
# Accumulators for the three evaluation schemes at each incremental phase:
# raw CNN softmax output, iCaRL mean-of-exemplars, and NCM class means.
cnn_cumul_acc = []
icarl_cumul_acc = []
ncm_cumul_acc = []
num_classes = []
nb_cl = args.nb_cl
start_iter = int(args.nb_cl_fg/nb_cl)-1
for iteration in range(start_iter, int(100/nb_cl)):
    #print("###########################################################")
    #print("For iteration {}".format(iteration))
    #print("###########################################################")
    # Load the model and the stored class means checkpointed for this phase.
    ckp_name = '{}iteration_{}_model.pth'.format(args.ckp_prefix, iteration)
    class_means_name = '{}iteration_{}_class_means.pth'.format(args.ckp_prefix, iteration)
    tg_model = torch.load(ckp_name)
    # Feature extractor = full model minus its final (classifier) child.
    tg_feature_model = nn.Sequential(*list(tg_model.children())[:-1])
    class_means = torch.load(class_means_name)
    current_means = class_means[:, order[:(iteration+1)*nb_cl]]
    # Restrict evaluation to all classes seen so far (cumulative protocol).
    indices = np.array([i in range(0, (iteration+1)*nb_cl) for i in map_input_labels])
    evalset.test_data = input_data[indices]
    evalset.test_labels = map_input_labels[indices]
    #print('Max and Min of valid labels: {}, {}'.format(min(evalset.test_labels), max(evalset.test_labels)))
    evalloader = torch.utils.data.DataLoader(evalset, batch_size=128,
                                             shuffle=False, num_workers=2)
    # acc = (CNN top-1, iCaRL top-1, NCM top-1) for this phase.
    acc = compute_accuracy(tg_model, tg_feature_model, current_means, evalloader, print_info=False)
    cnn_cumul_acc.append(acc[0])
    icarl_cumul_acc.append(acc[1])
    ncm_cumul_acc.append(acc[2])
    num_classes.append((iteration+1)*nb_cl)
print("###########################################################")
print(' CNN acc: \t iCaRL acc \t NCM acc')
print("###########################################################")
for i in range(len(cnn_cumul_acc)):
print("{:.2f} ".format(cnn_cumul_acc[i]), end='')
print("[{:.2f}] ".format(np.mean(cnn_cumul_acc[-1])), end='')
print("[{:.2f}] ".format(np.mean(cnn_cumul_acc)), end='')
print("[{:.2f}] ".format(np.sum(np.array(cnn_cumul_acc)*np.array(num_classes)) / np.sum(num_classes)), end='')
print("")
for i in range(len(icarl_cumul_acc)):
print("{:.2f} ".format(icarl_cumul_acc[i]), end='')
print("[{:.2f}] ".format(np.mean(icarl_cumul_acc[-1])), end='')
print("[{:.2f}] ".format(np.mean(icarl_cumul_acc)), end='')
print("[{:.2f}] ".format(np.sum(np.array(icarl_cumul_acc)*np.array(num_classes)) / np.sum(num_classes)), end='')
print("")
for i in range(len(cnn_cumul_acc)):
print("{:.2f} ".format(ncm_cumul_acc[i]), end='')
print("[{:.2f}] ".format(np.mean(ncm_cumul_acc[-1])), end='')
print("[{:.2f}] ".format(np.mean(ncm_cumul_acc)), end='')
print("[{:.2f}] ".format(np.sum(np.array(ncm_cumul_acc)*np.array(num_classes)) / np.sum(num_classes)), end='')
print("")
print("###########################################################")
print("")
print('Number of classes', num_classes)
print("###########################################################")
print("Final acc on all classes")
print("CNN:{:.2f}\t iCaRL:{:.2f}\t NCM:{:.2f}".format(cnn_cumul_acc[-1], icarl_cumul_acc[-1], ncm_cumul_acc[-1]))
print("###########################################################")
print("Average acc in each phase")
print("CNN:{:.2f}\t iCaRL:{:.2f}\t NCM:{:.2f}".format(np.mean(cnn_cumul_acc), np.mean(icarl_cumul_acc), np.mean(ncm_cumul_acc)))
print("###########################################################")
print("Weighted average acc in each phase")
print("CNN:{:.2f}\t iCaRL:{:.2f}\t NCM:{:.2f}".format(
np.sum(np.array(cnn_cumul_acc)*np.array(num_classes)) / np.sum(num_classes),
np.sum(np.array(icarl_cumul_acc)*np.array(num_classes)) / np.sum(num_classes),
np.sum(np.array(ncm_cumul_acc)*np.array(num_classes)) / np.sum(num_classes)
))
print("###########################################################") | 5,687 | 43.4375 | 128 | py |
CVPR19_Incremental_Learning | CVPR19_Incremental_Learning-master/cifar100-class-incremental/class_incremental_cifar100.py | #!/usr/bin/env python
# coding=utf-8
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.optim import lr_scheduler
import torchvision
from torchvision import datasets, models, transforms
from torch.autograd import Variable
import numpy as np
import time
import os
import sys
import copy
import argparse
from PIL import Image
try:
import cPickle as pickle
except:
import pickle
import resnet_cifar
import utils_pytorch
from utils_incremental.compute_features import compute_features
from utils_incremental.compute_accuracy import compute_accuracy
from utils_incremental.compute_confusion_matrix import compute_confusion_matrix
from utils_incremental.incremental_train_and_eval import incremental_train_and_eval
######### Modifiable Settings ##########
parser = argparse.ArgumentParser()
parser.add_argument('--dataset', default='cifar100', type=str)
parser.add_argument('--num_classes', default=100, type=int)
parser.add_argument('--nb_cl_fg', default=50, type=int,
                    help='the number of classes in first group')
parser.add_argument('--nb_cl', default=10, type=int,
                    help='Classes per group')
parser.add_argument('--nb_protos', default=20, type=int,
                    help='Number of prototypes per class at the end')
parser.add_argument('--nb_runs', default=1, type=int,
                    help='Number of runs (random ordering of classes at each run)')
parser.add_argument('--ckp_prefix', default=os.path.basename(sys.argv[0])[:-3], type=str,
                    help='Checkpoint prefix')
parser.add_argument('--epochs', default=160, type=int,
                    help='Epochs')
# FIX: "distialltion" -> "distillation" in the two help strings below.
parser.add_argument('--T', default=2, type=float,
                    help='Temperature for distillation')
parser.add_argument('--beta', default=0.25, type=float,
                    help='Beta for distillation')
parser.add_argument('--resume', action='store_true',
                    help='resume from checkpoint')
parser.add_argument('--fix_budget', action='store_true',
                    help='fix budget')
parser.add_argument('--rs_ratio', default=0, type=float,
                    help='The ratio for resample')
parser.add_argument('--random_seed', default=1993, type=int,
                    help='random seed')
args = parser.parse_args()
########################################
# The group size must evenly divide (and not exceed) the first group.
assert(args.nb_cl_fg % args.nb_cl == 0)
assert(args.nb_cl_fg >= args.nb_cl)
train_batch_size = 128       # Batch size for train
test_batch_size = 100        # Batch size for test
eval_batch_size = 128        # Batch size for eval
base_lr = 0.1                # Initial learning rate
lr_strat = [80, 120]         # Epochs where learning rate gets decreased
lr_factor = 0.1              # Learning rate decrease factor
custom_weight_decay = 5e-4   # Weight Decay
custom_momentum = 0.9        # Momentum
args.ckp_prefix = '{}_nb_cl_fg_{}_nb_cl_{}_nb_protos_{}'.format(args.ckp_prefix, args.nb_cl_fg, args.nb_cl, args.nb_protos)
np.random.seed(args.random_seed)  # Fix the random seed
print(args)
########################################
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# Random crop + horizontal flip for training; evaluation only normalizes.
transform_train = transforms.Compose([
    transforms.RandomCrop(32, padding=4),
    transforms.RandomHorizontalFlip(),
    transforms.ToTensor(),
    transforms.Normalize((0.5071, 0.4866, 0.4409), (0.2009, 0.1984, 0.2023)),
])
transform_test = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.5071, 0.4866, 0.4409), (0.2009, 0.1984, 0.2023)),
])
trainset = torchvision.datasets.CIFAR100(
    root='./data', train=True, download=True, transform=transform_train)
testset = torchvision.datasets.CIFAR100(
    root='./data', train=False, download=True, transform=transform_test)
# evalset is repeatedly re-pointed at different image arrays during the run.
evalset = torchvision.datasets.CIFAR100(
    root='./data', train=False, download=False, transform=transform_test)
# Initialization
dictionary_size = 500  # stored training images per class (CIFAR-100 has 500/class)
top1_acc_list_cumul = np.zeros((int(args.num_classes/args.nb_cl), 3, args.nb_runs))
top1_acc_list_ori = np.zeros((int(args.num_classes/args.nb_cl), 3, args.nb_runs))
X_train_total = np.array(trainset.train_data)
Y_train_total = np.array(trainset.train_labels)
X_valid_total = np.array(testset.test_data)
Y_valid_total = np.array(testset.test_labels)
# Launch the different runs
for iteration_total in range(args.nb_runs):
    # Select the order for the class learning
    order_name = "./checkpoint/seed_{}_{}_order_run_{}.pkl".format(args.random_seed, args.dataset, iteration_total)
    print("Order name:{}".format(order_name))
    if os.path.exists(order_name):
        print("Loading orders")
        order = utils_pytorch.unpickle(order_name)
    else:
        print("Generating orders")
        order = np.arange(args.num_classes)
        np.random.shuffle(order)
        utils_pytorch.savepickle(order, order_name)
    order_list = list(order)
    print(order_list)
    # Initialization of the variables for this run
    X_valid_cumuls = []
    X_protoset_cumuls = []
    X_train_cumuls = []
    Y_valid_cumuls = []
    Y_protoset_cumuls = []
    Y_train_cumuls = []
    # Herding ranks, indexed by (class group, candidate sample, class within group).
    alpha_dr_herding = np.zeros((int(args.num_classes/args.nb_cl), dictionary_size, args.nb_cl), np.float32)
    # The following contains all the training samples of the different classes
    # because we want to compare our method with the theoretical case where all the training samples are stored
    prototypes = np.zeros((args.num_classes, dictionary_size, X_train_total.shape[1], X_train_total.shape[2], X_train_total.shape[3]))
    for orde in range(args.num_classes):
        prototypes[orde,:,:,:,:] = X_train_total[np.where(Y_train_total==order[orde])]
    start_iter = int(args.nb_cl_fg/args.nb_cl)-1
    for iteration in range(start_iter, int(args.num_classes/args.nb_cl)):
        # init model
        if iteration == start_iter:
            ############################################################
            last_iter = 0
            ############################################################
            tg_model = resnet_cifar.resnet32(num_classes=args.nb_cl_fg)
            ref_model = None
        else:
            ############################################################
            last_iter = iteration
            ############################################################
            # Increment classes: keep a frozen copy for distillation and grow
            # the FC layer by args.nb_cl outputs, copying the old weights over.
            ref_model = copy.deepcopy(tg_model)
            in_features = tg_model.fc.in_features
            out_features = tg_model.fc.out_features
            new_fc = nn.Linear(in_features, out_features+args.nb_cl)
            new_fc.weight.data[:out_features] = tg_model.fc.weight.data
            new_fc.bias.data[:out_features] = tg_model.fc.bias.data
            tg_model.fc = new_fc
        # Prepare the training data for the current batch of classes
        actual_cl = order[range(last_iter*args.nb_cl,(iteration+1)*args.nb_cl)]
        indices_train_10 = np.array([i in order[range(last_iter*args.nb_cl,(iteration+1)*args.nb_cl)] for i in Y_train_total])
        indices_test_10 = np.array([i in order[range(last_iter*args.nb_cl,(iteration+1)*args.nb_cl)] for i in Y_valid_total])
        X_train = X_train_total[indices_train_10]
        X_valid = X_valid_total[indices_test_10]
        X_valid_cumuls.append(X_valid)
        X_train_cumuls.append(X_train)
        X_valid_cumul = np.concatenate(X_valid_cumuls)
        X_train_cumul = np.concatenate(X_train_cumuls)
        Y_train = Y_train_total[indices_train_10]
        Y_valid = Y_valid_total[indices_test_10]
        Y_valid_cumuls.append(Y_valid)
        Y_train_cumuls.append(Y_train)
        Y_valid_cumul = np.concatenate(Y_valid_cumuls)
        Y_train_cumul = np.concatenate(Y_train_cumuls)
        # Add the stored exemplars to the training data
        if iteration == start_iter:
            X_valid_ori = X_valid
            Y_valid_ori = Y_valid
        else:
            X_protoset = np.concatenate(X_protoset_cumuls)
            Y_protoset = np.concatenate(Y_protoset_cumuls)
            if args.rs_ratio > 0:
                # 1/rs_ratio = (len(X_train)+len(X_protoset)*scale_factor)/(len(X_protoset)*scale_factor)
                scale_factor = (len(X_train) * args.rs_ratio) / (len(X_protoset) * (1 - args.rs_ratio))
                rs_sample_weights = np.concatenate((np.ones(len(X_train)), np.ones(len(X_protoset))*scale_factor))
                # number of samples per epoch, undersample on the new classes
                # rs_num_samples = len(X_train) + len(X_protoset)
                rs_num_samples = int(len(X_train) / (1 - args.rs_ratio))
                print("X_train:{}, X_protoset:{}, rs_num_samples:{}".format(len(X_train), len(X_protoset), rs_num_samples))
            X_train = np.concatenate((X_train,X_protoset),axis=0)
            Y_train = np.concatenate((Y_train,Y_protoset))
        # Launch the training loop
        print('Batch of classes number {0} arrives ...'.format(iteration+1))
        # Remap original labels to their position in the (shuffled) class order.
        map_Y_train = np.array([order_list.index(i) for i in Y_train])
        map_Y_valid_cumul = np.array([order_list.index(i) for i in Y_valid_cumul])
        ############################################################
        trainset.train_data = X_train.astype('uint8')
        trainset.train_labels = map_Y_train
        if iteration > start_iter and args.rs_ratio > 0 and scale_factor > 1:
            print("Weights from sampling:", rs_sample_weights)
            index1 = np.where(rs_sample_weights>1)[0]
            index2 = np.where(map_Y_train<iteration*args.nb_cl)[0]
            assert((index1==index2).all())
            train_sampler = torch.utils.data.sampler.WeightedRandomSampler(rs_sample_weights, rs_num_samples)
            trainloader = torch.utils.data.DataLoader(trainset, batch_size=train_batch_size,
                                                      shuffle=False, sampler=train_sampler, num_workers=2)
        else:
            trainloader = torch.utils.data.DataLoader(trainset, batch_size=train_batch_size,
                                                      shuffle=True, num_workers=2)
        testset.test_data = X_valid_cumul.astype('uint8')
        testset.test_labels = map_Y_valid_cumul
        testloader = torch.utils.data.DataLoader(testset, batch_size=test_batch_size,
                                                 shuffle=False, num_workers=2)
        print('Max and Min of train labels: {}, {}'.format(min(map_Y_train), max(map_Y_train)))
        print('Max and Min of valid labels: {}, {}'.format(min(map_Y_valid_cumul), max(map_Y_valid_cumul)))
        ##############################################################
        ckp_name = './checkpoint/{}_run_{}_iteration_{}_model.pth'.format(args.ckp_prefix, iteration_total, iteration)
        print('ckp_name', ckp_name)
        if args.resume and os.path.exists(ckp_name):
            print("###############################")
            print("Loading models from checkpoint")
            tg_model = torch.load(ckp_name)
            print("###############################")
        else:
            tg_params = tg_model.parameters()
            tg_model = tg_model.to(device)
            if iteration > start_iter:
                ref_model = ref_model.to(device)
            tg_optimizer = optim.SGD(tg_params, lr=base_lr, momentum=custom_momentum, weight_decay=custom_weight_decay)
            tg_lr_scheduler = lr_scheduler.MultiStepLR(tg_optimizer, milestones=lr_strat, gamma=lr_factor)
            tg_model = incremental_train_and_eval(args.epochs, tg_model, ref_model, tg_optimizer, tg_lr_scheduler,
                                                  trainloader, testloader,
                                                  iteration, start_iter,
                                                  args.T, args.beta)
            if not os.path.isdir('checkpoint'):
                os.mkdir('checkpoint')
            torch.save(tg_model, ckp_name)
        ### Exemplars
        if args.fix_budget:
            # Fixed total memory budget: shrink per-class quota as classes grow.
            nb_protos_cl = int(np.ceil(args.nb_protos*100./args.nb_cl/(iteration+1)))
        else:
            nb_protos_cl = args.nb_protos
        # Feature extractor = all layers except the final FC.
        tg_feature_model = nn.Sequential(*list(tg_model.children())[:-1])
        num_features = tg_model.fc.in_features
        # Herding
        print('Updating exemplar set...')
        for iter_dico in range(last_iter*args.nb_cl, (iteration+1)*args.nb_cl):
            # Possible exemplars in the feature space and projected on the L2 sphere
            evalset.test_data = prototypes[iter_dico].astype('uint8')
            evalset.test_labels = np.zeros(evalset.test_data.shape[0]) #zero labels
            evalloader = torch.utils.data.DataLoader(evalset, batch_size=eval_batch_size,
                                                     shuffle=False, num_workers=2)
            num_samples = evalset.test_data.shape[0]
            mapped_prototypes = compute_features(tg_feature_model, evalloader, num_samples, num_features)
            D = mapped_prototypes.T
            D = D/np.linalg.norm(D,axis=0)
            # Herding procedure : ranking of the potential exemplars
            mu = np.mean(D,axis=1)
            index1 = int(iter_dico/args.nb_cl)
            index2 = iter_dico % args.nb_cl
            alpha_dr_herding[index1,:,index2] = alpha_dr_herding[index1,:,index2]*0
            w_t = mu
            iter_herding = 0
            iter_herding_eff = 0
            # FIX: cap at dictionary_size instead of the hard-coded 500.
            while not(np.sum(alpha_dr_herding[index1,:,index2]!=0)==min(nb_protos_cl, dictionary_size)) and iter_herding_eff<1000:
                tmp_t = np.dot(w_t,D)
                ind_max = np.argmax(tmp_t)
                iter_herding_eff += 1
                if alpha_dr_herding[index1,ind_max,index2] == 0:
                    alpha_dr_herding[index1,ind_max,index2] = 1+iter_herding
                    iter_herding += 1
                w_t = w_t+mu-D[:,ind_max]
        # Prepare the protoset
        X_protoset_cumuls = []
        Y_protoset_cumuls = []
        # Class means for iCaRL and NCM + Storing the selected exemplars in the protoset
        print('Computing mean-of_exemplars and theoretical mean...')
        # GENERALIZED: was np.zeros((64,100,2)) — hard-coded to resnet32's 64-d
        # features and 100 classes; derive both from the model and args instead.
        class_means = np.zeros((num_features, args.num_classes, 2))
        for iteration2 in range(iteration+1):
            for iter_dico in range(args.nb_cl):
                current_cl = order[range(iteration2*args.nb_cl,(iteration2+1)*args.nb_cl)]
                # Collect data in the feature space for each class
                evalset.test_data = prototypes[iteration2*args.nb_cl+iter_dico].astype('uint8')
                evalset.test_labels = np.zeros(evalset.test_data.shape[0]) #zero labels
                evalloader = torch.utils.data.DataLoader(evalset, batch_size=eval_batch_size,
                                                         shuffle=False, num_workers=2)
                num_samples = evalset.test_data.shape[0]
                mapped_prototypes = compute_features(tg_feature_model, evalloader, num_samples, num_features)
                D = mapped_prototypes.T
                D = D/np.linalg.norm(D,axis=0)
                # Flipped version also
                evalset.test_data = prototypes[iteration2*args.nb_cl+iter_dico][:,:,:,::-1].astype('uint8')
                evalloader = torch.utils.data.DataLoader(evalset, batch_size=eval_batch_size,
                                                         shuffle=False, num_workers=2)
                mapped_prototypes2 = compute_features(tg_feature_model, evalloader, num_samples, num_features)
                D2 = mapped_prototypes2.T
                D2 = D2/np.linalg.norm(D2,axis=0)
                # iCaRL: mean of the herded exemplars only
                alph = alpha_dr_herding[iteration2,:,iter_dico]
                alph = (alph>0)*(alph<nb_protos_cl+1)*1.
                X_protoset_cumuls.append(prototypes[iteration2*args.nb_cl+iter_dico,np.where(alph==1)[0]])
                Y_protoset_cumuls.append(order[iteration2*args.nb_cl+iter_dico]*np.ones(len(np.where(alph==1)[0])))
                alph = alph/np.sum(alph)
                class_means[:,current_cl[iter_dico],0] = (np.dot(D,alph)+np.dot(D2,alph))/2
                class_means[:,current_cl[iter_dico],0] /= np.linalg.norm(class_means[:,current_cl[iter_dico],0])
                # Normal NCM: mean over all stored samples of the class
                alph = np.ones(dictionary_size)/dictionary_size
                class_means[:,current_cl[iter_dico],1] = (np.dot(D,alph)+np.dot(D2,alph))/2
                class_means[:,current_cl[iter_dico],1] /= np.linalg.norm(class_means[:,current_cl[iter_dico],1])
        torch.save(class_means,
                   './checkpoint/{}_run_{}_iteration_{}_class_means.pth'.format(args.ckp_prefix,iteration_total, iteration))
        current_means = class_means[:, order[range(0,(iteration+1)*args.nb_cl)]]
        ##############################################################
        # Calculate validation error of model on the first nb_cl classes:
        map_Y_valid_ori = np.array([order_list.index(i) for i in Y_valid_ori])
        print('Computing accuracy on the original batch of classes...')
        evalset.test_data = X_valid_ori.astype('uint8')
        evalset.test_labels = map_Y_valid_ori
        evalloader = torch.utils.data.DataLoader(evalset, batch_size=eval_batch_size,
                                                 shuffle=False, num_workers=2)
        ori_acc = compute_accuracy(tg_model, tg_feature_model, current_means, evalloader)
        top1_acc_list_ori[iteration, :, iteration_total] = np.array(ori_acc).T
        ##############################################################
        # Calculate validation error of model on the cumul of classes:
        map_Y_valid_cumul = np.array([order_list.index(i) for i in Y_valid_cumul])
        print('Computing cumulative accuracy...')
        evalset.test_data = X_valid_cumul.astype('uint8')
        evalset.test_labels = map_Y_valid_cumul
        evalloader = torch.utils.data.DataLoader(evalset, batch_size=eval_batch_size,
                                                 shuffle=False, num_workers=2)
        cumul_acc = compute_accuracy(tg_model, tg_feature_model, current_means, evalloader)
        top1_acc_list_cumul[iteration, :, iteration_total] = np.array(cumul_acc).T
        ##############################################################
        # Calculate confusion matrix
        print('Computing confusion matrix...')
        cm = compute_confusion_matrix(tg_model, tg_feature_model, current_means, evalloader)
        cm_name = './checkpoint/{}_run_{}_iteration_{}_confusion_matrix.pth'.format(args.ckp_prefix,iteration_total, iteration)
        with open(cm_name, 'wb') as f:
            pickle.dump(cm, f, 2) #for reading with Python 2
        ##############################################################
        # Final save of the data
        torch.save(top1_acc_list_ori,
                   './checkpoint/{}_run_{}_top1_acc_list_ori.pth'.format(args.ckp_prefix, iteration_total))
        torch.save(top1_acc_list_cumul,
                   './checkpoint/{}_run_{}_top1_acc_list_cumul.pth'.format(args.ckp_prefix, iteration_total))
| 18,765 | 51.565826 | 130 | py |
CVPR19_Incremental_Learning | CVPR19_Incremental_Learning-master/cifar100-class-incremental/resnet_cifar.py | import torch.nn as nn
import math
import torch.utils.model_zoo as model_zoo
def conv3x3(in_planes, out_planes, stride=1):
    """Return a bias-free 3x3 convolution that keeps the spatial size (padding=1)."""
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=1,
        bias=False,
    )
class BasicBlock(nn.Module):
    """Two-conv residual block for CIFAR-style ResNets.

    Path: conv3x3 -> BN -> ReLU -> conv3x3 -> BN, added to the (optionally
    downsampled) shortcut, followed by a final ReLU.
    """
    expansion = 1

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(BasicBlock, self).__init__()
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = nn.BatchNorm2d(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = nn.BatchNorm2d(planes)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        # Shortcut is the identity unless a projection was supplied.
        shortcut = x if self.downsample is None else self.downsample(x)
        y = self.relu(self.bn1(self.conv1(x)))
        y = self.bn2(self.conv2(y))
        y += shortcut
        return self.relu(y)
class Bottleneck(nn.Module):
    """1x1 -> 3x3 -> 1x1 bottleneck residual block.

    Output channel count is ``planes * expansion`` (i.e. four times the
    bottleneck width); the stride, if any, is applied by the 3x3 convolution.
    """
    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(Bottleneck, self).__init__()
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
                               padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, planes * self.expansion, kernel_size=1,
                               bias=False)
        self.bn3 = nn.BatchNorm2d(planes * self.expansion)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        # Shortcut is the identity unless a projection was supplied.
        shortcut = x if self.downsample is None else self.downsample(x)
        y = self.relu(self.bn1(self.conv1(x)))
        y = self.relu(self.bn2(self.conv2(y)))
        y = self.bn3(self.conv3(y))
        y += shortcut
        return self.relu(y)
class ResNet(nn.Module):
    """CIFAR-style ResNet: 3x3 stem, three stages at 16/32/64 channels
    (stages 2 and 3 halve the spatial size), 8x8 average pool, then a
    linear classifier."""

    def __init__(self, block, layers, num_classes=10):
        self.inplanes = 16
        super(ResNet, self).__init__()
        self.conv1 = nn.Conv2d(3, 16, kernel_size=3, stride=1, padding=1,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(16)
        self.relu = nn.ReLU(inplace=True)
        self.layer1 = self._make_layer(block, 16, layers[0])
        self.layer2 = self._make_layer(block, 32, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 64, layers[2], stride=2)
        self.avgpool = nn.AvgPool2d(8, stride=1)
        self.fc = nn.Linear(64 * block.expansion, num_classes)
        # He initialization for convolutions; batch norms start as identity.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)

    def _make_layer(self, block, planes, blocks, stride=1):
        # Project the shortcut whenever spatial size or channel count changes.
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion),
            )
        stage = [block(self.inplanes, planes, stride, downsample)]
        self.inplanes = planes * block.expansion
        stage.extend(block(self.inplanes, planes) for _ in range(1, blocks))
        return nn.Sequential(*stage)

    def forward(self, x):
        x = self.relu(self.bn1(self.conv1(x)))
        x = self.layer3(self.layer2(self.layer1(x)))
        x = self.avgpool(x)
        return self.fc(x.view(x.size(0), -1))
def resnet20(pretrained=False, **kwargs):
    """ResNet-20 for CIFAR: 3 basic blocks per stage (depth = 6*3 + 2)."""
    # ``pretrained`` is accepted for API symmetry; no weights are loaded.
    return ResNet(BasicBlock, [3, 3, 3], **kwargs)
def resnet32(pretrained=False, **kwargs):
    """ResNet-32 for CIFAR: 5 basic blocks per stage (depth = 6*5 + 2)."""
    # ``pretrained`` is accepted for API symmetry; no weights are loaded.
    return ResNet(BasicBlock, [5, 5, 5], **kwargs)
def resnet56(pretrained=False, **kwargs):
    """ResNet-56 for CIFAR.

    CIFAR ResNet depth is 6n + 2 with basic blocks, so n = 9 gives 56 layers.
    FIX: the original passed ``Bottleneck`` here, which builds a 9n+2 = 83-layer
    bottleneck network rather than ResNet-56 as defined by He et al. (2016).
    """
    n = 9
    model = ResNet(BasicBlock, [n, n, n], **kwargs)
    return model
| 4,525 | 29.375839 | 90 | py |
CVPR19_Incremental_Learning | CVPR19_Incremental_Learning-master/cifar100-class-incremental/modified_resnet_cifar.py | #remove ReLU in the last layer, and use cosine layer to replace nn.Linear
import torch.nn as nn
import math
import torch.utils.model_zoo as model_zoo
import modified_linear
def conv3x3(in_planes, out_planes, stride=1):
    """Bias-free 3x3 convolution with padding=1 (spatial size preserved)."""
    conv_kwargs = dict(kernel_size=3, stride=stride, padding=1, bias=False)
    return nn.Conv2d(in_planes, out_planes, **conv_kwargs)
class BasicBlock(nn.Module):
    """Residual basic block with an optional ``last`` flag.

    When ``last`` is True the closing ReLU is skipped, so the block emits
    signed (un-rectified) features — per the file header, this feeds the
    cosine classifier that replaces the usual linear layer.
    """
    expansion = 1

    def __init__(self, inplanes, planes, stride=1, downsample=None, last=False):
        super(BasicBlock, self).__init__()
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = nn.BatchNorm2d(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = nn.BatchNorm2d(planes)
        self.downsample = downsample
        self.stride = stride
        self.last = last

    def forward(self, x):
        # Shortcut is the identity unless a projection was supplied.
        shortcut = x if self.downsample is None else self.downsample(x)
        y = self.relu(self.bn1(self.conv1(x)))
        y = self.bn2(self.conv2(y))
        y += shortcut
        # The network's final block omits this ReLU (see class docstring).
        return y if self.last else self.relu(y)
class ResNet(nn.Module):
    """CIFAR ResNet variant whose classifier is a cosine-similarity layer
    (``modified_linear.CosineLinear``) and whose very last residual block is
    built with ``last=True`` so its closing ReLU is dropped."""

    def __init__(self, block, layers, num_classes=10):
        self.inplanes = 16
        super(ResNet, self).__init__()
        self.conv1 = nn.Conv2d(3, 16, kernel_size=3, stride=1, padding=1,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(16)
        self.relu = nn.ReLU(inplace=True)
        self.layer1 = self._make_layer(block, 16, layers[0])
        self.layer2 = self._make_layer(block, 32, layers[1], stride=2)
        # Final stage: its last block keeps signed features for the cosine head.
        self.layer3 = self._make_layer(block, 64, layers[2], stride=2, last_phase=True)
        self.avgpool = nn.AvgPool2d(8, stride=1)
        self.fc = modified_linear.CosineLinear(64 * block.expansion, num_classes)
        # He initialization for convolutions; batch norms start as identity.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)

    def _make_layer(self, block, planes, blocks, stride=1, last_phase=False):
        # Project the shortcut whenever spatial size or channel count changes.
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion),
            )
        stage = [block(self.inplanes, planes, stride, downsample)]
        self.inplanes = planes * block.expansion
        # In the last phase, reserve the final slot for a ``last=True`` block.
        tail = blocks - 1 if last_phase else blocks
        stage.extend(block(self.inplanes, planes) for _ in range(1, tail))
        if last_phase:
            stage.append(block(self.inplanes, planes, last=True))
        return nn.Sequential(*stage)

    def forward(self, x):
        x = self.relu(self.bn1(self.conv1(x)))
        x = self.layer3(self.layer2(self.layer1(x)))
        x = self.avgpool(x)
        return self.fc(x.view(x.size(0), -1))
def resnet20(pretrained=False, **kwargs):
    """Cosine-head ResNet-20 for CIFAR: 3 blocks per stage (depth = 6*3 + 2)."""
    # ``pretrained`` is accepted for API symmetry; no weights are loaded.
    return ResNet(BasicBlock, [3, 3, 3], **kwargs)
def resnet32(pretrained=False, **kwargs):
    """Cosine-head ResNet-32 for CIFAR: 5 blocks per stage (depth = 6*5 + 2)."""
    # ``pretrained`` is accepted for API symmetry; no weights are loaded.
    return ResNet(BasicBlock, [5, 5, 5], **kwargs)
| 3,716 | 31.893805 | 87 | py |
CVPR19_Incremental_Learning | CVPR19_Incremental_Learning-master/cifar100-class-incremental/class_incremental_cosine_cifar100.py | #!/usr/bin/env python
# coding=utf-8
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.optim import lr_scheduler
import torchvision
from torchvision import datasets, models, transforms
from torch.autograd import Variable
import numpy as np
import time
import os
import sys
import copy
import argparse
from PIL import Image
try:
import cPickle as pickle
except:
import pickle
import math
import modified_resnet_cifar
import modified_linear
import utils_pytorch
from utils_incremental.compute_features import compute_features
from utils_incremental.compute_accuracy import compute_accuracy
from utils_incremental.compute_confusion_matrix import compute_confusion_matrix
from utils_incremental.incremental_train_and_eval import incremental_train_and_eval
from utils_incremental.incremental_train_and_eval_MS import incremental_train_and_eval_MS
from utils_incremental.incremental_train_and_eval_LF import incremental_train_and_eval_LF
from utils_incremental.incremental_train_and_eval_MR_LF import incremental_train_and_eval_MR_LF
from utils_incremental.incremental_train_and_eval_AMR_LF import incremental_train_and_eval_AMR_LF
######### Modifiable Settings ##########
parser = argparse.ArgumentParser()
parser.add_argument('--dataset', default='cifar100', type=str)
parser.add_argument('--num_classes', default=100, type=int)
parser.add_argument('--nb_cl_fg', default=50, type=int,
                    help='the number of classes in first group')
parser.add_argument('--nb_cl', default=10, type=int,
                    help='Classes per group')
parser.add_argument('--nb_protos', default=20, type=int,
                    help='Number of prototypes per class at the end')
parser.add_argument('--nb_runs', default=1, type=int,
                    help='Number of runs (random ordering of classes at each run)')
parser.add_argument('--ckp_prefix', default=os.path.basename(sys.argv[0])[:-3], type=str,
                    help='Checkpoint prefix')
parser.add_argument('--epochs', default=160, type=int,
                    help='Epochs')
# FIX: "Temporature"/"distialltion" typos corrected in the help strings below.
parser.add_argument('--T', default=2, type=float,
                    help='Temperature for distillation')
parser.add_argument('--beta', default=0.25, type=float,
                    help='Beta for distillation')
parser.add_argument('--resume', action='store_true',
                    help='resume from checkpoint')
parser.add_argument('--fix_budget', action='store_true',
                    help='fix budget')
########################################
parser.add_argument('--mimic_score', action='store_true',
                    help='To mimic scores for cosine embedding')
parser.add_argument('--lw_ms', default=1, type=float,
                    help='loss weight for mimicking score')
########################################
#improved class incremental learning
parser.add_argument('--rs_ratio', default=0, type=float,
                    help='The ratio for resample')
parser.add_argument('--imprint_weights', action='store_true',
                    help='Imprint the weights for novel classes')
parser.add_argument('--less_forget', action='store_true',
                    help='Less forgetful')
parser.add_argument('--lamda', default=5, type=float,
                    help='Lamda for LF')
parser.add_argument('--adapt_lamda', action='store_true',
                    help='Adaptively change lamda')
parser.add_argument('--mr_loss', action='store_true',
                    help='Margin ranking loss v1')
parser.add_argument('--amr_loss', action='store_true',
                    help='Margin ranking loss v2')
parser.add_argument('--dist', default=0.5, type=float,
                    help='Dist for MarginRankingLoss')
parser.add_argument('--K', default=2, type=int,
                    help='K for MarginRankingLoss')
parser.add_argument('--lw_mr', default=1, type=float,
                    help='loss weight for margin ranking loss')
########################################
parser.add_argument('--random_seed', default=1993, type=int,
                    help='random seed')
args = parser.parse_args()
########################################
# The group size must evenly divide (and not exceed) the first group.
assert(args.nb_cl_fg % args.nb_cl == 0)
assert(args.nb_cl_fg >= args.nb_cl)
train_batch_size = 128       # Batch size for train
test_batch_size = 100        # Batch size for test
eval_batch_size = 128        # Batch size for eval
base_lr = 0.1                # Initial learning rate
lr_strat = [80, 120]         # Epochs where learning rate gets decreased
lr_factor = 0.1              # Learning rate decrease factor
custom_weight_decay = 5e-4   # Weight Decay
custom_momentum = 0.9        # Momentum
args.ckp_prefix = '{}_nb_cl_fg_{}_nb_cl_{}_nb_protos_{}'.format(args.ckp_prefix, args.nb_cl_fg, args.nb_cl, args.nb_protos)
np.random.seed(args.random_seed)  # Fix the random seed
print(args)
########################################
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# Random crop + horizontal flip for training; evaluation only normalizes.
transform_train = transforms.Compose([
    transforms.RandomCrop(32, padding=4),
    transforms.RandomHorizontalFlip(),
    transforms.ToTensor(),
    transforms.Normalize((0.5071, 0.4866, 0.4409), (0.2009, 0.1984, 0.2023)),
])
transform_test = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.5071, 0.4866, 0.4409), (0.2009, 0.1984, 0.2023)),
])
trainset = torchvision.datasets.CIFAR100(
    root='./data', train=True, download=True, transform=transform_train)
testset = torchvision.datasets.CIFAR100(
    root='./data', train=False, download=True, transform=transform_test)
# evalset is repeatedly re-pointed at different image arrays during the run.
evalset = torchvision.datasets.CIFAR100(
    root='./data', train=False, download=False, transform=transform_test)
# Initialization
dictionary_size = 500  # stored training images per class (CIFAR-100 has 500/class)
top1_acc_list_cumul = np.zeros((int(args.num_classes/args.nb_cl), 3, args.nb_runs))
top1_acc_list_ori = np.zeros((int(args.num_classes/args.nb_cl), 3, args.nb_runs))
X_train_total = np.array(trainset.train_data)
Y_train_total = np.array(trainset.train_labels)
X_valid_total = np.array(testset.test_data)
Y_valid_total = np.array(testset.test_labels)
# Launch the different runs
for iteration_total in range(args.nb_runs):
# Select the order for the class learning
order_name = "./checkpoint/seed_{}_{}_order_run_{}.pkl".format(args.random_seed, args.dataset, iteration_total)
print("Order name:{}".format(order_name))
if os.path.exists(order_name):
print("Loading orders")
order = utils_pytorch.unpickle(order_name)
else:
print("Generating orders")
order = np.arange(args.num_classes)
np.random.shuffle(order)
utils_pytorch.savepickle(order, order_name)
order_list = list(order)
print(order_list)
# Initialization of the variables for this run
X_valid_cumuls = []
X_protoset_cumuls = []
X_train_cumuls = []
Y_valid_cumuls = []
Y_protoset_cumuls = []
Y_train_cumuls = []
alpha_dr_herding = np.zeros((int(args.num_classes/args.nb_cl),dictionary_size,args.nb_cl),np.float32)
# The following contains all the training samples of the different classes
# because we want to compare our method with the theoretical case where all the training samples are stored
prototypes = np.zeros((args.num_classes,dictionary_size,X_train_total.shape[1],X_train_total.shape[2],X_train_total.shape[3]))
for orde in range(args.num_classes):
prototypes[orde,:,:,:,:] = X_train_total[np.where(Y_train_total==order[orde])]
# Index of the first training step: the base task covers nb_cl_fg classes,
# i.e. the first nb_cl_fg/nb_cl class-batches are trained in a single step.
start_iter = int(args.nb_cl_fg/args.nb_cl)-1
for iteration in range(start_iter, int(args.num_classes/args.nb_cl)):
    #init model
    if iteration == start_iter:
        ############################################################
        last_iter = 0
        ############################################################
        # Base step: fresh resnet32 classifying the first nb_cl_fg classes;
        # there is no previous model to distill from yet.
        tg_model = modified_resnet_cifar.resnet32(num_classes=args.nb_cl_fg)
        in_features = tg_model.fc.in_features
        out_features = tg_model.fc.out_features
        print("in_features:", in_features, "out_features:", out_features)
        ref_model = None
    elif iteration == start_iter+1:
        ############################################################
        last_iter = iteration
        ############################################################
        #increment classes
        # Snapshot the previous model as the frozen distillation reference.
        ref_model = copy.deepcopy(tg_model)
        in_features = tg_model.fc.in_features
        out_features = tg_model.fc.out_features
        print("in_features:", in_features, "out_features:", out_features)
        # First expansion: replace the single cosine layer with a split one;
        # fc1 keeps the old-class weights, fc2 holds the args.nb_cl new
        # classes, and the learnable scale sigma is carried over.
        new_fc = modified_linear.SplitCosineLinear(in_features, out_features, args.nb_cl)
        new_fc.fc1.weight.data = tg_model.fc.weight.data
        new_fc.sigma.data = tg_model.fc.sigma.data
        tg_model.fc = new_fc
        # Ratio of old classes to new classes, used to adapt lamda below.
        lamda_mult = out_features*1.0 / args.nb_cl
    else:
        ############################################################
        last_iter = iteration
        ############################################################
        ref_model = copy.deepcopy(tg_model)
        in_features = tg_model.fc.in_features
        out_features1 = tg_model.fc.fc1.out_features
        out_features2 = tg_model.fc.fc2.out_features
        print("in_features:", in_features, "out_features1:", \
              out_features1, "out_features2:", out_features2)
        # Later expansions: fold fc1+fc2 (all previously seen classes) into
        # the new fc1 and open a fresh fc2 for the incoming args.nb_cl classes.
        new_fc = modified_linear.SplitCosineLinear(in_features, out_features1+out_features2, args.nb_cl)
        new_fc.fc1.weight.data[:out_features1] = tg_model.fc.fc1.weight.data
        new_fc.fc1.weight.data[out_features1:] = tg_model.fc.fc2.weight.data
        new_fc.sigma.data = tg_model.fc.sigma.data
        tg_model.fc = new_fc
        lamda_mult = (out_features1+out_features2)*1.0 / (args.nb_cl)
    # Less-forget distillation weight, optionally scaled by the old/new ratio.
    if iteration > start_iter and args.less_forget and args.adapt_lamda:
        #cur_lamda = lamda_base * sqrt(num_old_classes/num_new_classes)
        cur_lamda = args.lamda * math.sqrt(lamda_mult)
    else:
        cur_lamda = args.lamda
    if iteration > start_iter and args.less_forget:
        print("###############################")
        print("Lamda for less forget is set to ", cur_lamda)
        print("###############################")
    # Prepare the training data for the current batch of classes
    actual_cl = order[range(last_iter*args.nb_cl,(iteration+1)*args.nb_cl)]
    # Boolean masks over the full pools selecting samples whose label belongs
    # to the classes trained at this step (the base step covers all batches
    # trained so far, since last_iter is 0 there).
    indices_train_10 = np.array([i in order[range(last_iter*args.nb_cl,(iteration+1)*args.nb_cl)] for i in Y_train_total])
    indices_test_10 = np.array([i in order[range(last_iter*args.nb_cl,(iteration+1)*args.nb_cl)] for i in Y_valid_total])
    X_train = X_train_total[indices_train_10]
    X_valid = X_valid_total[indices_test_10]
    # Cumulative pools: everything seen up to and including this step.
    X_valid_cumuls.append(X_valid)
    X_train_cumuls.append(X_train)
    X_valid_cumul = np.concatenate(X_valid_cumuls)
    X_train_cumul = np.concatenate(X_train_cumuls)
    Y_train = Y_train_total[indices_train_10]
    Y_valid = Y_valid_total[indices_test_10]
    Y_valid_cumuls.append(Y_valid)
    Y_train_cumuls.append(Y_train)
    Y_valid_cumul = np.concatenate(Y_valid_cumuls)
    Y_train_cumul = np.concatenate(Y_train_cumuls)
    # Add the stored exemplars to the training data
    if iteration == start_iter:
        # Keep the base-step validation split to measure forgetting later on.
        X_valid_ori = X_valid
        Y_valid_ori = Y_valid
    else:
        X_protoset = np.concatenate(X_protoset_cumuls)
        Y_protoset = np.concatenate(Y_protoset_cumuls)
        if args.rs_ratio > 0:
            # Oversampling weights so exemplars form rs_ratio of each epoch:
            #1/rs_ratio = (len(X_train)+len(X_protoset)*scale_factor)/(len(X_protoset)*scale_factor)
            scale_factor = (len(X_train) * args.rs_ratio) / (len(X_protoset) * (1 - args.rs_ratio))
            rs_sample_weights = np.concatenate((np.ones(len(X_train)), np.ones(len(X_protoset))*scale_factor))
            #number of samples per epoch, undersample on the new classes
            #rs_num_samples = len(X_train) + len(X_protoset)
            rs_num_samples = int(len(X_train) / (1 - args.rs_ratio))
            print("X_train:{}, X_protoset:{}, rs_num_samples:{}".format(len(X_train), len(X_protoset), rs_num_samples))
        X_train = np.concatenate((X_train,X_protoset),axis=0)
        Y_train = np.concatenate((Y_train,Y_protoset))
    # Launch the training loop
    print('Batch of classes number {0} arrives ...'.format(iteration+1))
    # Remap original labels onto contiguous ids following the class order.
    map_Y_train = np.array([order_list.index(i) for i in Y_train])
    map_Y_valid_cumul = np.array([order_list.index(i) for i in Y_valid_cumul])
    #imprint weights
    # Initialize the new-class cosine weights (fc2) from the mean feature of
    # each new class, rescaled to the average norm of the old embeddings.
    if iteration > start_iter and args.imprint_weights:
        #input: tg_model, X_train, map_Y_train
        #class_start = iteration*nb_cl class_end = (iteration+1)*nb_cl
        print("Imprint weights")
        #########################################
        #compute the average norm of old embdding
        old_embedding_norm = tg_model.fc.fc1.weight.data.norm(dim=1, keepdim=True)
        average_old_embedding_norm = torch.mean(old_embedding_norm, dim=0).to('cpu').type(torch.DoubleTensor)
        #########################################
        # Feature extractor = the model minus its classifier head.
        tg_feature_model = nn.Sequential(*list(tg_model.children())[:-1])
        num_features = tg_model.fc.in_features
        novel_embedding = torch.zeros((args.nb_cl, num_features))
        for cls_idx in range(iteration*args.nb_cl, (iteration+1)*args.nb_cl):
            cls_indices = np.array([i == cls_idx for i in map_Y_train])
            # Every new class is expected to contribute exactly
            # dictionary_size training images.
            assert(len(np.where(cls_indices==1)[0])==dictionary_size)
            evalset.test_data = X_train[cls_indices].astype('uint8')
            evalset.test_labels = np.zeros(evalset.test_data.shape[0]) #zero labels
            evalloader = torch.utils.data.DataLoader(evalset, batch_size=eval_batch_size,
                shuffle=False, num_workers=2)
            num_samples = evalset.test_data.shape[0]
            cls_features = compute_features(tg_feature_model, evalloader, num_samples, num_features)
            #cls_features = cls_features.T
            #cls_features = cls_features / np.linalg.norm(cls_features,axis=0)
            #cls_embedding = np.mean(cls_features, axis=1)
            # Mean of the L2-normalized per-sample features ...
            norm_features = F.normalize(torch.from_numpy(cls_features), p=2, dim=1)
            cls_embedding = torch.mean(norm_features, dim=0)
            #novel_embedding[cls_idx-iteration*args.nb_cl] = cls_embedding
            # ... renormalized and scaled to match the old-class weight norms.
            novel_embedding[cls_idx-iteration*args.nb_cl] = F.normalize(cls_embedding, p=2, dim=0) * average_old_embedding_norm
        tg_model.to(device)
        #torch.save(tg_model, "tg_model_before_imprint_weights.pth")
        tg_model.fc.fc2.weight.data = novel_embedding.to(device)
        #torch.save(tg_model, "tg_model_after_imprint_weights.pth")
    ############################################################
    # Build the train/test loaders for this step.
    trainset.train_data = X_train.astype('uint8')
    trainset.train_labels = map_Y_train
    if iteration > start_iter and args.rs_ratio > 0 and scale_factor > 1:
        print("Weights from sampling:", rs_sample_weights)
        index1 = np.where(rs_sample_weights>1)[0]
        index2 = np.where(map_Y_train<iteration*args.nb_cl)[0]
        # Sanity check: the up-weighted samples are exactly the old-class
        # exemplars appended at the end of X_train.
        assert((index1==index2).all())
        train_sampler = torch.utils.data.sampler.WeightedRandomSampler(rs_sample_weights, rs_num_samples)
        # shuffle must be False when a sampler is supplied.
        trainloader = torch.utils.data.DataLoader(trainset, batch_size=train_batch_size, \
            shuffle=False, sampler=train_sampler, num_workers=2)
    else:
        trainloader = torch.utils.data.DataLoader(trainset, batch_size=train_batch_size,
            shuffle=True, num_workers=2)
    # Test on the cumulative validation set (all classes seen so far).
    testset.test_data = X_valid_cumul.astype('uint8')
    testset.test_labels = map_Y_valid_cumul
    testloader = torch.utils.data.DataLoader(testset, batch_size=test_batch_size,
        shuffle=False, num_workers=2)
    print('Max and Min of train labels: {}, {}'.format(min(map_Y_train), max(map_Y_train)))
    print('Max and Min of valid labels: {}, {}'.format(min(map_Y_valid_cumul), max(map_Y_valid_cumul)))
    ##############################################################
    ckp_name = './checkpoint/{}_run_{}_iteration_{}_model.pth'.format(args.ckp_prefix, iteration_total, iteration)
    print('ckp_name', ckp_name)
    if args.resume and os.path.exists(ckp_name):
        print("###############################")
        print("Loading models from checkpoint")
        tg_model = torch.load(ckp_name)
        print("###############################")
    else:
        ###############################
        if iteration > start_iter and args.less_forget:
            #fix the embedding of old classes
            # Freeze the old-class cosine weights by giving fc1 lr=0.
            ignored_params = list(map(id, tg_model.fc.fc1.parameters()))
            base_params = filter(lambda p: id(p) not in ignored_params, \
                tg_model.parameters())
            tg_params =[{'params': base_params, 'lr': base_lr, 'weight_decay': custom_weight_decay}, \
                {'params': tg_model.fc.fc1.parameters(), 'lr': 0, 'weight_decay': 0}]
        else:
            tg_params = tg_model.parameters()
        ###############################
        tg_model = tg_model.to(device)
        if iteration > start_iter:
            ref_model = ref_model.to(device)
        tg_optimizer = optim.SGD(tg_params, lr=base_lr, momentum=custom_momentum, weight_decay=custom_weight_decay)
        tg_lr_scheduler = lr_scheduler.MultiStepLR(tg_optimizer, milestones=lr_strat, gamma=lr_factor)
        ###############################
        # Dispatch to the training routine matching the configured losses:
        # margin-ranking / adaptive-margin-ranking / plain less-forget /
        # mimic-score distillation / vanilla knowledge distillation.
        if args.less_forget and args.mr_loss:
            print("incremental_train_and_eval_MR_LF")
            tg_model = incremental_train_and_eval_MR_LF(args.epochs, tg_model, ref_model, tg_optimizer, tg_lr_scheduler, \
                trainloader, testloader, \
                iteration, start_iter, \
                cur_lamda, \
                args.dist, args.K, args.lw_mr)
        elif args.less_forget and args.amr_loss:
            print("incremental_train_and_eval_AMR_LF")
            tg_model = incremental_train_and_eval_AMR_LF(args.epochs, tg_model, ref_model, tg_optimizer, tg_lr_scheduler, \
                trainloader, testloader, \
                iteration, start_iter, \
                cur_lamda, \
                args.dist, args.K, args.lw_mr)
        else:
            if args.less_forget:
                print("incremental_train_and_eval_LF")
                tg_model = incremental_train_and_eval_LF(args.epochs, tg_model, ref_model, tg_optimizer, tg_lr_scheduler, \
                    trainloader, testloader, \
                    iteration, start_iter, \
                    cur_lamda)
            else:
                if args.mimic_score:
                    print("incremental_train_and_eval_MS")
                    tg_model = incremental_train_and_eval_MS(args.epochs, tg_model, ref_model, tg_optimizer, tg_lr_scheduler, \
                        trainloader, testloader, \
                        iteration, start_iter,
                        args.lw_ms)
                else:
                    print("incremental_train_and_eval")
                    tg_model = incremental_train_and_eval(args.epochs, tg_model, ref_model, tg_optimizer, tg_lr_scheduler, \
                        trainloader, testloader, \
                        iteration, start_iter,
                        args.T, args.beta)
        if not os.path.isdir('checkpoint'):
            os.mkdir('checkpoint')
        torch.save(tg_model, ckp_name)
### Exemplars
if args.fix_budget:
nb_protos_cl = int(np.ceil(args.nb_protos*100./args.nb_cl/(iteration+1)))
else:
nb_protos_cl = args.nb_protos
tg_feature_model = nn.Sequential(*list(tg_model.children())[:-1])
num_features = tg_model.fc.in_features
# Herding
print('Updating exemplar set...')
for iter_dico in range(last_iter*args.nb_cl, (iteration+1)*args.nb_cl):
# Possible exemplars in the feature space and projected on the L2 sphere
evalset.test_data = prototypes[iter_dico].astype('uint8')
evalset.test_labels = np.zeros(evalset.test_data.shape[0]) #zero labels
evalloader = torch.utils.data.DataLoader(evalset, batch_size=eval_batch_size,
shuffle=False, num_workers=2)
num_samples = evalset.test_data.shape[0]
mapped_prototypes = compute_features(tg_feature_model, evalloader, num_samples, num_features)
D = mapped_prototypes.T
D = D/np.linalg.norm(D,axis=0)
# Herding procedure : ranking of the potential exemplars
mu = np.mean(D,axis=1)
index1 = int(iter_dico/args.nb_cl)
index2 = iter_dico % args.nb_cl
alpha_dr_herding[index1,:,index2] = alpha_dr_herding[index1,:,index2]*0
w_t = mu
iter_herding = 0
iter_herding_eff = 0
while not(np.sum(alpha_dr_herding[index1,:,index2]!=0)==min(nb_protos_cl,500)) and iter_herding_eff<1000:
tmp_t = np.dot(w_t,D)
ind_max = np.argmax(tmp_t)
iter_herding_eff += 1
if alpha_dr_herding[index1,ind_max,index2] == 0:
alpha_dr_herding[index1,ind_max,index2] = 1+iter_herding
iter_herding += 1
w_t = w_t+mu-D[:,ind_max]
# Prepare the protoset
X_protoset_cumuls = []
Y_protoset_cumuls = []
# Class means for iCaRL and NCM + Storing the selected exemplars in the protoset
print('Computing mean-of_exemplars and theoretical mean...')
class_means = np.zeros((64,100,2))
for iteration2 in range(iteration+1):
for iter_dico in range(args.nb_cl):
current_cl = order[range(iteration2*args.nb_cl,(iteration2+1)*args.nb_cl)]
# Collect data in the feature space for each class
evalset.test_data = prototypes[iteration2*args.nb_cl+iter_dico].astype('uint8')
evalset.test_labels = np.zeros(evalset.test_data.shape[0]) #zero labels
evalloader = torch.utils.data.DataLoader(evalset, batch_size=eval_batch_size,
shuffle=False, num_workers=2)
num_samples = evalset.test_data.shape[0]
mapped_prototypes = compute_features(tg_feature_model, evalloader, num_samples, num_features)
D = mapped_prototypes.T
D = D/np.linalg.norm(D,axis=0)
# Flipped version also
evalset.test_data = prototypes[iteration2*args.nb_cl+iter_dico][:,:,:,::-1].astype('uint8')
evalloader = torch.utils.data.DataLoader(evalset, batch_size=eval_batch_size,
shuffle=False, num_workers=2)
mapped_prototypes2 = compute_features(tg_feature_model, evalloader, num_samples, num_features)
D2 = mapped_prototypes2.T
D2 = D2/np.linalg.norm(D2,axis=0)
# iCaRL
alph = alpha_dr_herding[iteration2,:,iter_dico]
alph = (alph>0)*(alph<nb_protos_cl+1)*1.
X_protoset_cumuls.append(prototypes[iteration2*args.nb_cl+iter_dico,np.where(alph==1)[0]])
Y_protoset_cumuls.append(order[iteration2*args.nb_cl+iter_dico]*np.ones(len(np.where(alph==1)[0])))
alph = alph/np.sum(alph)
class_means[:,current_cl[iter_dico],0] = (np.dot(D,alph)+np.dot(D2,alph))/2
class_means[:,current_cl[iter_dico],0] /= np.linalg.norm(class_means[:,current_cl[iter_dico],0])
# Normal NCM
alph = np.ones(dictionary_size)/dictionary_size
class_means[:,current_cl[iter_dico],1] = (np.dot(D,alph)+np.dot(D2,alph))/2
class_means[:,current_cl[iter_dico],1] /= np.linalg.norm(class_means[:,current_cl[iter_dico],1])
torch.save(class_means, \
'./checkpoint/{}_run_{}_iteration_{}_class_means.pth'.format(args.ckp_prefix,iteration_total, iteration))
current_means = class_means[:, order[range(0,(iteration+1)*args.nb_cl)]]
    ##############################################################
    # Calculate validation error of model on the first nb_cl classes:
    map_Y_valid_ori = np.array([order_list.index(i) for i in Y_valid_ori])
    print('Computing accuracy on the original batch of classes...')
    evalset.test_data = X_valid_ori.astype('uint8')
    evalset.test_labels = map_Y_valid_ori
    evalloader = torch.utils.data.DataLoader(evalset, batch_size=eval_batch_size,
        shuffle=False, num_workers=2)
    ori_acc = compute_accuracy(tg_model, tg_feature_model, current_means, evalloader)
    # Presumably one accuracy per classifier (CNN / iCaRL / NCM) -- verify
    # against compute_accuracy's return order.
    top1_acc_list_ori[iteration, :, iteration_total] = np.array(ori_acc).T
    ##############################################################
    # Calculate validation error of model on the cumul of classes:
    map_Y_valid_cumul = np.array([order_list.index(i) for i in Y_valid_cumul])
    print('Computing cumulative accuracy...')
    evalset.test_data = X_valid_cumul.astype('uint8')
    evalset.test_labels = map_Y_valid_cumul
    evalloader = torch.utils.data.DataLoader(evalset, batch_size=eval_batch_size,
        shuffle=False, num_workers=2)
    cumul_acc = compute_accuracy(tg_model, tg_feature_model, current_means, evalloader)
    top1_acc_list_cumul[iteration, :, iteration_total] = np.array(cumul_acc).T
    ##############################################################
    # Calculate confusion matrix (on the cumulative evalloader built above)
    print('Computing confusion matrix...')
    cm = compute_confusion_matrix(tg_model, tg_feature_model, current_means, evalloader)
    cm_name = './checkpoint/{}_run_{}_iteration_{}_confusion_matrix.pth'.format(args.ckp_prefix,iteration_total, iteration)
    with open(cm_name, 'wb') as f:
        pickle.dump(cm, f, 2) #for reading with Python 2
    ##############################################################
    # Final save of the data (overwritten each step so the latest results
    # survive an interrupted run)
    torch.save(top1_acc_list_ori, \
        './checkpoint/{}_run_{}_top1_acc_list_ori.pth'.format(args.ckp_prefix, iteration_total))
    torch.save(top1_acc_list_cumul, \
        './checkpoint/{}_run_{}_top1_acc_list_cumul.pth'.format(args.ckp_prefix, iteration_total))
| 26,967 | 53.370968 | 131 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.