repo stringlengths 1 99 | file stringlengths 13 215 | code stringlengths 12 59.2M | file_length int64 12 59.2M | avg_line_length float64 3.82 1.48M | max_line_length int64 12 2.51M | extension_type stringclasses 1
value |
|---|---|---|---|---|---|---|
NeuralSpeech | NeuralSpeech-master/BinauralGrad/src/binauralgrad/warping.py | """
Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
"""
# reference: https://github.com/facebookresearch/BinauralSpeechSynthesis/blob/main/src/warping.py
import torch as th
import torch.nn as nn
import torch.nn.functional as F
class TimeWarperFunction(th.autograd.Function):
    """Custom autograd op: time-warp a signal by linearly interpolating at
    fractional absolute sample positions given by `warpfield`.

    Backward routes gradients both to the two integer source samples used for
    interpolation (via scatter_add) and to the warpfield itself.
    """
    @staticmethod
    def forward(ctx, input, warpfield):
        '''
        :param ctx: autograd context
        :param input: input signal (B x 2 x T)
        :param warpfield: the corresponding warpfield (B x 2 x T)
        :return: the warped signal (B x 2 x T)
        '''
        ctx.save_for_backward(input, warpfield)
        # compute index list to lookup warped input values
        idx_left = warpfield.floor().type(th.long)
        # clamp so ceil() of the last valid position cannot index past T-1
        idx_right = th.clamp(warpfield.ceil().type(th.long), max=input.shape[-1]-1)
        # compute weight for linear interpolation (fractional part of the position)
        alpha = warpfield - warpfield.floor()
        # linear interpolation between the two neighbouring samples
        output = (1 - alpha) * th.gather(input, 2, idx_left) + alpha * th.gather(input, 2, idx_right)
        return output
    @staticmethod
    def backward(ctx, grad_output):
        input, warpfield = ctx.saved_tensors
        # recompute the same index lists used in forward
        idx_left = warpfield.floor().type(th.long)
        idx_right = th.clamp(warpfield.ceil().type(th.long), max=input.shape[-1]-1)
        # warpfield gradient: d(output)/d(warpfield) = input[right] - input[left]
        grad_warpfield = th.gather(input, 2, idx_right) - th.gather(input, 2, idx_left)
        grad_warpfield = grad_output * grad_warpfield
        # input gradient: scatter the interpolation weights back to the source samples
        # NOTE(review): th.zeros defaults to float32 — assumes input is float32; confirm for mixed-precision use
        grad_input = th.zeros(input.shape, device=input.device)
        alpha = warpfield - warpfield.floor()
        grad_input = grad_input.scatter_add(2, idx_left, grad_output * (1 - alpha)) + \
                     grad_input.scatter_add(2, idx_right, grad_output * alpha)
        return grad_input, grad_warpfield
class TimeWarper(nn.Module):
    """nn.Module wrapper around TimeWarperFunction.

    Converts a relative warpfield (sample offsets) to absolute, clamped lookup
    positions and applies the differentiable time warp.
    """
    def __init__(self):
        super().__init__()
        self.warper = TimeWarperFunction().apply
    def _to_absolute_positions(self, warpfield, seq_length):
        # translate warpfield from relative warp indices to absolute indices ([0...T-1] + warpfield)
        # Allocate the index ramp directly on the warpfield's device: the original
        # `.to(...) if warpfield.is_cuda else ...` dance only handled CUDA and
        # broke on other accelerators (e.g. MPS); this also avoids a host->device copy.
        temp_range = th.arange(seq_length, dtype=th.float, device=warpfield.device)
        # clamp so every lookup position stays inside [0, T-1]
        return th.clamp(warpfield + temp_range[None, None, :], min=0, max=seq_length-1)
    def forward(self, input, warpfield):
        '''
        :param input: audio signal to be warped (B x 2 x T)
        :param warpfield: the corresponding warpfield (B x 2 x T)
        :return: the warped signal (B x 2 x T)
        '''
        warpfield = self._to_absolute_positions(warpfield, input.shape[-1])
        warped = self.warper(input, warpfield)
        return warped
class MonotoneTimeWarper(TimeWarper):
    """Time warper whose lookup positions are forced to be non-decreasing."""
    def forward(self, input, warpfield):
        '''
        :param input: audio signal to be warped (B x 2 x T)
        :param warpfield: the corresponding warpfield (B x 2 x T)
        :return: the warped signal (B x 2 x T), ensured to be monotonous
        '''
        positions = self._to_absolute_positions(warpfield, input.shape[-1])
        # running maximum along time makes the lookup positions monotone
        monotone_positions, _ = th.cummax(positions, dim=-1)
        return self.warper(input, monotone_positions)
class GeometricTimeWarper(TimeWarper):
    """Warp audio by the propagation delay implied by source/listener displacement."""
    def __init__(self, sampling_rate=48000):
        super().__init__()
        self.sampling_rate = sampling_rate
    def displacements2warpfield(self, displacements, seq_length):
        # per-frame Euclidean distance, upsampled to the audio rate
        dist = th.sum(displacements**2, dim=2) ** 0.5
        dist = F.interpolate(dist, size=seq_length)
        # convert metres to a (negative) delay in samples using c = 343 m/s
        return -dist / 343.0 * self.sampling_rate
    def forward(self, input, displacements):
        '''
        :param input: audio signal to be warped (B x 2 x T)
        :param displacements: sequence of 3D displacement vectors for geometric warping (B x 3 x T)
        :return: the warped signal (B x 2 x T)
        '''
        field = self.displacements2warpfield(displacements, input.shape[-1])
        return super().forward(input, field)
| 4,372 | 37.699115 | 101 | py |
NeuralSpeech | NeuralSpeech-master/BinauralGrad/src/binauralgrad/learner.py | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import numpy as np
import os
import torch
import torch.nn as nn
from torch.nn.parallel import DistributedDataParallel
#from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm
from binauralgrad.losses import PhaseLoss
from binauralgrad.dataset import from_path
from binauralgrad.model import BinauralGrad
from binauralgrad.params import AttrDict
def _nested_map(struct, map_fn):
if isinstance(struct, tuple):
return tuple(_nested_map(x, map_fn) for x in struct)
if isinstance(struct, list):
return [_nested_map(x, map_fn) for x in struct]
if isinstance(struct, dict):
return { k: _nested_map(v, map_fn) for k, v in struct.items() }
return map_fn(struct)
class BinauralGradLearner:
  """Training loop for the BinauralGrad diffusion model.

  Owns checkpointing, optional AMP (fp16) scaling, the precomputed diffusion
  noise schedule, and the combined regression + optional phase loss.
  """
  def __init__(self, model_dir, model, dataset, optimizer, params, *args, **kwargs):
    os.makedirs(model_dir, exist_ok=True)
    self.model_dir = model_dir
    self.model = model
    self.dataset = dataset
    self.optimizer = optimizer
    self.params = params
    # Optional phase-aware auxiliary loss; `use_mstft` switches to the
    # multi-resolution STFT variant (imported lazily so the dependency is
    # only needed when enabled).
    if params.lambda_phase != 0.0:
      if not getattr(params, "use_mstft", False):
        self.phase_loss = PhaseLoss(sample_rate=self.params.sample_rate)
      else:
        from binauralgrad.mstft_loss import MultiResolutionSTFTLoss
        self.phase_loss = MultiResolutionSTFTLoss(
          sample_rate=self.params.sample_rate,
          w_phs=1.0,
          device="cuda")
    # AMP context + gradient scaler; both no-ops when fp16 is disabled
    self.autocast = torch.cuda.amp.autocast(enabled=kwargs.get('fp16', False))
    self.scaler = torch.cuda.amp.GradScaler(enabled=kwargs.get('fp16', False))
    self.binaural_type = kwargs.get('binaural_type', "")
    self.step = 0
    self.is_master = True
    # cumulative noise levels: noise_level[t] = prod_{s<=t} (1 - beta_s)
    beta = np.array(self.params.noise_schedule)
    noise_level = np.cumprod(1 - beta)
    self.noise_level = torch.tensor(noise_level.astype(np.float32))
    if getattr(params, "use_l2_loss", False):
      # if params.use_l2_loss:
      self.loss_fn = nn.MSELoss()
    else:
      self.loss_fn = nn.L1Loss()
    self.summary_writer = None
  def state_dict(self):
    """Serialize step, model, optimizer, params and AMP scaler (tensors moved to CPU)."""
    # unwrap DistributedDataParallel if present
    if hasattr(self.model, 'module') and isinstance(self.model.module, nn.Module):
      model_state = self.model.module.state_dict()
    else:
      model_state = self.model.state_dict()
    return {
      'step': self.step,
      'model': { k: v.cpu() if isinstance(v, torch.Tensor) else v for k, v in model_state.items() },
      'optimizer': { k: v.cpu() if isinstance(v, torch.Tensor) else v for k, v in self.optimizer.state_dict().items() },
      'params': dict(self.params),
      'scaler': self.scaler.state_dict(),
    }
  def load_state_dict(self, state_dict):
    """Restore the state produced by `state_dict` (handles DDP-wrapped models)."""
    if hasattr(self.model, 'module') and isinstance(self.model.module, nn.Module):
      self.model.module.load_state_dict(state_dict['model'])
    else:
      self.model.load_state_dict(state_dict['model'])
    self.optimizer.load_state_dict(state_dict['optimizer'])
    self.scaler.load_state_dict(state_dict['scaler'])
    self.step = state_dict['step']
  def save_to_checkpoint(self, filename='weights'):
    """Write the current training state to `<model_dir>/...-<step>.pt`."""
    # NOTE(review): '(unknown)' looks like a mangled placeholder (probably
    # `{filename}` upstream) — confirm against the original repository.
    save_basename = f'(unknown)-{self.step}.pt'
    save_name = f'{self.model_dir}/{save_basename}'
    link_name = f'{self.model_dir}/(unknown).pt'
    torch.save(self.state_dict(), save_name)
    #if os.name == 'nt':
    #  torch.save(self.state_dict(), link_name)
    #else:
    #  if os.path.islink(link_name):
    #    os.unlink(link_name)
    #  os.symlink(save_basename, link_name)
  def restore_from_checkpoint(self, filename='weights'):
    """Try to resume from the latest checkpoint; return True on success."""
    try:
      checkpoint = torch.load(f'{self.model_dir}/(unknown).pt')
      self.load_state_dict(checkpoint)
      return True
    except FileNotFoundError:
      return False
  def train(self, max_steps=None):
    """Run the training loop until `max_steps` (or forever when None)."""
    device = next(self.model.parameters()).device
    while True:
      # only the master replica shows a progress bar
      for features in tqdm(self.dataset, desc=f'Epoch {self.step // len(self.dataset)}') if self.is_master else self.dataset:
        if max_steps is not None and self.step >= max_steps:
          return
        features = _nested_map(features, lambda x: x.to(device) if isinstance(x, torch.Tensor) else x)
        loss = self.train_step(features)
        if torch.isnan(loss).any():
          raise RuntimeError(f'Detected NaN loss at step {self.step}.')
        if self.is_master:
          #if self.step % 50 == 0:
          #  self._write_summary(self.step, features, loss)
          if self.step % (len(self.dataset) * 5)== 0: #save ckpt per 10 epoch
            self.save_to_checkpoint()
        self.step += 1
  def train_step(self, features):
    """One diffusion training step: noise the target, predict the noise, backprop.

    Returns the scalar loss tensor for this batch.
    """
    # zero gradients without the extra memory traffic of optimizer.zero_grad()
    for param in self.model.parameters():
      param.grad = None
    if self.binaural_type:
      audio = features['audio']
      mono = features['mono']
      binaural_geowarp = features['binaural_geowarp']
      view = features['view']
      mean_condition = features['mean_condition']
      # the mean-condition variant trains the model to predict the channel mean
      if getattr(self.params, "predict_mean_condition", False):
        audio = mean_condition
    else:
      audio = features['audio']
      spectrogram = features['spectrogram']
      mean_condition = None
    N, channel, T = audio.shape
    device = audio.device
    self.noise_level = self.noise_level.to(device)
    with self.autocast:
      # sample a random diffusion timestep per example
      t = torch.randint(0, len(self.params.noise_schedule), [N], device=audio.device)
      noise_scale = self.noise_level[t[:, None].repeat(1, channel)].unsqueeze(2)
      noise_scale_sqrt = noise_scale**0.5
      noise = torch.randn_like(audio)
      #print(audio.shape, t.shape, noise_scale.shape)
      # forward diffusion: q(x_t | x_0)
      noisy_audio = noise_scale_sqrt * audio + (1.0 - noise_scale)**0.5 * noise
      if self.binaural_type:
        if self.params.loss_per_layer != 0:
          predicted, extra_output, _ = self.model(noisy_audio, t, geowarp=binaural_geowarp, view=view, mono=mono, mean_condition=mean_condition)
        else:
          # NOTE(review): BinauralGrad.forward returns a tuple even when
          # loss_per_layer == 0; this path assumes a bare tensor — confirm.
          predicted = self.model(noisy_audio, t, spectrogram)
      loss = self.loss_fn(noise, predicted)
      if self.params.loss_per_layer != 0:
        # auxiliary per-layer noise predictions stacked on the channel dim
        extra_loss = self.loss_fn(torch.cat([noise] * (len(extra_output)), dim=1), torch.cat(extra_output, dim=1))
        if self.params.lambda_phase != 0.0:
          noisy_extra = torch.cat([noise_scale_sqrt * audio] * (len(extra_output)), dim=1) + (1.0 - torch.cat([noise_scale] * (len(extra_output)), dim=1))**0.5 * torch.cat(extra_output, dim=1)
          extra_loss += self.params.lambda_phase * self.phase_loss(torch.cat([noisy_audio] * (len(extra_output)), dim=1), noisy_extra)
        loss += extra_loss
      else:
        if self.params.lambda_phase != 0.0:
          noisy_predict = noise_scale_sqrt * audio + (1.0 - noise_scale)**0.5 * predicted
          loss += self.params.lambda_phase * self.phase_loss(noisy_audio, noisy_predict)
    # AMP-aware backward, gradient clipping, and optimizer step
    self.scaler.scale(loss).backward()
    self.scaler.unscale_(self.optimizer)
    self.grad_norm = nn.utils.clip_grad_norm_(self.model.parameters(), self.params.max_grad_norm or 1e9)
    self.scaler.step(self.optimizer)
    self.scaler.update()
    return loss
  def _write_summary(self, step, features, loss):
    """Log loss and grad norm to TensorBoard.

    NOTE(review): the SummaryWriter import at the top of this file is
    commented out — calling this method as-is raises NameError; re-enable
    the import before using it.
    """
    writer = self.summary_writer or SummaryWriter(self.model_dir, purge_step=step)
    writer.add_scalar('train/loss', loss, step)
    writer.add_scalar('train/grad_norm', self.grad_norm, step)
    writer.flush()
    self.summary_writer = writer
def _train_impl(replica_id, model, dataset, args, params, binaural_type=""):
torch.backends.cudnn.benchmark = True
opt = torch.optim.Adam(model.parameters(), lr=params.learning_rate)
learner = BinauralGradLearner(args.model_dir, model, dataset, opt, params, fp16=args.fp16, binaural_type=binaural_type)
learner.is_master = (replica_id == 0)
learner.restore_from_checkpoint()
learner.train(max_steps=args.max_steps)
def train(args, params):
  """Single-process (single-GPU) training entry point."""
  binaural_type = getattr(args, "binaural_type", "")
  dataset = from_path(args.data_dirs, params, binaural_type)
  model = BinauralGrad(params, binaural_type=binaural_type).cuda()
  _train_impl(0, model, dataset, args, params, binaural_type=binaural_type)
def train_distributed(replica_id, replica_count, port, args, params):
  """Per-process entry point for multi-GPU training over NCCL."""
  os.environ['MASTER_ADDR'] = 'localhost'
  os.environ['MASTER_PORT'] = str(port)
  torch.distributed.init_process_group('nccl', rank=replica_id, world_size=replica_count)
  binaural_type = getattr(args, "binaural_type", "")
  dataset = from_path(args.data_dirs, params, binaural_type, is_distributed=True)
  # pin this process to its own GPU before building the model
  device = torch.device('cuda', replica_id)
  torch.cuda.set_device(device)
  model = BinauralGrad(params, binaural_type=binaural_type).to(device)
  #print(model)
  model = DistributedDataParallel(model, device_ids=[replica_id])
  _train_impl(replica_id, model, dataset, args, params, binaural_type=binaural_type)
| 8,812 | 40.375587 | 192 | py |
NeuralSpeech | NeuralSpeech-master/BinauralGrad/src/binauralgrad/model.py | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import scipy
from scipy.spatial.transform import Rotation as R
from math import sqrt
Linear = nn.Linear
ConvTranspose2d = nn.ConvTranspose2d
def Conv1d(*args, **kwargs):
  """Build an nn.Conv1d whose weight is Kaiming-normal initialized."""
  conv = nn.Conv1d(*args, **kwargs)
  nn.init.kaiming_normal_(conv.weight)
  return conv
@torch.jit.script
def silu(x):
  # SiLU (swish) activation: x * sigmoid(x)
  return torch.sigmoid(x) * x
class DiffusionEmbedding(nn.Module):
  """Sinusoidal embedding of the diffusion step, projected through two Linear layers.

  Integer steps index a precomputed 128-dim [sin, cos] table; float steps are
  linearly interpolated between neighbouring table rows.
  """
  def __init__(self, max_steps):
    super().__init__()
    # fixed sinusoidal table; excluded from checkpoints (persistent=False)
    self.register_buffer('embedding', self._build_embedding(max_steps), persistent=False)
    self.projection1 = Linear(128, 512)
    self.projection2 = Linear(512, 512)
  def forward(self, diffusion_step):
    # integer steps -> direct lookup; float steps -> linear interpolation
    if diffusion_step.dtype in [torch.int32, torch.int64]:
      x = self.embedding[diffusion_step]
    else:
      x = self._lerp_embedding(diffusion_step)
    x = self.projection1(x)
    x = silu(x)
    x = self.projection2(x)
    x = silu(x)
    return x
  def _lerp_embedding(self, t):
    # NOTE(review): assumes 0 <= t <= max_steps - 1 so ceil() stays in range — confirm callers
    low_idx = torch.floor(t).long()
    high_idx = torch.ceil(t).long()
    low = self.embedding[low_idx]
    high = self.embedding[high_idx]
    return low + (high - low) * (t - low_idx)
  def _build_embedding(self, max_steps):
    # 64 geometrically spaced frequencies -> [sin, cos] gives 128 dims per step
    steps = torch.arange(max_steps).unsqueeze(1)  # [T,1]
    dims = torch.arange(64).unsqueeze(0)          # [1,64]
    table = steps * 10.0**(dims * 4.0 / 63.0)     # [T,64]
    table = torch.cat([torch.sin(table), torch.cos(table)], dim=1)
    return table
class BinauralPreNet(nn.Module):
  """Fuse the DSP conditioner signals and the transmitter view into an
  n_mels-channel conditioner for the residual stack.

  The DSP branch optionally stacks the mono source and/or the mean condition
  onto the geometry-warped signal before convolving.
  """
  def __init__(self, n_mels, binaural_type="", addmono=False, use_mean_condition=False,
               predict_mean_condition=False):
    super().__init__()
    # view branch: 7 pose channels -> 40 features
    self.conv_view1 = torch.nn.Conv1d(7, 20, 3, padding=1)
    self.conv_view2 = torch.nn.Conv1d(20, 40, 3, padding=1)
    self.addmono = addmono
    self.use_mean_condition = use_mean_condition
    self.predict_mean_condition = predict_mean_condition
    # DSP branch input width depends on which conditioners are stacked
    if addmono:
      dsp_in = 4 if use_mean_condition else 3
    else:
      dsp_in = 3 if use_mean_condition else 2
    self.conv_dsp1 = torch.nn.Conv1d(dsp_in, 20, 3, padding=1)
    self.conv_dsp2 = torch.nn.Conv1d(20, 40, 3, padding=1)
    # fuse both 40-channel branches down to n_mels channels
    self.conv = torch.nn.Conv1d(80, n_mels, 3, padding=1)
  def forward(self, geowarp, view, mono, mean_condition):
    # stack optional conditioners onto the geometry-warped signal
    if self.addmono:
      if self.use_mean_condition:
        geowarp = torch.cat([geowarp, mono, mean_condition], axis=1)
      else:
        geowarp = torch.cat([geowarp, mono], axis=1)
    dsp = F.leaky_relu(self.conv_dsp1(geowarp), 0.4)
    dsp = F.leaky_relu(self.conv_dsp2(dsp), 0.4)
    pose = F.leaky_relu(self.conv_view1(view), 0.4)
    pose = F.leaky_relu(self.conv_view2(pose), 0.4)
    fused = F.leaky_relu(self.conv(torch.cat([dsp, pose], axis=1)), 0.4)
    return fused
class ResidualBlock(nn.Module):
  """Gated dilated-convolution residual block (WaveNet-style) with a
  diffusion-step bias and an optional conditioner projection."""
  def __init__(self, n_mels, residual_channels, dilation, uncond=False):
    '''
    :param n_mels: inplanes of conv1x1 for spectrogram conditional
    :param residual_channels: audio conv
    :param dilation: audio conv dilation
    :param uncond: disable spectrogram conditional
    '''
    super().__init__()
    self.dilated_conv = Conv1d(residual_channels, 2 * residual_channels, 3, padding=dilation, dilation=dilation)
    self.diffusion_projection = Linear(512, residual_channels)
    # conditional model gets a 1x1 projection for the conditioner; unconditional has none
    if not uncond:
      self.conditioner_projection = Conv1d(n_mels, 2 * residual_channels, 1)
    else:
      self.conditioner_projection = None
    self.output_projection = Conv1d(residual_channels, 2 * residual_channels, 1)
  def forward(self, x, diffusion_step, conditioner=None):
    # a conditioner must be supplied iff the block was built conditionally
    assert (conditioner is None) == (self.conditioner_projection is None)
    step_bias = self.diffusion_projection(diffusion_step).unsqueeze(-1)
    y = self.dilated_conv(x + step_bias)
    if self.conditioner_projection is not None:
      y = y + self.conditioner_projection(conditioner)
    # gated activation unit
    gate, filt = torch.chunk(y, 2, dim=1)
    y = self.output_projection(torch.sigmoid(gate) * torch.tanh(filt))
    residual, skip = torch.chunk(y, 2, dim=1)
    return (x + residual) / sqrt(2.0), skip
class BinauralGrad(nn.Module):
  """Diffusion denoiser for binaural audio.

  Predicts the noise added to a (1- or 2-channel) signal, conditioned on
  geometry-warped audio, pose view, and optional mono/mean-condition signals
  through BinauralPreNet.
  """
  def __init__(self, params, binaural_type=""):
    super().__init__()
    self.params = params
    self.binaural_type = binaural_type
    self.loss_per_layer = getattr(params, "loss_per_layer", 0)
    self.use_mean_condition = getattr(params, "use_mean_condition", False)
    self.predict_mean_condition = getattr(params, "predict_mean_condition", False)
    self.warper = None
    # mean-condition models work on a single channel; otherwise stereo
    if not self.predict_mean_condition:
      self.input_projection = Conv1d(2, params.residual_channels, 1)
      self.output_projection = Conv1d(params.residual_channels, 2, 1)
    else:
      self.input_projection = Conv1d(1, params.residual_channels, 1)
      self.output_projection = Conv1d(params.residual_channels, 1, 1)
    self.diffusion_embedding = DiffusionEmbedding(len(params.noise_schedule))
    self.binaural_pre_net = BinauralPreNet(params.n_mels, binaural_type=binaural_type, addmono=getattr(params, "use_mono", False),
                                           use_mean_condition=self.use_mean_condition,
                                           predict_mean_condition=self.predict_mean_condition)
    self.spectrogram_upsampler = None
    self.residual_layers = nn.ModuleList([
        ResidualBlock(params.n_mels, params.residual_channels, 2**(i % params.dilation_cycle_length), uncond=params.unconditional)
        for i in range(params.residual_layers)
    ])
    self.skip_projection = Conv1d(params.residual_channels, params.residual_channels, 1)
    # zero-init so the model initially predicts zero noise
    nn.init.zeros_(self.output_projection.weight)
  def forward(self, audio, diffusion_step, spectrogram=None, geowarp=None, view=None, mono=None, mean_condition=None):
    """Return (noise_prediction, [per-layer predictions,] geowarp).

    Returns a 3-tuple when loss_per_layer != 0, otherwise a 2-tuple.
    """
    # x = audio.unsqueeze(1)
    x = audio
    x = self.input_projection(x)
    x = F.relu(x)
    diffusion_step = self.diffusion_embedding(diffusion_step)
    # the conditioner is always rebuilt from the binaural inputs; the
    # `spectrogram` argument is ignored here
    spectrogram = self.binaural_pre_net(geowarp, view, mono, mean_condition)
    skip = None
    extra_output = []
    for l_id, layer in enumerate(self.residual_layers):
      x, skip_connection = layer(x, diffusion_step, spectrogram)
      # NOTE(review): sqrt(l_id) is 0 when l_id == 0 — with loss_per_layer == 1
      # this divides by zero on the first layer; confirm loss_per_layer > 1 in configs
      if self.loss_per_layer != 0 and l_id % self.loss_per_layer == self.loss_per_layer - 1:
        extra_output.append(self.output_projection(F.relu(self.skip_projection(skip / sqrt(l_id)))))
      skip = skip_connection if skip is None else skip_connection + skip
    # average skip connections over the stack
    x = skip / sqrt(len(self.residual_layers))
    x = self.skip_projection(x)
    x = F.relu(x)
    x = self.output_projection(x)
    if self.loss_per_layer != 0:
      return x, extra_output, geowarp
    else:
      return x, geowarp
| 7,085 | 35.715026 | 131 | py |
NeuralSpeech | NeuralSpeech-master/BinauralGrad/src/binauralgrad/dataset.py | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import numpy as np
import os
import random
import torch
import torchaudio
from glob import glob
from torch.utils.data.distributed import DistributedSampler
import torch.nn.functional as F
class BinauralConditionalDataset(torch.utils.data.Dataset):
  """Dataset of 8 subjects' paired mono / binaural / geometry-warped audio
  plus transmitter positions, chunked with 50% overlap.
  """
  def __init__(self, paths, binaural_type="", predict_mean_condition=False):
    super().__init__()
    self.mono, self.binaural, self.binaural_geowarp, self.view = [], [], [], []
    self.binaural_type = binaural_type
    self.predict_mean_condition = predict_mean_condition
    for subject_id in range(8):
      mono, _ = torchaudio.load(f"{paths}/subject{subject_id + 1}/mono.wav")
      binaural, _ = torchaudio.load(f"{paths}/subject{subject_id + 1}/binaural.wav")
      binaural_geowarp, _ = torchaudio.load(f"{paths}/subject{subject_id + 1}/binaural_geowarp.wav")
      # receiver is fixed at origin in this dataset, so we only need transmitter view
      tx_view = np.loadtxt(f"{paths}/subject{subject_id + 1}/tx_positions.txt").transpose()
      self.mono.append(mono)
      self.binaural.append(binaural)
      self.binaural_geowarp.append(binaural_geowarp)
      self.view.append(tx_view.astype(np.float32))
    # ensure that chunk_size is a multiple of 400 to match audio (48kHz) and receiver/transmitter positions (120Hz)
    self.chunk_size = 2000 * 48
    if self.chunk_size % 400 > 0:
      self.chunk_size = self.chunk_size + 400 - self.chunk_size % 400
    # compute chunks: 50% overlapping windows per subject
    self.chunks = []
    for subject_id in range(8):
      last_chunk_start_frame = self.mono[subject_id].shape[-1] - self.chunk_size + 1
      hop_length = int((1 - 0.5) * self.chunk_size)
      for offset in range(0, last_chunk_start_frame, hop_length):
        self.chunks.append({'subject': subject_id, 'offset': offset})
  def __len__(self):
    # one item per precomputed chunk
    return len(self.chunks)
  def __getitem__(self, idx):
    """Return one chunk: audio fields at 48kHz, view at 1/400 the audio rate."""
    subject = self.chunks[idx]['subject']
    offset = self.chunks[idx]['offset']
    mono = self.mono[subject][:, offset:offset+self.chunk_size]
    # positions run at 120Hz = 48kHz / 400
    view = self.view[subject][:, offset//400:(offset+self.chunk_size)//400]
    binaural = self.binaural[subject][0:2, offset:offset+self.chunk_size]
    binaural_geowarp = self.binaural_geowarp[subject][0:2, offset:offset+self.chunk_size]
    # mean over the two binaural channels, kept as a 1-channel signal
    mean_condition = self.binaural[subject][0:2, offset:offset+self.chunk_size].mean(0, keepdim=True)
    return {
      'mono': mono,
      'binaural': binaural,
      'binaural_geowarp': binaural_geowarp,
      'view': view,
      'mean_condition': mean_condition,
    }
class Collator:
  """Randomly crops each record to `params.clip_length` samples (aligned to the
  400-sample pose grid) and stacks the fields into batched tensors."""
  def __init__(self, params):
    self.params = params
  def collate_binaural(self, minibatch):
    clip_length = self.params.clip_length
    frames = clip_length // 400  # pose positions run at 1/400 of the audio rate
    for record in minibatch:
      # pick a crop start aligned to the pose grid
      start_view = random.randint(0, record['mono'].shape[1] // 400 - frames)
      start = start_view * 400
      end = (start_view + frames) * 400
      for key in ('mono', 'mean_condition', 'binaural', 'binaural_geowarp'):
        record[key] = record[key][:, start:end]
      # upsample the cropped pose track to the audio rate by repetition
      pose = record['view'][:, start_view:start_view + frames].T
      record['view'] = np.repeat(pose, 400, axis=0).T
    stacked = {
        key: np.stack([record[key] for record in minibatch if key in record])
        for key in ('mono', 'mean_condition', 'binaural', 'binaural_geowarp', 'view')
    }
    assert stacked['binaural_geowarp'].shape[0] == stacked['view'].shape[0]
    return {
      'mono': torch.from_numpy(stacked['mono']),
      'mean_condition': torch.from_numpy(stacked['mean_condition']),
      'audio': torch.from_numpy(stacked['binaural']),
      'binaural_geowarp': torch.from_numpy(stacked['binaural_geowarp']),
      'view': torch.from_numpy(stacked['view']),
    }
def from_path(data_dirs, params, binaural_type="", is_distributed=False):
if binaural_type:
dataset = BinauralConditionalDataset(data_dirs[0], binaural_type,
predict_mean_condition=getattr(params, "predict_mean_condition", False))
else:
raise ValueError("Unsupported binaural_type")
return torch.utils.data.DataLoader(
dataset,
batch_size=params.batch_size,
collate_fn=Collator(params).collate_binaural,
shuffle=not is_distributed,
num_workers=os.cpu_count(),
sampler=DistributedSampler(dataset) if is_distributed else None,
pin_memory=True,
drop_last=True)
| 4,826 | 39.90678 | 117 | py |
NeuralSpeech | NeuralSpeech-master/BinauralGrad/src/binauralgrad/mstft_loss.py | #Reference: https://github.com/csteinmetz1/auraloss
import torch
import numpy as np
import librosa.filters
import scipy.signal
class SumAndDifference(torch.nn.Module):
    """Extract mid (sum) and side (difference) signals from a stereo batch."""

    def __init__(self):
        """Initialize sum and difference extraction module."""
        super(SumAndDifference, self).__init__()

    def forward(self, x):
        """Split a stereo signal into its sum and difference components.

        Args:
            x (Tensor): Predicted signal (B, #channels, #samples).
        Returns:
            Tensor: Sum signal.
            Tensor: Difference signal.
        """
        if x.size(1) != 2:  # inputs must be stereo
            raise ValueError(f"Input must be stereo: {x.size(1)} channel(s).")
        return self.sum(x).unsqueeze(1), self.diff(x).unsqueeze(1)

    @staticmethod
    def sum(x):
        left, right = x[:, 0, :], x[:, 1, :]
        return left + right

    @staticmethod
    def diff(x):
        left, right = x[:, 0, :], x[:, 1, :]
        return left - right
class FIRFilter(torch.nn.Module):
    """FIR pre-emphasis filtering module.
    Args:
        filter_type (str): Shape of the desired FIR filter ("hp", "fd", "aw"). Default: "hp"
        coef (float): Coefficient value for the filter tap (only applicable for "hp" and "fd"). Default: 0.85
        ntaps (int): Number of FIR filter taps for constructing A-weighting filters. Default: 101
        plot (bool): Plot the magnitude response of the filter. Default: False
    Based upon the perceptual loss pre-emphasis filters proposed by
    [Wright & Välimäki, 2019](https://arxiv.org/abs/1911.08922).
    A-weighting filter - "aw"
    First-order highpass - "hp"
    Folded differentiator - "fd"
    Note that the default coefficient value of 0.85 is optimized for
    a sampling rate of 44.1 kHz, consider adjusting this value at different sampling rates.
    """
    def __init__(self, filter_type="hp", coef=0.85, fs=44100, ntaps=101, plot=False):
        """Initialize FIR pre-emphasis filtering module."""
        super(FIRFilter, self).__init__()
        self.filter_type = filter_type
        self.coef = coef
        self.fs = fs
        self.ntaps = ntaps
        self.plot = plot
        # odd tap count keeps the filter symmetric around its center
        if ntaps % 2 == 0:
            raise ValueError(f"ntaps must be odd (ntaps={ntaps}).")
        if filter_type == "hp":
            # first-order highpass: y[n] = x[n] - coef * x[n-1]
            self.fir = torch.nn.Conv1d(1, 1, kernel_size=3, bias=False, padding=1)
            self.fir.weight.requires_grad = False
            self.fir.weight.data = torch.tensor([1, -coef, 0]).view(1, 1, -1)
        elif filter_type == "fd":
            # folded differentiator: y[n] = x[n] - coef * x[n-2]
            self.fir = torch.nn.Conv1d(1, 1, kernel_size=3, bias=False, padding=1)
            self.fir.weight.requires_grad = False
            self.fir.weight.data = torch.tensor([1, 0, -coef]).view(1, 1, -1)
        elif filter_type == "aw":
            # Definition of analog A-weighting filter according to IEC/CD 1672.
            f1 = 20.598997
            f2 = 107.65265
            f3 = 737.86223
            f4 = 12194.217
            A1000 = 1.9997
            NUMs = [(2 * np.pi * f4) ** 2 * (10 ** (A1000 / 20)), 0, 0, 0, 0]
            DENs = np.polymul(
                [1, 4 * np.pi * f4, (2 * np.pi * f4) ** 2],
                [1, 4 * np.pi * f1, (2 * np.pi * f1) ** 2],
            )
            DENs = np.polymul(
                np.polymul(DENs, [1, 2 * np.pi * f3]), [1, 2 * np.pi * f2]
            )
            # convert analog filter to digital filter
            b, a = scipy.signal.bilinear(NUMs, DENs, fs=fs)
            # compute the digital filter frequency response
            w_iir, h_iir = scipy.signal.freqz(b, a, worN=512, fs=fs)
            # then we fit to 101 tap FIR filter with least squares
            taps = scipy.signal.firls(ntaps, w_iir, abs(h_iir), fs=fs)
            # now implement this digital FIR filter as a Conv1d layer
            self.fir = torch.nn.Conv1d(
                1, 1, kernel_size=ntaps, bias=False, padding=ntaps // 2
            )
            self.fir.weight.requires_grad = False
            self.fir.weight.data = torch.tensor(taps.astype("float32")).view(1, 1, -1)
        if plot:
            from .plotting import compare_filters
            compare_filters(b, a, taps, fs=fs)
    def forward(self, input, target):
        """Filter both prediction and target with the same (fixed) FIR kernel.

        Args:
            input (Tensor): Predicted signal (B, #channels, #samples).
            target (Tensor): Groundtruth signal (B, #channels, #samples).
        Returns:
            Tensor: Filtered signal.
        """
        # functional conv with the frozen weight keeps the op out of autograd's parameter set
        input = torch.nn.functional.conv1d(
            input, self.fir.weight.data, padding=self.ntaps // 2
        )
        target = torch.nn.functional.conv1d(
            target, self.fir.weight.data, padding=self.ntaps // 2
        )
        return input, target
def apply_reduction(losses, reduction="none"):
"""Apply reduction to collection of losses."""
if reduction == "mean":
losses = losses.mean()
elif reduction == "sum":
losses = losses.sum()
return losses
class SpectralConvergenceLoss(torch.nn.Module):
    """Spectral convergence loss module.
    See [Arik et al., 2018](https://arxiv.org/abs/1808.06719).
    """

    def __init__(self):
        super(SpectralConvergenceLoss, self).__init__()

    def forward(self, x_mag, y_mag):
        # Frobenius norm of the residual, normalized by the target's norm
        residual_norm = torch.norm(y_mag - x_mag, p="fro")
        return residual_norm / torch.norm(y_mag, p="fro")
class STFTMagnitudeLoss(torch.nn.Module):
    """STFT magnitude loss module.
    See [Arik et al., 2018](https://arxiv.org/abs/1808.06719)
    and [Engel et al., 2020](https://arxiv.org/abs/2001.04643v1)
    Args:
        log (bool, optional): Log-scale the STFT magnitudes,
            or use linear scale. Default: True
        distance (str, optional): Distance function ["L1", "L2"]. Default: "L1"
        reduction (str, optional): Reduction of the loss elements. Default: "mean"
    """

    def __init__(self, log=True, distance="L1", reduction="mean"):
        super(STFTMagnitudeLoss, self).__init__()
        self.log = log
        if distance not in ("L1", "L2"):
            raise ValueError(f"Invalid distance: '{distance}'.")
        loss_cls = torch.nn.L1Loss if distance == "L1" else torch.nn.MSELoss
        self.distance = loss_cls(reduction=reduction)

    def forward(self, x_mag, y_mag):
        # optionally compare magnitudes on a log scale
        if self.log:
            return self.distance(torch.log(x_mag), torch.log(y_mag))
        return self.distance(x_mag, y_mag)
class STFTLoss(torch.nn.Module):
"""STFT loss module.
See [Yamamoto et al. 2019](https://arxiv.org/abs/1904.04472).
Args:
fft_size (int, optional): FFT size in samples. Default: 1024
hop_size (int, optional): Hop size of the FFT in samples. Default: 256
win_length (int, optional): Length of the FFT analysis window. Default: 1024
window (str, optional): Window to apply before FFT, options include:
['hann_window', 'bartlett_window', 'blackman_window', 'hamming_window', 'kaiser_window']
Default: 'hann_window'
w_sc (float, optional): Weight of the spectral convergence loss term. Default: 1.0
w_log_mag (float, optional): Weight of the log magnitude loss term. Default: 1.0
w_lin_mag_mag (float, optional): Weight of the linear magnitude loss term. Default: 0.0
w_phs (float, optional): Weight of the spectral phase loss term. Default: 0.0
sample_rate (int, optional): Sample rate. Required when scale = 'mel'. Default: None
scale (str, optional): Optional frequency scaling method, options include:
['mel', 'chroma']
Default: None
n_bins (int, optional): Number of scaling frequency bins. Default: None.
scale_invariance (bool, optional): Perform an optimal scaling of the target. Default: False
eps (float, optional): Small epsilon value for stablity. Default: 1e-8
output (str, optional): Format of the loss returned.
'loss' : Return only the raw, aggregate loss term.
'full' : Return the raw loss, plus intermediate loss terms.
Default: 'loss'
reduction (str, optional): Specifies the reduction to apply to the output:
'none': no reduction will be applied,
'mean': the sum of the output will be divided by the number of elements in the output,
'sum': the output will be summed.
Default: 'mean'
device (str, optional): Place the filterbanks on specified device. Default: None
Returns:
loss:
Aggreate loss term. Only returned if output='loss'. By default.
loss, sc_mag_loss, log_mag_loss, lin_mag_loss, phs_loss:
Aggregate and intermediate loss terms. Only returned if output='full'.
"""
def __init__(
self,
fft_size=1024,
hop_size=256,
win_length=1024,
window="hann_window",
w_sc=1.0,
w_log_mag=1.0,
w_lin_mag=0.0,
w_phs=0.0,
sample_rate=None,
scale=None,
n_bins=None,
scale_invariance=False,
eps=1e-8,
output="loss",
reduction="mean",
device=None,
):
super(STFTLoss, self).__init__()
self.fft_size = fft_size
self.hop_size = hop_size
self.win_length = win_length
self.window = getattr(torch, window)(win_length)
self.w_sc = w_sc
self.w_log_mag = w_log_mag
self.w_lin_mag = w_lin_mag
self.w_phs = w_phs
self.sample_rate = sample_rate
self.scale = scale
self.n_bins = n_bins
self.scale_invariance = scale_invariance
self.eps = eps
self.output = output
self.reduction = reduction
self.device = device
self.spectralconv = SpectralConvergenceLoss()
self.logstft = STFTMagnitudeLoss(log=True, reduction=reduction)
self.linstft = STFTMagnitudeLoss(log=False, reduction=reduction)
# setup mel filterbank
if self.scale == "mel":
assert sample_rate != None # Must set sample rate to use mel scale
assert n_bins <= fft_size # Must be more FFT bins than Mel bins
fb = librosa.filters.mel(sample_rate, fft_size, n_mels=n_bins)
self.fb = torch.tensor(fb).unsqueeze(0)
elif self.scale == "chroma":
assert sample_rate != None # Must set sample rate to use chroma scale
assert n_bins <= fft_size # Must be more FFT bins than chroma bins
fb = librosa.filters.chroma(sample_rate, fft_size, n_chroma=n_bins)
self.fb = torch.tensor(fb).unsqueeze(0)
if scale is not None and device is not None:
self.fb = self.fb.to(self.device) # move filterbank to device
def stft(self, x):
"""Perform STFT.
Args:
x (Tensor): Input signal tensor (B, T).
Returns:
Tensor: x_mag, x_phs
Magnitude and phase spectra (B, fft_size // 2 + 1, frames).
"""
x_stft = torch.stft(
x,
self.fft_size,
self.hop_size,
self.win_length,
self.window,
return_complex=True,
)
x_mag = torch.sqrt(
torch.clamp((x_stft.real ** 2) + (x_stft.imag ** 2), min=self.eps)
)
# x_phs = torch.angle(x_stft)
return x_mag, x_stft
    def forward(self, x, y):
        """Compute the weighted STFT loss between prediction ``x`` and target ``y``.

        Both inputs are flattened to (B', T) via ``view``, merging any leading
        batch/channel dimensions before the STFT.
        """
        # compute the magnitude and phase spectra of input and target
        self.window = self.window.to(x.device)
        x_mag, x_phs = self.stft(x.view(-1, x.size(-1)))
        y_mag, y_phs = self.stft(y.view(-1, y.size(-1)))
        # apply relevant transforms
        if self.scale is not None:
            x_mag = torch.matmul(self.fb, x_mag)
            y_mag = torch.matmul(self.fb, y_mag)
        # normalize scales
        if self.scale_invariance:
            alpha = (x_mag * y_mag).sum([-2, -1]) / ((y_mag ** 2).sum([-2, -1]))
            y_mag = y_mag * alpha.unsqueeze(-1)
        # compute loss terms (each skipped entirely when its weight is zero)
        sc_mag_loss = self.spectralconv(x_mag, y_mag) if self.w_sc else 0.0
        log_mag_loss = self.logstft(x_mag, y_mag) if self.w_log_mag else 0.0
        lin_mag_loss = self.linstft(x_mag, y_mag) if self.w_lin_mag else 0.0
        if self.w_phs:
            # x_phs / y_phs are the complex STFTs returned by self.stft()
            ignore_below = 0.1
            data = torch.cat([x_phs.real.unsqueeze(-1), x_phs.imag.unsqueeze(-1)], dim=-1).view(-1, 2)
            target = torch.cat([y_phs.real.unsqueeze(-1), y_phs.imag.unsqueeze(-1)], dim=-1).view(-1, 2)
            # ignore low energy components for numerical stability
            target_energy = torch.sum(torch.abs(target), dim=-1)
            pred_energy = torch.sum(torch.abs(data), dim=-1)
            target_mask = target_energy > ignore_below * torch.mean(target_energy)
            # NOTE(review): pred_mask is thresholded against the *target*
            # energy mean rather than the prediction's — confirm intended.
            pred_mask = pred_energy > ignore_below * torch.mean(target_energy)
            indices = torch.nonzero(target_mask * pred_mask).view(-1)
            data, target = torch.index_select(data, 0, indices), torch.index_select(target, 0, indices)
            # compute actual phase loss in angular space
            data_angles, target_angles = torch.atan2(data[:, 0], data[:, 1]), torch.atan2(target[:, 0], target[:, 1])
            loss = torch.abs(data_angles - target_angles)
            # positive + negative values in left part of coordinate system cause angles > pi
            # => 2pi -> 0, 3/4pi -> 1/2pi, ... (triangle function over [0, 2pi] with peak at pi)
            loss = np.pi - torch.abs(loss - np.pi)
            phs_loss = torch.mean(loss)
        else:
            phs_loss = 0.0
        # combine loss terms with their configured weights
        loss = (
            (self.w_sc * sc_mag_loss)
            + (self.w_log_mag * log_mag_loss)
            + (self.w_lin_mag * lin_mag_loss)
            + (self.w_phs * phs_loss)
        )
        loss = apply_reduction(loss, reduction=self.reduction)
        if self.output == "loss":
            return loss
        elif self.output == "full":
            return loss, sc_mag_loss, log_mag_loss, lin_mag_loss, phs_loss
class MelSTFTLoss(STFTLoss):
    """STFT loss evaluated on a mel-scaled magnitude spectrum."""

    def __init__(
        self,
        sample_rate,
        fft_size=1024,
        hop_size=256,
        win_length=1024,
        window="hann_window",
        w_sc=1.0,
        w_log_mag=1.0,
        w_lin_mag=0.0,
        w_phs=0.0,
        n_mels=128,
        **kwargs,
    ):
        # delegate to the base class with the mel filterbank enabled
        super(MelSTFTLoss, self).__init__(
            fft_size=fft_size,
            hop_size=hop_size,
            win_length=win_length,
            window=window,
            w_sc=w_sc,
            w_log_mag=w_log_mag,
            w_lin_mag=w_lin_mag,
            w_phs=w_phs,
            sample_rate=sample_rate,
            scale="mel",
            n_bins=n_mels,
            **kwargs,
        )
class ChromaSTFTLoss(STFTLoss):
    """STFT loss evaluated on a chroma-scaled magnitude spectrum."""

    def __init__(
        self,
        sample_rate,
        fft_size=1024,
        hop_size=256,
        win_length=1024,
        window="hann_window",
        w_sc=1.0,
        w_log_mag=1.0,
        w_lin_mag=0.0,
        w_phs=0.0,
        n_chroma=12,
        **kwargs,
    ):
        # delegate to the base class with the chroma filterbank enabled
        super(ChromaSTFTLoss, self).__init__(
            fft_size=fft_size,
            hop_size=hop_size,
            win_length=win_length,
            window=window,
            w_sc=w_sc,
            w_log_mag=w_log_mag,
            w_lin_mag=w_lin_mag,
            w_phs=w_phs,
            sample_rate=sample_rate,
            scale="chroma",
            n_bins=n_chroma,
            **kwargs,
        )
class MultiResolutionSTFTLoss(torch.nn.Module):
    """Multi resolution STFT loss module.
    See [Yamamoto et al., 2019](https://arxiv.org/abs/1910.11480)

    Averages an ``STFTLoss`` over several (fft_size, hop_size, win_length)
    resolutions. All remaining keyword arguments are forwarded to every
    underlying ``STFTLoss`` instance.

    Args:
        fft_sizes (list): List of FFT sizes.
        hop_sizes (list): List of hop sizes.
        win_lengths (list): List of window lengths.
        window (str, optional): Window to apply before FFT. Default: 'hann_window'
        w_sc (float, optional): Weight of the spectral convergence loss term. Default: 1.0
        w_log_mag (float, optional): Weight of the log magnitude loss term. Default: 1.0
        w_lin_mag (float, optional): Weight of the linear magnitude loss term. Default: 0.0
        w_phs (float, optional): Weight of the spectral phase loss term. Default: 0.0
        sample_rate (int, optional): Sample rate. Required when scale = 'mel'. Default: None
        scale (str, optional): Frequency scaling method, 'mel' or 'chroma'. Default: None
        n_bins (int, optional): Number of frequency bins for the scale. Default: None
        scale_invariance (bool, optional): Perform an optimal scaling of the target. Default: False
    """

    def __init__(
        self,
        fft_sizes=[1024, 2048, 512],
        hop_sizes=[120, 240, 50],
        win_lengths=[600, 1200, 240],
        window="hann_window",
        w_sc=1.0,
        w_log_mag=1.0,
        w_lin_mag=0.0,
        w_phs=0.0,
        sample_rate=None,
        scale=None,
        n_bins=None,
        scale_invariance=False,
        **kwargs,
    ):
        super(MultiResolutionSTFTLoss, self).__init__()
        # every resolution must specify all three STFT parameters
        assert len(fft_sizes) == len(hop_sizes) == len(win_lengths)
        self.fft_sizes = fft_sizes
        self.hop_sizes = hop_sizes
        self.win_lengths = win_lengths
        self.stft_losses = torch.nn.ModuleList(
            STFTLoss(
                n_fft,
                hop,
                win,
                window,
                w_sc,
                w_log_mag,
                w_lin_mag,
                w_phs,
                sample_rate,
                scale,
                n_bins,
                scale_invariance,
                **kwargs,
            )
            for n_fft, hop, win in zip(fft_sizes, hop_sizes, win_lengths)
        )

    def forward(self, x, y):
        """Average the per-resolution losses; returns intermediates in 'full' mode."""
        total_loss = 0.0
        sc_terms, log_terms, lin_terms, phs_terms = [], [], [], []
        for stft_loss in self.stft_losses:
            if stft_loss.output == "full":  # extract just first term
                agg, sc_t, log_t, lin_t, phs_t = stft_loss(x, y)
                total_loss += agg
                sc_terms.append(sc_t)
                log_terms.append(log_t)
                lin_terms.append(lin_t)
                phs_terms.append(phs_t)
            else:
                total_loss += stft_loss(x, y)
        total_loss /= len(self.stft_losses)
        if stft_loss.output == "loss":
            return total_loss
        return total_loss, sc_terms, log_terms, lin_terms, phs_terms
class RandomResolutionSTFTLoss(torch.nn.Module):
    """Random resolution STFT loss module.
    See [Steinmetz & Reiss, 2020](https://www.christiansteinmetz.com/s/DMRN15__auraloss__Audio_focused_loss_functions_in_PyTorch.pdf)
    Args:
        resolutions (int): Total number of STFT resolutions.
        min_fft_size (int): Smallest FFT size.
        max_fft_size (int): Largest FFT size.
        min_hop_size (float): Smallest hop size as proportion of window size.
        max_hop_size (float): Largest hop size as proportion of window size.
        windows (list): Window function types to sample from.
        randomize_rate (int): Number of forwards before STFTs are randomized.
    """
    def __init__(
        self,
        resolutions=3,
        min_fft_size=16,
        max_fft_size=32768,
        min_hop_size=0.1,
        max_hop_size=1.0,
        windows=[
            "hann_window",
            "bartlett_window",
            "blackman_window",
            "hamming_window",
            "kaiser_window",
        ],
        w_sc=1.0,
        w_log_mag=1.0,
        w_lin_mag=0.0,
        w_phs=0.0,
        sample_rate=None,
        scale=None,
        n_mels=None,
        randomize_rate=1,
        **kwargs,
    ):
        super(RandomResolutionSTFTLoss, self).__init__()
        self.resolutions = resolutions
        self.min_fft_size = min_fft_size
        self.max_fft_size = max_fft_size
        self.min_hop_size = min_hop_size
        self.max_hop_size = max_hop_size
        self.windows = windows
        self.randomize_rate = randomize_rate
        self.w_sc = w_sc
        self.w_log_mag = w_log_mag
        self.w_lin_mag = w_lin_mag
        self.w_phs = w_phs
        self.sample_rate = sample_rate
        self.scale = scale
        self.n_mels = n_mels
        self.nforwards = 0  # counts forward() calls to drive re-randomization
        self.randomize_losses()  # init the losses
    def randomize_losses(self):
        """Draw ``resolutions`` fresh random STFT configurations."""
        # clear the existing STFT losses
        self.stft_losses = torch.nn.ModuleList()
        for n in range(self.resolutions):
            # FFT size: a random power of two in [min_fft_size, max_fft_size)
            frame_size = 2 ** np.random.randint(
                np.log2(self.min_fft_size), np.log2(self.max_fft_size)
            )
            # hop size: a uniform fraction of the frame in [min_hop, max_hop]
            hop_size = int(
                frame_size
                * (
                    self.min_hop_size
                    + (np.random.rand() * (self.max_hop_size - self.min_hop_size))
                )
            )
            # window length: full, half, or quarter of the frame
            window_length = int(frame_size * np.random.choice([1.0, 0.5, 0.25]))
            window = np.random.choice(self.windows)
            self.stft_losses += [
                STFTLoss(
                    frame_size,
                    hop_size,
                    window_length,
                    window,
                    self.w_sc,
                    self.w_log_mag,
                    self.w_lin_mag,
                    self.w_phs,
                    self.sample_rate,
                    self.scale,
                    self.n_mels,
                )
            ]
    def forward(self, input, target):
        """Average the current random-resolution STFT losses over the inputs."""
        if input.size(-1) <= self.max_fft_size:
            raise ValueError(
                f"Input length ({input.size(-1)}) must be larger than largest FFT size ({self.max_fft_size})."
            )
        elif target.size(-1) <= self.max_fft_size:
            raise ValueError(
                f"Target length ({target.size(-1)}) must be larger than largest FFT size ({self.max_fft_size})."
            )
        # periodically resample the STFT configurations
        if self.nforwards % self.randomize_rate == 0:
            self.randomize_losses()
        loss = 0.0
        for f in self.stft_losses:
            loss += f(input, target)
        loss /= len(self.stft_losses)
        self.nforwards += 1
        return loss
class SumAndDifferenceSTFTLoss(torch.nn.Module):
    """Sum and difference stereo STFT loss module.
    See [Steinmetz et al., 2020](https://arxiv.org/abs/2010.10291)
    Args:
        fft_sizes (list, optional): List of FFT sizes.
        hop_sizes (list, optional): List of hop sizes.
        win_lengths (list, optional): List of window lengths.
        window (str, optional): Window function type.
        w_sum (float, optional): Weight of the sum loss component. Default: 1.0
        w_diff (float, optional): Weight of the difference loss component. Default: 1.0
        output (str, optional): Format of the loss returned.
            'loss' : Return only the raw, aggregate loss term.
            'full' : Return the raw loss, plus intermediate loss terms.
            Default: 'loss'
    Returns:
        loss:
            Aggregate loss term. Only returned if output='loss'.
        loss, sum_loss, diff_loss:
            Aggregate and intermediate loss terms. Only returned if output='full'.
    """
    def __init__(
        self,
        fft_sizes=[1024, 2048, 512],
        hop_sizes=[120, 240, 50],
        win_lengths=[600, 1200, 240],
        window="hann_window",
        w_sum=1.0,
        w_diff=1.0,
        output="loss",
    ):
        super(SumAndDifferenceSTFTLoss, self).__init__()
        self.sd = SumAndDifference()
        # BUG FIX: these were hard-coded to 1.0, silently ignoring the
        # w_sum / w_diff constructor arguments.
        self.w_sum = w_sum
        self.w_diff = w_diff
        self.output = output
        self.mrstft = MultiResolutionSTFTLoss(fft_sizes, hop_sizes, win_lengths, window)
    def forward(self, input, target):
        """Compute the loss on the mid (sum) and side (difference) signals."""
        input_sum, input_diff = self.sd(input)
        target_sum, target_diff = self.sd(target)
        sum_loss = self.mrstft(input_sum, target_sum)
        diff_loss = self.mrstft(input_diff, target_diff)
        loss = ((self.w_sum * sum_loss) + (self.w_diff * diff_loss)) / 2
        if self.output == "loss":
            return loss
        elif self.output == "full":
            return loss, sum_loss, diff_loss
| 24,435 | 35.690691 | 133 | py |
NeuralSpeech | NeuralSpeech-master/BinauralGrad/src/binauralgrad/train.py | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
from argparse import ArgumentParser
from torch.cuda import device_count
from torch.multiprocessing import spawn
from binauralgrad.learner import train, train_distributed
import binauralgrad.params as params_all
def _get_free_port():
import socketserver
with socketserver.TCPServer(('localhost', 0), None) as s:
return s.server_address[1]
def main(args):
  """Resolve hyperparameters and launch single- or multi-GPU training."""
  replica_count = device_count()
  params = getattr(params_all, args.params)
  if replica_count <= 1:
    # single device (or CPU): train directly in this process
    train(args, params)
    return
  # distributed training: each replica receives an equal slice of the batch
  if params.batch_size % replica_count != 0:
    raise ValueError(f'Batch size {params.batch_size} is not evenly divisble by # GPUs {replica_count}.')
  params.batch_size = params.batch_size // replica_count
  port = _get_free_port()
  spawn(train_distributed, args=(replica_count, port, args, params), nprocs=replica_count, join=True)
if __name__ == '__main__':
  parser = ArgumentParser(description='train (or resume training) a BinauralGrad model')
  parser.add_argument('model_dir',
      help='directory in which to store model checkpoints and training logs')
  parser.add_argument('data_dirs', nargs='+',
      help='space separated list of directories from which to read .wav files for training')
  parser.add_argument('--max_steps', default=None, type=int,
      help='maximum number of training steps')
  parser.add_argument('--fp16', action='store_true', default=False,
      help='use 16-bit floating point operations for training')
  # BUG FIX: the two help texts below were copy-pasted from --max_steps
  parser.add_argument('--binaural-type', default="", type=str,
      help='binaural type string passed through to training (empty to disable)')
  parser.add_argument('--params', default="params", type=str,
      help='name of the hyperparameter set to load from binauralgrad.params')
  main(parser.parse_args())
| 1,779 | 37.695652 | 107 | py |
NeuralSpeech | NeuralSpeech-master/AdapterASR/e2e_asr_adaptertransformer.py | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# Copyright 2019 Shigeki Karita
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""Transformer speech recognition model (pytorch)."""
from argparse import Namespace
from distutils.util import strtobool
import logging
import math
import numpy
import torch
from espnet.nets.pytorch_backend.e2e_asr_transformer import *
from espnet.nets.pytorch_backend.nets_utils import *
from espnet.nets.pytorch_backend.transformer.attention import MultiHeadedAttention
from espnet.nets.pytorch_backend.transformer.embedding import PositionalEncoding
from espnet.nets.pytorch_backend.transformer.encoder_layer import EncoderLayer
from espnet.nets.pytorch_backend.transformer.decoder_layer import DecoderLayer
from espnet.nets.pytorch_backend.transformer.layer_norm import LayerNorm
from espnet.nets.pytorch_backend.transformer.positionwise_feed_forward import (
PositionwiseFeedForward, # noqa: H301
)
from espnet.nets.pytorch_backend.transformer.repeat import repeat
from espnet.nets.pytorch_backend.e2e_asr_transformer import E2E as E2ETransformer
from espnet.nets.pytorch_backend.transformer.encoder import Encoder
from espnet.nets.pytorch_backend.transformer.decoder import Decoder
from espnet.nets.pytorch_backend.transformer.label_smoothing_loss import (
LabelSmoothingLoss, # noqa: H301
)
from data_load import low_resource_languages
low_resource_adapter_dim = 64
high_resource_adapter_dim = 128
class SimAdapter(MultiHeadedAttention):
    """SimAdapter: attention-based fusion over several language adapters.

    The query is the raw layer activation; keys and values are the stacked
    outputs of the per-language adapters, so the module learns how much of
    each adapter to blend in at every time step.
    """
    def __init__(self, n_feat, dropout_rate, fusion_languages=None, num_shared_layers=-1):
        """Construct a SimAdapter.

        Args:
            n_feat: feature (attention) dimension.
            dropout_rate: dropout applied to the attention weights.
            fusion_languages: languages whose adapters are fused (stored only).
            num_shared_layers: stored but not used in this class.
        """
        # Intentionally skip MultiHeadedAttention.__init__ and run the
        # torch.nn.Module initializer: this class defines its own projections.
        super(MultiHeadedAttention, self).__init__()
        self.linear_q = torch.nn.Linear(n_feat, n_feat)
        self.linear_k = torch.nn.Linear(n_feat, n_feat)
        self.linear_v = torch.nn.Linear(n_feat, n_feat, bias=False)
        # start the value projection as a (near-)identity matrix so the
        # fusion initially passes adapter outputs through unchanged
        self.linear_v.weight.data = (
            torch.zeros(n_feat, n_feat) + 0.000001
        ).fill_diagonal_(1.0)
        self.attn = None  # cache of the last attention weights (read externally)
        self.num_shared_layers = num_shared_layers
        self.dropout = torch.nn.Dropout(p=dropout_rate)
        self.temperature = 1.0
        self.fusion_languages = fusion_languages
    def forward_qkv(self, query, key, value):
        # single-head projections; no head split as in the parent class
        q = self.linear_q(query)  # (batch, time, d_k)
        k = self.linear_k(key)  # (batch, time, n_adapters, d_k)
        v = self.linear_v(value)  # (batch, time, n_adapters, d_k)
        return q, k, v
    def forward_attention(self, value, scores):
        # cache the softmaxed weights for the guide loss computed elsewhere
        self.attn = torch.softmax(scores, dim=-1)  # (batch, time, n_adapters)
        p_attn = self.dropout(self.attn)
        x = torch.matmul(p_attn.unsqueeze(2), value)
        # (batch, time, 1, n_adapters), (batch, time, n_adapters, d_k)
        x = torch.squeeze(x, dim=2)
        return x  # (batch, time, d_k)
    def forward(self, query, key, value, residual=None):
        """Fuse adapter outputs; optionally add ``residual`` to the result."""
        q, k, v = self.forward_qkv(query, key, value)
        # q: (batch, time, 1, d_k); k, v: (batch, time, n_adapters, d_k)
        # NOTE: scores are scaled by sqrt(temperature) (1.0 here), not by
        # sqrt(d_k) as in standard scaled dot-product attention.
        scores = torch.matmul(q.unsqueeze(2), k.transpose(-2, -1)) / math.sqrt(self.temperature)
        scores = torch.squeeze(scores, dim=2)
        # scores: (batch, time, n_adapters)
        out = self.forward_attention(v, scores)
        if residual is not None:
            out = out + residual
        return out
class Adapter(torch.nn.Module):
    """Residual bottleneck adapter: LayerNorm -> down-project -> ReLU -> up-project, plus skip."""

    def __init__(self, adapter_dim, embed_dim):
        super().__init__()
        self.layer_norm = LayerNorm(embed_dim)
        self.down_project = torch.nn.Linear(embed_dim, adapter_dim, bias=False)
        self.up_project = torch.nn.Linear(adapter_dim, embed_dim, bias=False)

    def forward(self, z):
        """Apply the adapter to ``z`` and add the residual connection."""
        hidden = self.down_project(self.layer_norm(z))
        hidden = torch.nn.functional.relu(hidden)
        return z + self.up_project(hidden)
class CustomEncoderLayer(EncoderLayer):
    """Encoder layer that also exposes the pre-feed-forward activation.

    Same computation as the parent layer, but additionally returns ``x_norm``
    (the feed-forward input, normalized by ``norm2`` when ``normalize_before``
    is set) so adapter layers can use it as a SimAdapter residual.
    """
    def forward(self, x, mask, cache=None):
        """Run self-attention + feed-forward.

        Args:
            x: input tensor (B, T, size).
            mask: self-attention mask.
            cache: previously computed frames; when given, only the last
                frame is used as query and the cache is re-attached on output.
        Returns:
            Tuple ``(x, x_norm, mask)``.
        """
        residual = x
        if self.normalize_before:
            x = self.norm1(x)
        if cache is None:
            x_q = x
        else:
            assert cache.shape == (x.shape[0], x.shape[1] - 1, self.size)
            x_q = x[:, -1:, :]
            residual = residual[:, -1:, :]
            mask = None if mask is None else mask[:, -1:, :]
        if self.concat_after:
            x_concat = torch.cat((x, self.self_attn(x_q, x, x, mask)), dim=-1)
            x = residual + self.concat_linear(x_concat)
        else:
            x = residual + self.dropout(self.self_attn(x_q, x, x, mask))
        if not self.normalize_before:
            x = self.norm1(x)
        residual = x
        if self.normalize_before:
            x = self.norm2(x)
        x_norm = x  # kept for the caller (e.g. as SimAdapter residual input)
        x = residual + self.dropout(self.feed_forward(x))
        if not self.normalize_before:
            x = self.norm2(x)
        if cache is not None:
            x = torch.cat([cache, x], dim=1)
        return x, x_norm, mask
class AdaptiveEncoderLayer(CustomEncoderLayer):
    """Encoder layer augmented with per-language adapters and optional SimAdapter fusion."""
    def __init__(
        self,
        languages,
        size,
        self_attn,
        feed_forward,
        dropout_rate,
        normalize_before=True,
        concat_after=False,
        sim_adapter_layer=None,
        shared_adapter=None,
        use_adapters=True,
    ):
        """Args:
            languages: languages for which to create adapters.
            size: attention dimension.
            self_attn / feed_forward / dropout_rate / normalize_before /
                concat_after: forwarded to CustomEncoderLayer.
            sim_adapter_layer: optional ModuleDict holding a SimAdapter keyed
                by the joined fusion-language string.
            shared_adapter: if set, a single adapter of this name replaces
                the per-language adapters.
            use_adapters: when False the layer behaves like its parent.
        """
        super().__init__(size,
            self_attn,
            feed_forward,
            dropout_rate,
            normalize_before,
            concat_after,
            )
        self.use_adapters = use_adapters
        if use_adapters:
            self.adapters = torch.nn.ModuleDict()
            self.shared_adapter = shared_adapter
            if shared_adapter:
                languages = [shared_adapter]
            for lang in languages:
                # low-resource languages (and the shared adapter) use a
                # smaller bottleneck than high-resource ones
                if lang in low_resource_languages or self.shared_adapter:
                    adapter_dim = low_resource_adapter_dim
                else:
                    adapter_dim = high_resource_adapter_dim
                self.adapters[lang] = Adapter(adapter_dim, size)
            self.sim_adapter = sim_adapter_layer
    def forward(self, x, mask, language, cache=None, use_sim_adapter=True):
        """Run the base layer, then the language adapter / SimAdapter fusion.

        Extra arguments are passed through unchanged so the layer can be
        chained via espnet's ``repeat`` container.
        """
        x, x_norm, mask = super().forward(x, mask, cache=cache)
        if not self.use_adapters:
            return x, mask, language, cache, use_sim_adapter
        if self.shared_adapter:
            assert len(self.adapters.keys()) == 1
            language = list(self.adapters.keys())[0]
        if (not use_sim_adapter) or not (self.sim_adapter):
            # plain per-language (or shared) adapter
            out = self.adapters[language](x)
        else:
            # fuse all fusion-language adapter outputs via the SimAdapter
            out = []
            fusion_languages = list(self.sim_adapter.keys())[0]
            for lang in fusion_languages.split("_"):
                if lang != "self":
                    out.append(self.adapters[lang](x))
                else:
                    out.append(x)  # "self" contributes the unadapted activation
            out = torch.stack(out).permute(1, 2, 0, 3)  # B, T, n_adapters, F
            out = self.sim_adapter[fusion_languages](x, out, out, residual=x_norm)
        return out, mask, language, cache, use_sim_adapter
class AdaptiveEncoder(Encoder):
    """Transformer encoder whose layers carry language adapters / SimAdapters."""
    def __init__(
        self,
        languages,
        idim,
        selfattention_layer_type="selfattn",
        attention_dim=256,
        attention_heads=4,
        conv_wshare=4,
        conv_kernel_length=11,
        conv_usebias=False,
        linear_units=2048,
        num_blocks=6,
        dropout_rate=0.1,
        positional_dropout_rate=0.1,
        attention_dropout_rate=0.0,
        input_layer="conv2d",
        pos_enc_class=PositionalEncoding,
        normalize_before=True,
        concat_after=False,
        positionwise_layer_type="linear",
        positionwise_conv_kernel_size=1,
        padding_idx=-1,
        sim_adapter=False,
        shared_adapter=None,
        use_adapters=True,
        fusion_languages=None,
    ):
        # build the standard espnet encoder first; self.encoders is then
        # replaced below with adapter-aware layers
        super().__init__(idim,
            selfattention_layer_type,
            attention_dim,
            attention_heads,
            conv_wshare,
            conv_kernel_length,
            conv_usebias,
            linear_units,
            num_blocks,
            dropout_rate,
            positional_dropout_rate,
            attention_dropout_rate,
            input_layer,
            pos_enc_class,
            normalize_before,
            concat_after,
            positionwise_layer_type,
            positionwise_conv_kernel_size,
            padding_idx)
        positionwise_layer, positionwise_layer_args = self.get_positionwise_layer(
            positionwise_layer_type,
            attention_dim,
            linear_units,
            dropout_rate,
            positionwise_conv_kernel_size,
        )
        if selfattention_layer_type == "selfattn":
            logging.info("encoder self-attention layer type = self-attention")
            self.encoders = repeat(
                num_blocks,
                lambda lnum: AdaptiveEncoderLayer(
                    languages,
                    attention_dim,
                    MultiHeadedAttention(
                        attention_heads, attention_dim, attention_dropout_rate
                    ),
                    positionwise_layer(*positionwise_layer_args),
                    dropout_rate,
                    normalize_before,
                    concat_after,
                    # one SimAdapter per layer, keyed by the sorted fusion languages
                    torch.nn.ModuleDict({"_".join(sorted(fusion_languages)): SimAdapter(attention_dim, attention_dropout_rate, fusion_languages)}) if sim_adapter else None,
                    shared_adapter,
                    use_adapters,
                ),
            )
        else:
            raise NotImplementedError("Only support self-attention encoder layer")
    def forward(self, xs, masks, language, use_sim_adapter=True):
        """Embed inputs, then run the adapter-aware encoder stack."""
        xs, masks = self.embed(xs, masks)
        xs, masks, _, _, _ = self.encoders(xs, masks, language, None, use_sim_adapter)
        if self.normalize_before:
            xs = self.after_norm(xs)
        return xs, masks
class CustomDecoderLayer(DecoderLayer):
    """Decoder layer that also exposes the pre-feed-forward activation.

    Same computation as the parent layer, but additionally returns ``x_norm``
    (the feed-forward input, normalized by ``norm3`` when ``normalize_before``
    is set) so adapter layers can use it as a SimAdapter residual.
    """
    def forward(self, tgt, tgt_mask, memory, memory_mask, cache=None):
        """Run self-attention, source attention and feed-forward.

        Returns:
            Tuple ``(x, x_norm, tgt_mask, memory, memory_mask)``.
        """
        residual = tgt
        if self.normalize_before:
            tgt = self.norm1(tgt)
        if cache is None:
            tgt_q = tgt
            tgt_q_mask = tgt_mask
        else:
            # compute only the last frame query keeping dim: max_time_out -> 1
            assert cache.shape == (
                tgt.shape[0],
                tgt.shape[1] - 1,
                self.size,
            ), f"{cache.shape} == {(tgt.shape[0], tgt.shape[1] - 1, self.size)}"
            tgt_q = tgt[:, -1:, :]
            residual = residual[:, -1:, :]
            tgt_q_mask = None
            if tgt_mask is not None:
                tgt_q_mask = tgt_mask[:, -1:, :]
        if self.concat_after:
            tgt_concat = torch.cat(
                (tgt_q, self.self_attn(tgt_q, tgt, tgt, tgt_q_mask)), dim=-1
            )
            x = residual + self.concat_linear1(tgt_concat)
        else:
            x = residual + self.dropout(self.self_attn(tgt_q, tgt, tgt, tgt_q_mask))
        if not self.normalize_before:
            x = self.norm1(x)
        residual = x
        if self.normalize_before:
            x = self.norm2(x)
        if self.concat_after:
            x_concat = torch.cat(
                (x, self.src_attn(x, memory, memory, memory_mask)), dim=-1
            )
            x = residual + self.concat_linear2(x_concat)
        else:
            x = residual + self.dropout(self.src_attn(x, memory, memory, memory_mask))
        if not self.normalize_before:
            x = self.norm2(x)
        residual = x
        if self.normalize_before:
            x = self.norm3(x)
        x_norm = x  # kept for the caller (e.g. as SimAdapter residual input)
        x = residual + self.dropout(self.feed_forward(x))
        if not self.normalize_before:
            x = self.norm3(x)
        if cache is not None:
            x = torch.cat([cache, x], dim=1)
        return x, x_norm, tgt_mask, memory, memory_mask
class AdaptiveDecoderLayer(CustomDecoderLayer):
    """Decoder layer augmented with per-language adapters and optional SimAdapter fusion."""
    def __init__(
        self,
        languages,
        size,
        self_attn,
        src_attn,
        feed_forward,
        dropout_rate,
        normalize_before=True,
        concat_after=False,
        sim_adapter_layer=None,
        shared_adapter=None,
        use_adapters=True,
    ):
        """Args mirror AdaptiveEncoderLayer, plus ``src_attn`` for the
        encoder-decoder attention of the underlying decoder layer."""
        super().__init__(size,
            self_attn,
            src_attn,
            feed_forward,
            dropout_rate,
            normalize_before,
            concat_after
            )
        self.use_adapters = use_adapters
        if use_adapters:
            self.adapters = torch.nn.ModuleDict()
            self.shared_adapter = shared_adapter
            if shared_adapter:
                languages = [shared_adapter]
            for lang in languages:
                # low-resource languages (and the shared adapter) use a
                # smaller bottleneck than high-resource ones
                if lang in low_resource_languages or self.shared_adapter:
                    adapter_dim = low_resource_adapter_dim
                else:
                    adapter_dim = high_resource_adapter_dim
                self.adapters[lang] = Adapter(adapter_dim, size)
            self.sim_adapter = sim_adapter_layer
    def forward(self, tgt, tgt_mask, memory, memory_mask, language, cache=None, use_sim_adapter=True):
        """Run the base decoder layer, then the adapter / SimAdapter fusion.

        Extra arguments are passed through unchanged for espnet's ``repeat``.
        """
        x, x_norm, tgt_mask, memory, memory_mask = super().forward(tgt, tgt_mask, memory, memory_mask, cache=cache)
        if not self.use_adapters:
            return x, tgt_mask, memory, memory_mask, language, cache, use_sim_adapter
        if self.shared_adapter:
            assert len(self.adapters.keys()) == 1
            language = list(self.adapters.keys())[0]
        if (not use_sim_adapter) or (not self.sim_adapter):
            # plain per-language (or shared) adapter
            out = self.adapters[language](x)
        else:
            # fuse all fusion-language adapter outputs via the SimAdapter
            out = []
            fusion_languages = list(self.sim_adapter.keys())[0]
            for lang in fusion_languages.split("_"):
                if lang != "self":
                    out.append(self.adapters[lang](x))
                else:
                    out.append(x)  # "self" contributes the unadapted activation
            out = torch.stack(out).permute(1, 2, 0, 3)  # B, T, n_adapters, F
            out = self.sim_adapter[fusion_languages](x, out, out, residual=x_norm)
        return out, tgt_mask, memory, memory_mask, language, cache, use_sim_adapter
class AdaptiveDecoder(Decoder):
    """Transformer decoder with per-language embeddings/output layers and adapter-aware layers."""
    def __init__(
        self,
        languages,
        odim_dict,
        selfattention_layer_type="selfattn",
        attention_dim=256,
        attention_heads=4,
        conv_wshare=4,
        conv_kernel_length=11,
        conv_usebias=False,
        linear_units=2048,
        num_blocks=6,
        dropout_rate=0.1,
        positional_dropout_rate=0.1,
        self_attention_dropout_rate=0.0,
        src_attention_dropout_rate=0.0,
        input_layer="embed",
        use_output_layer=True,
        pos_enc_class=PositionalEncoding,
        normalize_before=True,
        concat_after=False,
        sim_adapter=False,
        shared_adapter=False,
        use_adapters=True,
        fusion_languages=None,
    ):
        # parent is built with a dummy odim of 1; its embedding/output layers
        # are replaced below with per-language ModuleDicts
        super().__init__(1,
            selfattention_layer_type,
            attention_dim,
            attention_heads,
            conv_wshare,
            conv_kernel_length,
            conv_usebias,
            linear_units,
            num_blocks,
            dropout_rate,
            positional_dropout_rate,
            self_attention_dropout_rate,
            src_attention_dropout_rate,
            input_layer,
            use_output_layer,
            pos_enc_class,
            normalize_before,
            concat_after)
        if input_layer == "embed":
            # one embedding table per language (vocabulary sizes differ)
            self.embed = torch.nn.ModuleDict()
            for lang in odim_dict.keys():
                self.embed[lang] = torch.nn.Sequential(
                    torch.nn.Embedding(odim_dict[lang], attention_dim),
                    pos_enc_class(attention_dim, positional_dropout_rate),
                )
        else:
            raise NotImplementedError("only support embed embedding layer")
        # a single dropout value is reused for both attentions below
        assert self_attention_dropout_rate == src_attention_dropout_rate
        if selfattention_layer_type == "selfattn":
            logging.info("decoder self-attention layer type = self-attention")
            self.decoders = repeat(
                num_blocks,
                lambda lnum: AdaptiveDecoderLayer(
                    languages,
                    attention_dim,
                    MultiHeadedAttention(
                        attention_heads, attention_dim, self_attention_dropout_rate
                    ),
                    MultiHeadedAttention(
                        attention_heads, attention_dim, src_attention_dropout_rate
                    ),
                    PositionwiseFeedForward(attention_dim, linear_units, dropout_rate),
                    dropout_rate,
                    normalize_before,
                    concat_after,
                    # one SimAdapter per layer, keyed by the sorted fusion languages
                    torch.nn.ModuleDict({"_".join(sorted(fusion_languages)): SimAdapter(attention_dim, self_attention_dropout_rate, fusion_languages)}) if sim_adapter else None,
                    shared_adapter,
                    use_adapters,
                ),
            )
        else:
            raise NotImplementedError("Only support self-attention decoder layer")
        if use_output_layer:
            # one softmax projection per language
            self.output_layer = torch.nn.ModuleDict()
            for lang in odim_dict.keys():
                self.output_layer[lang] = torch.nn.Linear(attention_dim, odim_dict[lang])
        else:
            self.output_layer = None
    def forward(self, tgt, tgt_mask, memory, memory_mask, language, use_sim_adapter=True):
        """Decode full sequences for ``language``; returns (logits, tgt_mask)."""
        x = self.embed[language](tgt)
        x, tgt_mask, memory, memory_mask, _, _, _ = self.decoders(
            x, tgt_mask, memory, memory_mask, language, None, use_sim_adapter
        )
        if self.normalize_before:
            x = self.after_norm(x)
        if self.output_layer is not None:
            x = self.output_layer[language](x)
        return x, tgt_mask
    def forward_one_step(self, tgt, tgt_mask, memory, memory_mask, language, cache=None):
        """Decode one step for beam search; returns log-probs and updated cache."""
        x = self.embed[language](tgt)
        if cache is None:
            cache = [None] * len(self.decoders)
        new_cache = []
        for c, decoder in zip(cache, self.decoders):
            # memory_mask is passed as None during incremental decoding
            x, tgt_mask, memory, memory_mask, _, _, _ = decoder(
                x, tgt_mask, memory, None, language, cache=c,
            )
            new_cache.append(x)
        if self.normalize_before:
            y = self.after_norm(x[:, -1])
        else:
            y = x[:, -1]
        if self.output_layer is not None:
            y = torch.log_softmax(self.output_layer[language](y), dim=-1)
        return y, new_cache
class E2E(E2ETransformer):
    def __init__(self, idim, odim_dict, args, languages, ignore_id=-1):
        """Construct a multilingual adapter-transformer E2E model.

        Args:
            idim: input feature dimension.
            odim_dict: per-language output vocabulary sizes.
            args: parsed training arguments (espnet-style namespace).
            languages: list of language codes handled by this model.
            ignore_id: padding/ignore label id.
        """
        # parent is built with a dummy odim of 1; all language-dependent
        # modules are rebuilt below as per-language ModuleDicts
        super().__init__(idim, 1, args, ignore_id)
        if args.transformer_attn_dropout_rate is None:
            args.transformer_attn_dropout_rate = args.dropout_rate
        if args.fusion_languages:
            args.fusion_languages = args.fusion_languages.split("_")
        # fusion languages default to all training languages; kept sorted so
        # the SimAdapter ModuleDict key is deterministic
        self.fusion_languages = sorted(args.fusion_languages) if args.fusion_languages else sorted(languages)
        self.encoder = AdaptiveEncoder(
            languages=languages,
            idim=idim,
            selfattention_layer_type=args.transformer_encoder_selfattn_layer_type,
            attention_dim=args.adim,
            attention_heads=args.aheads,
            conv_wshare=args.wshare,
            conv_kernel_length=args.ldconv_encoder_kernel_length,
            conv_usebias=args.ldconv_usebias,
            linear_units=args.eunits,
            num_blocks=args.elayers,
            input_layer=args.transformer_input_layer,
            dropout_rate=args.dropout_rate,
            positional_dropout_rate=args.dropout_rate,
            attention_dropout_rate=args.transformer_attn_dropout_rate,
            sim_adapter=args.sim_adapter,
            shared_adapter=args.shared_adapter,
            use_adapters=args.use_adapters,
            fusion_languages=self.fusion_languages
        )
        if args.mtlalpha < 1:
            # attention decoder is only needed when the CTC weight is < 1
            self.decoder = AdaptiveDecoder(
                languages=languages,
                odim_dict=odim_dict,
                selfattention_layer_type=args.transformer_decoder_selfattn_layer_type,
                attention_dim=args.adim,
                attention_heads=args.aheads,
                conv_wshare=args.wshare,
                conv_kernel_length=args.ldconv_decoder_kernel_length,
                conv_usebias=args.ldconv_usebias,
                linear_units=args.dunits,
                num_blocks=args.dlayers,
                dropout_rate=args.dropout_rate,
                positional_dropout_rate=args.dropout_rate,
                self_attention_dropout_rate=args.transformer_attn_dropout_rate,
                src_attention_dropout_rate=args.transformer_attn_dropout_rate,
                sim_adapter=args.sim_adapter,
                shared_adapter=args.shared_adapter,
                use_adapters=args.use_adapters,
                fusion_languages=self.fusion_languages,
            )
        else:
            self.decoder = None
        # per-language sos/eos both point at the last vocabulary index
        self.soss = {lang: odim_dict[lang] - 1 for lang in languages}
        self.eoss = {lang: odim_dict[lang] - 1 for lang in languages}
        self.odim_dict = odim_dict
        # one label-smoothing criterion per language (vocab sizes differ)
        self.criterion = torch.nn.ModuleDict()
        for lang in languages:
            self.criterion[lang] = LabelSmoothingLoss(
                self.odim_dict[lang],
                self.ignore_id,
                args.lsm_weight,
                args.transformer_length_normalized_loss,
            )
        if args.mtlalpha > 0.0:
            # one CTC head per language
            self.ctc = torch.nn.ModuleDict()
            for lang in languages:
                self.ctc[lang] = CTC(
                    odim_dict[lang], args.adim, args.dropout_rate, ctc_type=args.ctc_type, reduce=True
                )
        else:
            self.ctc = None
        if args.report_cer or args.report_wer:
            # NOTE(review): a single ErrorCalculator is created here, but it is
            # indexed per-language elsewhere in this class — confirm consistency.
            self.error_calculator = ErrorCalculator(
                args.char_list,
                args.sym_space,
                args.sym_blank,
                args.report_cer,
                args.report_wer,
            )
        else:
            self.error_calculator = None
        self.reset_parameters(args)
        # Adapter configuration: when adapters are used, the whole backbone is
        # frozen and only adapter (or SimAdapter) parameters are trainable.
        self.meta_train = args.meta_train
        self.shared_adapter = args.shared_adapter
        self.sim_adapter = False
        self.use_adapters = args.use_adapters
        if self.use_adapters:
            for p in self.parameters():
                p.requires_grad = False
            self.sim_adapter = args.sim_adapter
            if not args.sim_adapter:
                adapter_train_languages = args.adapter_train_languages
                self.enable_adapter_training(adapter_train_languages,
                    shared_adapter=args.shared_adapter, enable_head=args.train_adapter_with_head)
            else:
                self.reset_sim_adapter_parameters()
                self.enable_sim_adapter_training()
        self.recognize_language_branch = None  # Set default recognize language for decoding
def reset_sim_adapter_parameters(self):
key = "_".join(self.fusion_languages)
for layer in self.encoder.encoders:
layer.sim_adapter[key].linear_v.weight.data = (
torch.zeros(self.adim, self.adim) + 0.000001
).fill_diagonal_(1.0)
for layer in self.decoder.decoders:
layer.sim_adapter[key].linear_v.weight.data = (
torch.zeros(self.adim, self.adim) + 0.000001
).fill_diagonal_(1.0)
def enable_sim_adapter_training(self):
key = "_".join(self.fusion_languages)
logging.warning(f"Unfreezing the SimAdapter parameters: {key}")
for layer in self.encoder.encoders:
for p in layer.sim_adapter[key].parameters():
p.requires_grad = True
# for p in layer.adapter_norm.parameters():
# p.requires_grad = True
for layer in self.decoder.decoders:
for p in layer.sim_adapter[key].parameters():
p.requires_grad = True
def get_fusion_guide_loss(self, language):
device = next(self.parameters()).device
if language not in self.fusion_languages:
return torch.tensor(0.0).to(device)
guide_loss = torch.tensor(0.0).to(device)
loss_fn = torch.nn.CrossEntropyLoss(reduction='mean')
lang_id = sorted(self.fusion_languages).index(language)
key = "_".join(self.fusion_languages)
target = torch.tensor(lang_id).unsqueeze(0).to(device)
for layer in self.encoder.encoders:
logits = layer.sim_adapter[key].attn.mean(axis=(0, 1)).unsqueeze(0) # (batch, time, n_adapters)
guide_loss = guide_loss + loss_fn(logits.exp(), target)
layer.sim_adapter.attn = None
for layer in self.decoder.decoders:
logits = layer.sim_adapter[key].attn.mean(axis=(0, 1)).unsqueeze(0) # (batch, time, n_adapters)
guide_loss = guide_loss + loss_fn(logits.exp(), target)
layer.sim_adapter[key].attn = None
return guide_loss
def get_fusion_regularization_loss(self):
reg_loss = 0.0
fusion_reg_loss_weight = 0.01
device = next(self.parameters()).device
key = "_".join(self.fusion_languages)
target = torch.zeros((self.adim, self.adim)).fill_diagonal_(1.0).to(device)
for layer in self.encoder.encoders:
reg_loss = reg_loss + fusion_reg_loss_weight * (target - layer.sim_adapter[key].linear_v.weight).pow(2).sum()
for layer in self.decoder.decoders:
reg_loss = reg_loss + fusion_reg_loss_weight * (target - layer.sim_adapter[key].linear_v.weight).pow(2).sum()
return reg_loss
def enable_adapter_training(self, specified_languages=None, shared_adapter=False, enable_head=False):
# Unfreeze the adapter parameters
if specified_languages:
enable_languages = specified_languages
else:
enable_languages = self.criterion.keys()
logging.warning(f"Unfreezing the adapter parameters of {' '.join(enable_languages)}")
for lang in enable_languages:
if enable_head:
for p in self.decoder.embed[lang].parameters():
p.requires_grad = True
for p in self.decoder.output_layer[lang].parameters():
p.requires_grad = True
for p in self.ctc[lang].parameters():
p.requires_grad = True
if shared_adapter:
lang = shared_adapter
for layer in self.encoder.encoders:
for p in layer.adapters[lang].parameters():
p.requires_grad = True
for layer in self.decoder.decoders:
for p in layer.adapters[lang].parameters():
p.requires_grad = True
    def forward(self, xs_pad, ilens, ys_pad, language, use_sim_adapter=True):
        """E2E forward pass: encoder, decoder, CTC/attention losses, reporting.

        :param torch.Tensor xs_pad: batch of padded source feature sequences (B, Tmax, idim)
        :param torch.Tensor ilens: batch of source sequence lengths (B)
        :param torch.Tensor ys_pad: batch of padded target token id sequences (B, Lmax)
        :param str language: language id selecting per-language heads/criteria
        :param bool use_sim_adapter: NOTE(review): this argument is ignored —
            the encoder and decoder below are always called with
            use_sim_adapter=True; confirm whether it should be forwarded.
        :return: loss tensor, or (loss, guide_loss) tuple when training a
            non-meta model (guide_loss is 0 unless sim_adapter is enabled)
        """
        # 1. forward encoder
        xs_pad = xs_pad[:, : max(ilens)] # for data parallel
        src_mask = make_non_pad_mask(ilens.tolist()).to(xs_pad.device).unsqueeze(-2)
        hs_pad, hs_mask = self.encoder(xs_pad, src_mask, language, use_sim_adapter=True)
        self.hs_pad = hs_pad
        # 2. forward decoder
        if self.decoder is not None:
            ys_in_pad, ys_out_pad = add_sos_eos(
                ys_pad, self.soss[language], self.eoss[language], self.ignore_id
            )
            ys_mask = target_mask(ys_in_pad, self.ignore_id)
            pred_pad, pred_mask = self.decoder(ys_in_pad, ys_mask, hs_pad, hs_mask, language, use_sim_adapter=True)
            self.pred_pad = pred_pad
            # 3. compute attention loss
            loss_att = self.criterion[language](pred_pad, ys_out_pad)
            self.acc = th_accuracy(
                pred_pad.view(-1, self.odim_dict[language]), ys_out_pad, ignore_label=self.ignore_id
            )
        else:
            loss_att = None
            self.acc = None
        # TODO(karita) show predicted text
        # TODO(karita) calculate these stats
        cer_ctc = None
        if self.mtlalpha == 0.0:
            loss_ctc = None
        else:
            # 4. compute CTC loss on the encoder output.
            batch_size = xs_pad.size(0)
            hs_len = hs_mask.view(batch_size, -1).sum(1)
            # NOTE(review): ctc_pred_pad is computed but never used below —
            # looks like dead code; confirm before removing.
            ctc_pred_pad = self.ctc[language].ctc_lo(hs_pad)
            loss_ctc = self.ctc[language](hs_pad.view(batch_size, -1, self.adim), hs_len, ys_pad)
            if not self.training and self.error_calculator is not None:
                ys_hat = self.ctc[language].argmax(hs_pad.view(batch_size, -1, self.adim)).data
                cer_ctc = self.error_calculator[language](ys_hat.cpu(), ys_pad.cpu(), is_ctc=True)
            # for visualization
            if not self.training:
                self.ctc[language].softmax(hs_pad)
        # 5. compute cer/wer
        if self.training or self.error_calculator is None or self.decoder is None:
            cer, wer = None, None
        else:
            ys_hat = pred_pad.argmax(dim=-1)
            cer, wer = self.error_calculator(ys_hat.cpu(), ys_pad.cpu())
        # copied from e2e_asr
        # Blend CTC and attention losses by mtlalpha (0 = attention only,
        # 1 = CTC only).
        alpha = self.mtlalpha
        if alpha == 0:
            self.loss = loss_att
            loss_att_data = float(loss_att)
            loss_ctc_data = None
        elif alpha == 1:
            self.loss = loss_ctc
            loss_att_data = None
            loss_ctc_data = float(loss_ctc)
        else:
            self.loss = alpha * loss_ctc + (1 - alpha) * loss_att
            loss_att_data = float(loss_att)
            loss_ctc_data = float(loss_ctc)
        if self.sim_adapter and self.training:
            guide_loss = self.get_fusion_guide_loss(language)
        loss_data = float(self.loss)
        # Only report sane loss values to the trainer's reporter.
        if loss_data < CTC_LOSS_THRESHOLD and not math.isnan(loss_data):
            self.reporter.report(
                loss_ctc_data, loss_att_data, self.acc, cer_ctc, cer, wer, loss_data
            )
        else:
            logging.warning("loss (=%f) is not correct", loss_data)
        if self.training and not self.meta_train:
            if not self.sim_adapter:
                guide_loss = torch.tensor(0.0).cuda()
            return (self.loss, guide_loss)
        return self.loss
def calculate_all_ctc_probs(self, xs_pad, ilens, ys_pad, language):
"""E2E CTC probability calculation.
:param torch.Tensor xs_pad: batch of padded input sequences (B, Tmax)
:param torch.Tensor ilens: batch of lengths of input sequences (B)
:param torch.Tensor ys_pad: batch of padded token id sequence tensor (B, Lmax)
:return: CTC probability (B, Tmax, vocab)
:rtype: float ndarray
"""
ret = None
if self.mtlalpha == 0:
return ret
self.eval()
with torch.no_grad():
self.forward(xs_pad, ilens, ys_pad, language)
for name, m in self.named_modules():
if isinstance(m, CTC) and m.probs is not None:
ret = m.probs.cpu().numpy()
self.train()
return ret
def calculate_all_attentions(self, xs_pad, ilens, ys_pad, language):
"""E2E attention calculation.
:param torch.Tensor xs_pad: batch of padded input sequences (B, Tmax, idim)
:param torch.Tensor ilens: batch of lengths of input sequences (B)
:param torch.Tensor ys_pad: batch of padded token id sequence tensor (B, Lmax)
:return: attention weights (B, H, Lmax, Tmax)
:rtype: float ndarray
"""
self.eval()
with torch.no_grad():
self.forward(xs_pad, ilens, ys_pad, language)
ret = dict()
for name, m in self.named_modules():
if (
isinstance(m, MultiHeadedAttention)
or isinstance(m, DynamicConvolution)
or isinstance(m, RelPositionMultiHeadedAttention)
):
ret[name] = m.attn.cpu().numpy()
if isinstance(m, DynamicConvolution2D):
ret[name + "_time"] = m.attn_t.cpu().numpy()
ret[name + "_freq"] = m.attn_f.cpu().numpy()
self.train()
return ret
def calculate_sim_adapter_attentions(self, xs_pad, ilens, ys_pad, language):
self.eval()
with torch.no_grad():
self.forward(xs_pad, ilens, ys_pad, language)
ret = dict()
for name, m in self.named_modules():
if (
isinstance(m, MultiHeadedAttention)
or isinstance(m, DynamicConvolution)
or isinstance(m, RelPositionMultiHeadedAttention)
) and "sim_adapter" in name:
ret[name] = m.attn.cpu().numpy()
return ret
def encode(self, x, language):
self.eval()
x = torch.as_tensor(x).unsqueeze(0)
enc_output, _ = self.encoder(x, None, language)
return enc_output.squeeze(0)
def set_recognize_language_branch(self, language):
self.recognize_language_branch = language
    def recognize_batch(self, xs, recog_args, char_list=None, rnnlm=None, language=None):
        """Batched beam-search recognition with optional CTC/LM shallow fusion.

        :param list xs: list of input feature arrays, each of shape (T, idim)
        :param Namespace recog_args: decoding options (beam_size, penalty,
            ctc_weight, maxlenratio, minlenratio, lm_weight, nbest, ...)
        :param list char_list: unused here; kept for interface compatibility
        :param torch.nn.Module rnnlm: optional RNN language model for fusion
        :param str language: language branch to decode with; falls back to the
            branch set via set_recognize_language_branch
        :return: per-utterance n-best hypotheses; each hypothesis is a dict
            with "yseq" (token ids incl. sos/eos) and "score"
        :rtype: list
        """
        assert language is not None or self.recognize_language_branch is not None, \
            "Recognize language is not specified"
        if language is None:
            language = self.recognize_language_branch
        # Remember training mode so it can be restored at the end.
        prev = self.training
        self.eval()
        ilens = numpy.fromiter((xx.shape[0] for xx in xs), dtype=numpy.int64)
        # subsample frame
        xs = [xx[:: self.subsample[0], :] for xx in xs]
        xs = [to_device(self, to_torch_tensor(xx).float()) for xx in xs]
        xs_pad = pad_list(xs, 0.0)
        src_mask = make_non_pad_mask(ilens.tolist()).to(xs_pad.device).unsqueeze(-2)
        # 1. Encoder
        hs_pad, hs_mask = self.encoder(xs_pad, src_mask, language)
        hlens = torch.tensor([int(sum(mask[0])) for mask in hs_mask])
        # calculate log P(z_t|X) for CTC scores
        if recog_args.ctc_weight > 0.0:
            lpz = self.ctc[language].log_softmax(hs_pad)
            normalize_score = False
        else:
            lpz = None
            normalize_score = True
        logging.info("max input length: " + str(hs_pad.size(1)))
        # search params
        batch = len(hlens)
        beam = recog_args.beam_size
        penalty = recog_args.penalty
        ctc_weight = getattr(recog_args, "ctc_weight", 0)  # for NMT
        att_weight = 1.0 - ctc_weight
        # All hypotheses of all utterances are flattened into n_bb rows.
        n_bb = batch * beam
        pad_b = to_device(hs_pad, torch.arange(batch) * beam).view(-1, 1)
        max_hlens = hlens
        if recog_args.maxlenratio == 0:
            maxlens = max_hlens
        else:
            maxlens = [
                max(1, int(recog_args.maxlenratio * max_hlen)) for max_hlen in max_hlens
            ]
        minlen = min([int(recog_args.minlenratio * max_hlen) for max_hlen in max_hlens])
        logging.info("max output lengths: " + str(maxlens))
        logging.info("min output length: " + str(minlen))
        vscores = to_device(hs_pad, torch.zeros(batch, beam))
        rnnlm_state = None
        import six
        # initialize hypothesis
        yseq = [[self.soss[language]] for _ in six.moves.range(n_bb)]
        accum_odim_ids = [self.soss[language] for _ in six.moves.range(n_bb)]
        stop_search = [False for _ in six.moves.range(batch)]
        nbest_hyps = [[] for _ in six.moves.range(batch)]
        ended_hyps = [[] for _ in six.moves.range(batch)]
        # Expand encoder outputs/masks so every beam row sees its utterance.
        exp_hs_mask = (
            hs_mask.unsqueeze(1).repeat(1, beam, 1, 1).contiguous()
        )  # (batch, beam, 1, T)
        exp_hs_mask = exp_hs_mask.view(n_bb, hs_mask.size()[1], hs_mask.size()[2])
        exp_h = (
            hs_pad.unsqueeze(1).repeat(1, beam, 1, 1).contiguous()
        )  # (batch, beam, T, F)
        exp_h = exp_h.view(n_bb, hs_pad.size()[1], hs_pad.size()[2])
        ctc_scorer, ctc_state = None, None
        if lpz is not None:
            scoring_num = min(
                int(beam * CTC_SCORING_RATIO) if att_weight > 0.0 else 0,
                lpz.size(-1),
            )
            ctc_scorer = CTCPrefixScoreTH(lpz, hlens, 0, self.eoss[language])
        for i in six.moves.range(max(maxlens)):
            logging.debug("position " + str(i))
            # get nbest local scores and their ids
            ys_mask = subsequent_mask(i + 1).to(hs_pad.device).unsqueeze(0)
            ys = torch.tensor(yseq).to(hs_pad.device)
            vy = to_device(hs_pad, torch.LongTensor(self._get_last_yseq(yseq)))
            # local_att_scores (n_bb = beam * batch, vocab)
            if self.decoder is not None:
                local_att_scores = self.decoder.forward_one_step(
                    ys, ys_mask, exp_h, memory_mask=exp_hs_mask, language=language,
                )[0]
            else:
                local_att_scores = to_device(
                    hs_pad, torch.zeros((n_bb, lpz.size(-1)), dtype=lpz.dtype)
                )
            if rnnlm:
                rnnlm_state, local_lm_scores = rnnlm.buff_predict(rnnlm_state, vy, n_bb)
                local_scores = local_att_scores + recog_args.lm_weight * local_lm_scores
            else:
                local_scores = local_att_scores
            # ctc
            if ctc_scorer:
                local_scores = att_weight * local_att_scores
                local_scores[:, 0] = self.logzero  # avoid choosing blank
                # Rescore only the top partial candidates with the CTC prefix
                # scorer to bound its cost.
                part_ids = (
                    torch.topk(local_scores, scoring_num, dim=-1)[1]
                    if scoring_num > 0
                    else None
                )
                local_ctc_scores, ctc_state = ctc_scorer(
                    yseq, ctc_state, part_ids
                )  # local_ctc_scores (n_bb, odim)
                local_scores = local_scores + ctc_weight * local_ctc_scores
                if rnnlm:
                    local_scores = local_scores + recog_args.lm_weight * local_lm_scores
            local_scores = local_scores.view(batch, beam, self.odim_dict[language])
            if i == 0:
                # Only the first beam row is a real hypothesis at step 0.
                local_scores[:, 1:, :] = self.logzero
            # accumulate scores
            eos_vscores = local_scores[:, :, self.eoss[language]] + vscores
            vscores = vscores.view(batch, beam, 1).repeat(1, 1, self.odim_dict[language])
            vscores[:, :, self.eoss[language]] = self.logzero
            vscores = (vscores + local_scores).view(batch, -1)  # (batch, odim * beam)
            # global pruning
            accum_best_scores, accum_best_ids = torch.topk(vscores, beam, 1)
            accum_odim_ids = (
                torch.fmod(accum_best_ids, self.odim_dict[language]).view(-1).data.cpu().tolist()
            )
            accum_padded_beam_ids = (
                (accum_best_ids // self.odim_dict[language] + pad_b).view(-1).data.cpu().tolist()
            )
            # NOTE(review): yseq[:][:] is only a shallow copy of the outer
            # list (inner lists are shared); safe here because yseq is rebound
            # rather than mutated below, and yk copies the inner list — verify.
            y_prev = yseq[:][:]
            yseq = self._index_select_list(yseq, accum_padded_beam_ids)
            yseq = self._append_ids(yseq, accum_odim_ids)
            vscores = accum_best_scores
            vidx = to_device(hs_pad, torch.LongTensor(accum_padded_beam_ids))
            # pick ended hyps
            if i >= minlen:
                k = 0
                penalty_i = (i + 1) * penalty
                thr = accum_best_scores[:, -1]
                for samp_i in six.moves.range(batch):
                    if stop_search[samp_i]:
                        k = k + beam
                        continue
                    for beam_j in six.moves.range(beam):
                        _vscore = None
                        if eos_vscores[samp_i, beam_j] > thr[samp_i]:
                            yk = y_prev[k][:]
                            if len(yk) <= maxlens[samp_i]:
                                _vscore = eos_vscores[samp_i][beam_j] + penalty_i
                                rnnlm_idx = k
                        elif i == maxlens[samp_i] - 1:
                            # Force-finish hypotheses at the length limit.
                            yk = yseq[k][:]
                            _vscore = vscores[samp_i][beam_j] + penalty_i
                            rnnlm_idx = accum_padded_beam_ids[k]
                        # NOTE(review): truthiness test — a hypothesis whose
                        # score is exactly 0.0 would be dropped; confirm intended.
                        if _vscore:
                            yk.append(self.eoss[language])
                            if rnnlm:
                                _vscore += recog_args.lm_weight * rnnlm.final(
                                    rnnlm_state, index=rnnlm_idx
                                )
                            ended_hyps[samp_i].append(
                                {"yseq": yk, "score": _vscore.data.cpu().numpy()}
                            )
                        k = k + 1
            # end detection
            stop_search = [
                stop_search[samp_i]
                or end_detect(ended_hyps[samp_i], i)
                or i >= maxlens[samp_i]
                for samp_i in six.moves.range(batch)
            ]
            stop_search_summary = list(set(stop_search))
            if len(stop_search_summary) == 1 and stop_search_summary[0]:
                break
            # Reorder cached LM / CTC states to follow the surviving beams.
            if rnnlm:
                rnnlm_state = self._index_select_lm_state(rnnlm_state, 0, vidx)
            if ctc_scorer:
                ctc_state = ctc_scorer.index_select_state(ctc_state, accum_best_ids)
        torch.cuda.empty_cache()
        # Utterances with no finished hypothesis get a dummy <sos><eos> result.
        dummy_hyps = [
            {"yseq": [self.soss[language], self.eoss[language]], "score": numpy.array([-float("inf")])}
        ]
        ended_hyps = [
            ended_hyps[samp_i] if len(ended_hyps[samp_i]) != 0 else dummy_hyps
            for samp_i in six.moves.range(batch)
        ]
        if normalize_score:
            for samp_i in six.moves.range(batch):
                for x in ended_hyps[samp_i]:
                    x["score"] /= len(x["yseq"])
        nbest_hyps = [
            sorted(ended_hyps[samp_i], key=lambda x: x["score"], reverse=True)[
                : min(len(ended_hyps[samp_i]), recog_args.nbest)
            ]
            for samp_i in six.moves.range(batch)
        ]
        if prev:
            self.train()
        return nbest_hyps
| 42,330 | 40.058196 | 177 | py |
NeuralSpeech | NeuralSpeech-master/AdapterASR/balanced_sampler.py | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# Reference: https://github.com/khornlund/pytorch-balanced-sampler
import torch
import torch.utils.data
import random
import collections
import logging
import numpy as np
from torch.utils.data.sampler import BatchSampler, WeightedRandomSampler
class BalancedBatchSampler(torch.utils.data.sampler.Sampler):
    """Sampler that interleaves classes so iteration is class-balanced.

    Adapted from
    https://github.com/galatolofederico/pytorch-balanced-batch/blob/master/sampler.py

    Indices are bucketed by the label returned from :meth:`_get_label`;
    smaller classes are oversampled (with replacement) up to the size of the
    largest class, and iteration then visits the classes round-robin.
    """

    def __init__(self, dataset, labels=None):
        self.labels = labels
        self.dataset = collections.defaultdict(list)
        self.balanced_max = 0
        # Bucket every sample index by its class label.
        for idx in range(len(dataset)):
            bucket = self.dataset[self._get_label(dataset, idx)]
            bucket.append(idx)
            self.balanced_max = max(self.balanced_max, len(bucket))
        # Oversample minority classes until all buckets have equal size.
        for label in self.dataset:
            bucket = self.dataset[label]
            while len(bucket) < self.balanced_max:
                bucket.append(random.choice(bucket))
        self.keys = list(self.dataset.keys())
        logging.warning(self.keys)
        self.currentkey = 0
        self.indices = [-1] * len(self.keys)

    def __iter__(self):
        # Round-robin over classes; stop once the current class's cursor is
        # exhausted, then reset the cursors so the sampler can be reused.
        while self.indices[self.currentkey] < self.balanced_max - 1:
            self.indices[self.currentkey] += 1
            yield self.dataset[self.keys[self.currentkey]][self.indices[self.currentkey]]
            self.currentkey = (self.currentkey + 1) % len(self.keys)
        self.indices = [-1] * len(self.keys)

    def _get_label(self, dataset, idx):
        # Each dataset item is a minibatch of (utt_id, info) pairs; the class
        # label is the 'category' field of the first utterance's info dict.
        return dataset[idx][0][1]['category']

    def __len__(self):
        return self.balanced_max * len(self.keys)
| 2,341 | 38.033333 | 108 | py |
NeuralSpeech | NeuralSpeech-master/AdapterASR/utils.py | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import torch
import logging
from espnet.asr.asr_utils import add_results_to_json
import argparse
import numpy as np
import collections
import json
def load_head_from_pretrained_model(model, model_path):
    """Copy only head parameters (embedding, output layer, CTC) from a
    checkpoint into ``model``; all other parameters are left untouched.

    :param torch.nn.Module model: destination model (updated in place)
    :param str model_path: checkpoint path (bare state dict or {'model': ...})
    """
    checkpoint = torch.load(model_path, map_location=lambda storage, loc: storage)
    if "model" in checkpoint.keys():
        checkpoint = checkpoint["model"]
    head_markers = ("decoder.embed.", "ctc.", "decoder.output_layer.")
    head_params = {
        k: v for k, v in checkpoint.items() if any(m in k for m in head_markers)
    }
    merged = model.state_dict()
    merged.update(head_params)
    for key in merged.keys():
        if key in head_params.keys():
            logging.info("loading " + key)
    model.load_state_dict(merged)
def load_adapter_from_pretrained_model(model, model_path, src_adapter, tgt_adapter):
    '''
    Copy the parameters of one named adapter from a checkpoint into another
    adapter of ``model``.

    :param torch.nn.Module model: destination model (updated in place)
    :param str model_path: checkpoint path (bare state dict or {'model': ...})
    :param str src_adapter: name of the adapter to read from the checkpoint
    :param str tgt_adapter: name of the adapter in ``model`` to overwrite
    '''
    model_dict = torch.load(model_path, map_location=lambda storage, loc: storage)
    if "model" in model_dict.keys():
        model_dict = model_dict["model"]
    # Rewrite only the "adapters.<name>." segment of each matching key.
    # A bare k.replace(src_adapter, tgt_adapter) would corrupt keys whenever
    # the adapter name occurs elsewhere in the key (e.g. "a" inside
    # "adapters", or a language id like "en" inside "encoder"), producing
    # unexpected keys that make load_state_dict fail.
    src_marker = f"adapters.{src_adapter}."
    tgt_marker = f"adapters.{tgt_adapter}."
    src_dict = {
        k.replace(src_marker, tgt_marker): v
        for k, v in model_dict.items()
        if src_marker in k
    }
    dst_state = model.state_dict()
    dst_state.update(src_dict)
    for key in dst_state.keys():
        if key in src_dict.keys():
            logging.info("loading " + key)
    model.load_state_dict(dst_state)
# ==================== EasyEspnet functions =================================
def str2bool(str):
    """Return True iff the given string is 'true' (case-insensitive)."""
    return str.lower() == 'true'
def setup_logging(verbose=1):
    """Configure root logging; verbose <= 0 restricts output to WARN level."""
    fmt = "%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s"
    if verbose > 0:
        logging.basicConfig(level=logging.INFO, format=fmt)
    else:
        logging.basicConfig(level=logging.WARN, format=fmt)
        logging.warning("Skip DEBUG/INFO messages")
# Training stats
def dict_average(dic):
    """For every '<name>_lst' entry, add a '<name>' entry holding its mean.

    The original list entries are kept; ``dic`` is modified in place and is
    also returned for convenience.
    """
    means = {
        key[:-4]: np.mean(values)
        for key, values in dic.items()
        if key.endswith("_lst")
    }
    dic.update(means)
    return dic
# Load and save
def load_pretrained_model(model, model_path, modules_to_load=None, exclude_modules=None):
    '''
    Partially initialize ``model`` from a checkpoint.

    Example:
        load_pretrained_model(model=model, model_path="",
            modules_to_load=None, exclude_modules="")

    :param torch.nn.Module model: destination model (updated in place)
    :param str model_path: checkpoint path (bare state dict)
    :param str modules_to_load: comma-separated key prefixes to load;
        falsy loads everything that survives the exclusion filter
    :param str exclude_modules: comma-separated key prefixes to skip
    '''
    checkpoint = torch.load(model_path, map_location=lambda storage, loc: storage)
    if exclude_modules:
        excluded = exclude_modules.split(",")
        checkpoint = {
            k: v for k, v in checkpoint.items()
            if not any(k.startswith(e) for e in excluded)
        }
    if not modules_to_load:
        selected = checkpoint
    else:
        selected = {}
        for prefix in modules_to_load.split(","):
            selected.update({k: v for k, v in checkpoint.items() if k.startswith(prefix)})
    merged = model.state_dict()
    merged.update(selected)
    model.load_state_dict(merged)
def torch_save(model, save_path, optimizer=None, local_rank=0):
    """Save model (and optionally optimizer) state; no-op on non-zero ranks.

    Unwraps DataParallel/DDP wrappers via ``model.module`` when present.
    When an optimizer is given, the snapshot is an OrderedDict with
    'model' and 'optimizer' entries; otherwise it is the bare state dict.
    """
    if local_rank != 0:
        return
    target = model.module if hasattr(model, "module") else model
    if optimizer:
        snapshot = collections.OrderedDict(
            model=target.state_dict(), optimizer=optimizer.state_dict()
        )
    else:
        snapshot = target.state_dict()
    torch.save(snapshot, save_path)
def torch_load(snapshot_path, model, optimizer=None):
    """Restore model (and optionally optimizer) state from a snapshot.

    Accepts both bare state dicts and dicts shaped like
    {'model': ..., 'optimizer': ...}; unwraps DataParallel/DDP wrappers
    via ``model.module`` when present.
    """
    # load snapshot
    snapshot = torch.load(snapshot_path, map_location=lambda storage, loc: storage)
    if "model" not in snapshot.keys():
        # Bare state dict: wrap it so the code below has a uniform shape.
        snapshot = collections.OrderedDict(model=snapshot)
    target = model.module if hasattr(model, "module") else model
    target.load_state_dict(snapshot["model"])
    if optimizer:
        optimizer.load_state_dict(snapshot["optimizer"])
    del snapshot
# Decoding
def compute_wer(ref, hyp, normalize=False):
    """Compute Word Error Rate via Levenshtein alignment.

    [Reference]
        https://martin-thoma.com/word-error-rate-calculation/
    Args:
        ref (list): words in the reference transcript
        hyp (list): words in the predicted transcript
        normalize (bool, optional): if True, divide by the length of ref
    Returns:
        wer (float): Word Error Rate between ref and hyp
        n_sub (int): the number of substitutions
        n_ins (int): the number of insertions
        n_del (int): the number of deletions
        n_cor (int): the number of correct words
    """
    n_ref, n_hyp = len(ref), len(hyp)
    # DP table: d[i][j] = edit distance between ref[:i] and hyp[:j].
    d = np.zeros((n_ref + 1, n_hyp + 1), dtype=np.uint16)
    d[0, :] = np.arange(n_hyp + 1)
    d[:, 0] = np.arange(n_ref + 1)
    for i in range(1, n_ref + 1):
        for j in range(1, n_hyp + 1):
            if ref[i - 1] == hyp[j - 1]:
                d[i][j] = d[i - 1][j - 1]
            else:
                d[i][j] = min(d[i - 1][j - 1] + 1,  # substitution
                              d[i][j - 1] + 1,      # insertion
                              d[i - 1][j] + 1)      # deletion
    wer = d[n_ref][n_hyp]
    # Backtrace to recover the alignment operations (same tie-breaking
    # order as before: correct, insert, substitute, delete).
    x, y = n_ref, n_hyp
    ops = []
    while not (x == 0 and y == 0):
        if x > 0 and y > 0:
            if d[x][y] == d[x - 1][y - 1] and ref[x - 1] == hyp[y - 1]:
                ops.append("C")
                x, y = x - 1, y - 1
            elif d[x][y] == d[x][y - 1] + 1:
                ops.append("I")
                y = y - 1
            elif d[x][y] == d[x - 1][y - 1] + 1:
                ops.append("S")
                x, y = x - 1, y - 1
            else:
                ops.append("D")
                x = x - 1
        elif x == 0:
            if d[x][y] == d[x][y - 1] + 1:
                ops.append("I")
                y = y - 1
            else:
                ops.append("D")
                x = x - 1
        else:  # y == 0 and x > 0
            ops.append("D")
            x = x - 1
    n_sub = ops.count("S")
    n_ins = ops.count("I")
    n_del = ops.count("D")
    n_cor = ops.count("C")
    assert wer == (n_sub + n_ins + n_del)
    assert n_cor == (len(ref) - n_sub - n_del)
    if normalize:
        wer /= len(ref)
    return wer, n_sub, n_ins, n_del, n_cor
def token2text(tokens, bpemodel=None):
    """Join a token sequence into plain text.

    With a SentencePiece model the pieces are decoded by the model;
    otherwise tokens are concatenated and '<space>' markers become spaces.
    """
    if bpemodel:
        return bpemodel.decode_pieces(tokens)
    # sclite does not consider the number of spaces when splitting
    return " ".join(tokens).replace(" ", "").replace("<space>", " ")
def recognize_and_evaluate(dataloader, model, args, model_path=None, wer=False, write_to_json=False):
    """Run batch beam-search recognition over ``dataloader`` and score it.

    :param dataloader: loader yielding (fbank, ilens, tokens) minibatches
    :param model: trained E2E model (possibly wrapped in DataParallel)
    :param args: namespace with decoding options plus char_list/bpemodel
    :param str model_path: optional snapshot to load into ``model`` first
    :param bool wer: also compute word error rate (in addition to CER)
    :param bool write_to_json: dump n-best results to args.result_label
    :return: dict mapping 'cer' (and optionally 'wer') to count/rate stats
    """
    if model_path:
        torch_load(model_path, model)
    orig_model = model
    if hasattr(model, "module"):
        model = model.module
    if write_to_json:
        # read json data
        assert args.result_label and args.recog_json
        with open(args.recog_json, "rb") as f:
            js = json.load(f)["utts"]
        new_js = {}
    model.eval()
    recog_args = {
        "beam_size": args.beam_size,
        "penalty": args.penalty,
        "ctc_weight": args.ctc_weight,
        "maxlenratio": args.maxlenratio,
        "minlenratio": args.minlenratio,
        "lm_weight": args.lm_weight,
        "rnnlm": args.rnnlm,
        "nbest": args.nbest,
        "space": args.sym_space,
        "blank": args.sym_blank,
    }
    recog_args = argparse.Namespace(**recog_args)
    # Both branches need integer counters.  The previous CER-only branch
    # used dict(cer=None), which crashed with a TypeError on the first
    # "+=" below.
    err_dict = (
        dict(cer=collections.defaultdict(int))
        if not wer
        else dict(cer=collections.defaultdict(int), wer=collections.defaultdict(int))
    )
    with torch.no_grad():
        for batch_idx, data in enumerate(dataloader):
            logging.warning(f"Testing CER/WERs: {batch_idx+1}/{len(dataloader)}")
            fbank, ilens, tokens = data
            # Trim padding frames before recognition.
            fbanks = []
            for i, fb in enumerate(fbank):
                fbanks.append(fb[: ilens[i], :])
            fbank = fbanks
            nbest_hyps = model.recognize_batch(
                fbank, recog_args, char_list=None, rnnlm=None
            )
            # Best hypothesis per utterance, without sos/eos.
            y_hats = [nbest_hyp[0]["yseq"][1:-1] for nbest_hyp in nbest_hyps]
            if write_to_json:
                for utt_idx in range(len(fbank)):
                    # NOTE(review): assumes the dataloader iterates the dataset
                    # in order (no shuffling) so batch_idx indexes line up.
                    name = dataloader.dataset[batch_idx][utt_idx][0]
                    new_js[name] = add_results_to_json(
                        js[name], nbest_hyps[utt_idx], args.char_list
                    )
            for i, y_hat in enumerate(y_hats):
                y_true = tokens[i]
                hyp_token = [
                    args.char_list[int(idx)] for idx in y_hat if int(idx) != -1
                ]
                ref_token = [
                    args.char_list[int(idx)] for idx in y_true if int(idx) != -1
                ]
                for key in sorted(err_dict.keys()):  # cer then wer
                    if key == "wer":
                        # Convert token sequences to word sequences.
                        ref_token = token2text(ref_token, args.bpemodel).split()
                        hyp_token = token2text(hyp_token, args.bpemodel).split()
                    logging.debug("HYP: " + str(hyp_token))
                    logging.debug("REF: " + str(ref_token))
                    utt_err, utt_nsub, utt_nins, utt_ndel, utt_ncor = compute_wer(
                        ref_token, hyp_token
                    )
                    err_dict[key]["n_word"] += len(ref_token)
                    if utt_err != 0:
                        err_dict[key]["n_err"] += utt_err  # Char / word error
                        err_dict[key]["n_ser"] += 1  # Sentence error
                    err_dict[key]["n_cor"] += utt_ncor
                    err_dict[key]["n_sub"] += utt_nsub
                    err_dict[key]["n_ins"] += utt_nins
                    err_dict[key]["n_del"] += utt_ndel
                    err_dict[key]["n_sent"] += 1
        for key in err_dict.keys():
            err_dict[key]["err"] = err_dict[key]["n_err"] / err_dict[key]["n_word"] * 100.0
            # Sentence error rate = erroneous sentences / total sentences
            # (previously this was erroneously divided by n_word).
            err_dict[key]["ser"] = err_dict[key]["n_ser"] / err_dict[key]["n_sent"] * 100.0
        torch.cuda.empty_cache()
    if write_to_json:
        with open(args.result_label, "wb") as f:
            f.write(
                json.dumps(
                    {"utts": new_js}, indent=4, ensure_ascii=False, sort_keys=True
                ).encode("utf_8")
            )
    model = orig_model
    return err_dict
NeuralSpeech | NeuralSpeech-master/AdapterASR/data_load.py | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
from espnet.utils.training.batchfy import make_batchset
from torch.utils.data import DataLoader
from torch.nn.utils.rnn import pad_sequence
import torch
import os
import json
import kaldiio
import random
import logging
import sentencepiece as spm
from balanced_sampler import BalancedBatchSampler
#cv mt cnh ky dv sl el lv fy-NL sah
# Per-dataset feature/label path templates.  At load time the "template"
# entries are copied and specialized for a concrete language id (every
# "template" substring in the paths is replaced by the language id).
# template100 uses a 100-unit BPE vocabulary, template150 a 150-unit one.
data_config = {
    "template100": {
        "train": "dump/train_template/deltafalse/data_unigram100.json",
        "val": "dump/dev_template/deltafalse/data_unigram100.json",
        "test": "dump/test_template/deltafalse/data_unigram100.json",
        "token": "data/template_lang_char/train_template_unigram100_units.txt",
        # Original feature-path prefix baked into the jsons; replaced by the
        # local root_path when features are loaded.
        "prefix": "/D_data/commonvoice/asr1/",
        "bpemodel": "data/template_lang_char/train_template_unigram100.model",
    },
    "template150": {
        "train": "dump/train_template/deltafalse/data_unigram150.json",
        "val": "dump/dev_template/deltafalse/data_unigram150.json",
        "test": "dump/test_template/deltafalse/data_unigram150.json",
        "token": "data/template_lang_char/train_template_unigram150_units.txt",
        "prefix": "/D_data/commonvoice/asr1/",
        "bpemodel": "data/template_lang_char/train_template_unigram150.model",
    },
}
# Languages that use the smaller 100-unit BPE vocabulary (template100).
low_resource_languages = ["ro", "cs", "br", "ar", "uk"]
def read_json_file(fname):
    """Load an ESPnet data json and return its 'utts' dictionary."""
    with open(fname, "rb") as f:
        return json.load(f)["utts"]
def load_json(train_json_file, dev_json_file, test_json_file):
    """Read the train/dev/test ESPnet jsons.

    When no dev json exists on disk, the last 10% of the training
    utterances are split off as a dev set (order-preserving, no shuffle)
    and the split is cached to ``dev_json_file`` for later runs.
    """
    train_json = read_json_file(train_json_file)
    if os.path.isfile(dev_json_file):
        dev_json = read_json_file(dev_json_file)
    else:
        items = list(train_json.items())
        train_size = int(0.9 * len(items))
        logging.warning(
            f"No dev set provided, will split the last {len(items) - train_size} (10%) samples from training data"
        )
        # random.shuffle(train_json_item)
        train_json = dict(items[:train_size])
        dev_json = dict(items[train_size:])
        # Save temp dev set so subsequent runs reuse the same split.
        with open(dev_json_file, "w") as f:
            json.dump({"utts": dev_json}, f)
        logging.warning(f"Temporary dev set saved: {dev_json_file}")
    test_json = read_json_file(test_json_file)
    return train_json, dev_json, test_json
def load_data(root_path, dataset, args):
    """Build train/dev/test DataLoaders and dimensions for one language.

    :param str root_path: local root of the ESPnet recipe; replaces the
        feature-path prefix stored in the jsons so arks resolve locally
    :param str dataset: language id; specializes the "template" config
        (100-unit BPE for low-resource languages, 150-unit otherwise)
    :param args: namespace carrying batching options (batch_size, maxlen_in,
        maxlen_out, batch_count, batch_bins, ngpu, dist_train, ...)
    :return: ((train_loader, dev_loader, test_loader), (idim, odim))
    """
    def collate(minibatch):
        # Each dataset element is already a whole minibatch (a list of
        # (utt_id, info) pairs); read fbank features from Kaldi arks,
        # parse token ids, and pad both to batch tensors.
        fbanks = []
        tokens = []
        for _, info in minibatch[0]:
            fbanks.append(
                torch.tensor(
                    kaldiio.load_mat(
                        info["input"][0]["feat"].replace(
                            data_config[dataset]["prefix"], root_path
                        )
                    )
                )
            )
            tokens.append(
                torch.tensor([int(s) for s in info["output"][0]["tokenid"].split()])
            )
        ilens = torch.tensor([x.shape[0] for x in fbanks])
        return (
            pad_sequence(fbanks, batch_first=True, padding_value=0),
            ilens,
            pad_sequence(tokens, batch_first=True, padding_value=-1),
        )
    # Specialize the template config for this language.
    language = dataset
    if language in low_resource_languages:
        template_key = "template100"
    else:
        template_key = "template150"
    data_config[dataset] = data_config[template_key].copy()
    for key in ["train", "val", "test", "token"]:
        data_config[dataset][key] = data_config[template_key][key].replace("template", dataset)
    train_json = os.path.join(root_path, data_config[dataset]["train"])
    dev_json = (
        os.path.join(root_path, data_config[dataset]["val"])
        if data_config[dataset]["val"]
        else f"{root_path}/tmp_dev_set_{dataset}.json"
    )
    test_json = os.path.join(root_path, data_config[dataset]["test"])
    train_json, dev_json, test_json = load_json(train_json, dev_json, test_json)
    # Input/output dimensions are read off the first training utterance.
    _, info = next(iter(train_json.items()))
    idim = info["input"][0]["shape"][1]
    odim = info["output"][0]["shape"][1]
    use_sortagrad = False  # args.sortagrad == -1 or args.sortagrad > 0
    # trainset = make_batchset(train_json, batch_size, max_length_in=800, max_length_out=150)
    trainset = make_batchset(
        train_json,
        args.batch_size,
        args.maxlen_in,
        args.maxlen_out,
        args.minibatches,
        min_batch_size=args.ngpu if (args.ngpu > 1 and not args.dist_train) else 1,
        shortest_first=use_sortagrad,
        count=args.batch_count,
        batch_bins=args.batch_bins,
        batch_frames_in=args.batch_frames_in,
        batch_frames_out=args.batch_frames_out,
        batch_frames_inout=args.batch_frames_inout,
        iaxis=0,
        oaxis=0,
    )
    # devset = make_batchset(dev_json, batch_size, max_length_in=800, max_length_out=150)
    devset = make_batchset(
        dev_json,
        args.batch_size if args.ngpu <= 1 else int(args.batch_size / args.ngpu),
        args.maxlen_in,
        args.maxlen_out,
        args.minibatches,
        min_batch_size=1,
        count=args.batch_count,
        batch_bins=args.batch_bins,
        batch_frames_in=args.batch_frames_in,
        batch_frames_out=args.batch_frames_out,
        batch_frames_inout=args.batch_frames_inout,
        iaxis=0,
        oaxis=0,
    )
    testset = make_batchset(
        test_json,
        args.batch_size if args.ngpu <= 1 else int(args.batch_size / args.ngpu),
        args.maxlen_in,
        args.maxlen_out,
        args.minibatches,
        min_batch_size=1,
        count=args.batch_count,
        batch_bins=args.batch_bins,
        batch_frames_in=args.batch_frames_in,
        batch_frames_out=args.batch_frames_out,
        batch_frames_inout=args.batch_frames_inout,
        iaxis=0,
        oaxis=0,
    )
    if args.dist_train and args.ngpu > 1:
        train_sampler = torch.utils.data.distributed.DistributedSampler(trainset)
    else:
        train_sampler = None
    # batch_size=1 because each dataset element is already a full minibatch.
    train_loader = DataLoader(
        trainset,
        batch_size=1,
        collate_fn=collate,
        num_workers=args.n_iter_processes,
        shuffle=(train_sampler is None),
        pin_memory=True,
        sampler=train_sampler,
    )
    dev_loader = DataLoader(
        devset,
        batch_size=1,
        collate_fn=collate,
        shuffle=False,
        num_workers=args.n_iter_processes,
        pin_memory=True,
    )
    test_loader = DataLoader(
        testset,
        batch_size=1,
        collate_fn=collate,
        shuffle=False,
        num_workers=args.n_iter_processes,
        pin_memory=True,
    )
    return (train_loader, dev_loader, test_loader), (idim, odim)
def load_multilingual_data(root_path, datasets, args, languages):
    """Build DataLoaders pooling several languages into one dataset.

    Every utterance is tagged with its language via a 'category' field, so
    batches are monolingual and the collate function can report the batch
    language.  With args.meta_train, a BalancedBatchSampler interleaves
    languages and the loader returns one batch per language at a time.

    :param str root_path: local root of the ESPnet recipe
    :param list datasets: all language ids (used to collect odim per language)
    :param args: namespace carrying batching options
    :param list languages: subset of ``datasets`` actually pooled for training
    :return: ((train_loader, dev_loader, test_loader), (idim, odim_dict))
    """
    def collate(minibatch):
        # With meta-training the sampler delivers several minibatches (one
        # per language); otherwise a single one.  Each produces
        # (padded fbanks, lengths, padded tokens, language).
        # NOTE(review): `dataset` below is captured late from the enclosing
        # loop and refers to the *last* processed language after the loop;
        # this works only because the stored "prefix" is identical across
        # template configs — verify if prefixes ever diverge.
        out = []
        for b in minibatch:
            fbanks = []
            tokens = []
            language = None
            for _, info in b:
                fbanks.append(
                    torch.tensor(
                        kaldiio.load_mat(
                            info["input"][0]["feat"].replace(
                                data_config[dataset]["prefix"], root_path
                            )
                        )
                    )
                )
                tokens.append(
                    torch.tensor([int(s) for s in info["output"][0]["tokenid"].split()])
                )
                # All utterances in a minibatch must share one language.
                if language is not None:
                    assert language == info['category']
                else:
                    language = info['category']
            ilens = torch.tensor([x.shape[0] for x in fbanks])
            out.append((
                pad_sequence(fbanks, batch_first=True, padding_value=0),
                ilens,
                pad_sequence(tokens, batch_first=True, padding_value=-1),
                language,
            ))
        return out[0] if len(out) == 1 else out
    idim = None
    odim_dict = {}
    mtl_train_json, mtl_dev_json, mtl_test_json = {}, {}, {}
    for idx, dataset in enumerate(datasets):
        # Specialize the template config for this language (see load_data).
        language = dataset
        if language in low_resource_languages:
            template_key = "template100"
        else:
            template_key = "template150"
        data_config[dataset] = data_config[template_key].copy()
        for key in ["train", "val", "test", "token"]:
            data_config[dataset][key] = data_config[template_key][key].replace("template", dataset)
        train_json = os.path.join(root_path, data_config[dataset]["train"])
        dev_json = (
            os.path.join(root_path, data_config[dataset]["val"])
            if data_config[dataset]["val"]
            else f"{root_path}/tmp_dev_set_{dataset}.json"
        )
        test_json = os.path.join(root_path, data_config[dataset]["test"])
        train_json, dev_json, test_json = load_json(train_json, dev_json, test_json)
        # Tag every utterance with its language.
        for key in train_json.keys():
            train_json[key]['category'] = language
        for key in dev_json.keys():
            dev_json[key]['category'] = language
        for key in test_json.keys():
            test_json[key]['category'] = language
        #print(train_json)
        # All languages must share the input feature dimension; output
        # dimensions are collected per language.
        _, info = next(iter(train_json.items()))
        if idim is not None:
            assert idim == info["input"][0]["shape"][1]
        else:
            idim = info["input"][0]["shape"][1]
        odim_dict[language] = info["output"][0]["shape"][1]
        # Break if not in specified languages
        if dataset not in languages:
            continue
        mtl_train_json.update(train_json)
        mtl_dev_json.update(dev_json)
        mtl_test_json.update(test_json)
        #print(len(mtl_train_json), len(train_json))
    train_json, dev_json, test_json = mtl_train_json, mtl_dev_json, mtl_test_json
    use_sortagrad = False  # args.sortagrad == -1 or args.sortagrad > 0
    # trainset = make_batchset(train_json, batch_size, max_length_in=800, max_length_out=150)
    if args.ngpu > 1 and not args.dist_train:
        min_batch_size = args.ngpu
    else:
        min_batch_size = 1
    if args.meta_train:
        # Meta-training pairs support/query batches, so double the minimum.
        min_batch_size = 2 * min_batch_size
    trainset = make_batchset(
        train_json,
        args.batch_size,
        args.maxlen_in,
        args.maxlen_out,
        args.minibatches,
        min_batch_size=min_batch_size,
        shortest_first=use_sortagrad,
        count=args.batch_count,
        batch_bins=args.batch_bins,
        batch_frames_in=args.batch_frames_in,
        batch_frames_out=args.batch_frames_out,
        batch_frames_inout=args.batch_frames_inout,
        iaxis=0,
        oaxis=0,
    )
    # devset = make_batchset(dev_json, batch_size, max_length_in=800, max_length_out=150)
    devset = make_batchset(
        dev_json,
        args.batch_size if args.ngpu <= 1 else int(args.batch_size / args.ngpu),
        args.maxlen_in,
        args.maxlen_out,
        args.minibatches,
        min_batch_size=1,
        count=args.batch_count,
        batch_bins=args.batch_bins,
        batch_frames_in=args.batch_frames_in,
        batch_frames_out=args.batch_frames_out,
        batch_frames_inout=args.batch_frames_inout,
        iaxis=0,
        oaxis=0,
    )
    testset = make_batchset(
        test_json,
        args.batch_size if args.ngpu <= 1 else int(args.batch_size / args.ngpu),
        args.maxlen_in,
        args.maxlen_out,
        args.minibatches,
        min_batch_size=1,
        count=args.batch_count,
        batch_bins=args.batch_bins,
        batch_frames_in=args.batch_frames_in,
        batch_frames_out=args.batch_frames_out,
        batch_frames_inout=args.batch_frames_inout,
        iaxis=0,
        oaxis=0,
    )
    if args.dist_train and args.ngpu > 1:
        train_sampler = torch.utils.data.distributed.DistributedSampler(trainset)
    elif args.meta_train:
        train_sampler = BalancedBatchSampler(trainset)
    else:
        train_sampler = None
    # batch_size equals the number of pooled languages during meta-training
    # (one monolingual minibatch per language); otherwise 1.
    train_loader = DataLoader(
        trainset,
        batch_size=1 if not args.meta_train else len(languages),
        collate_fn=collate,
        num_workers=args.n_iter_processes,
        shuffle=(train_sampler is None),
        pin_memory=True,
        sampler=train_sampler,
    )
    dev_loader = DataLoader(
        devset,
        batch_size=1,
        collate_fn=collate,
        shuffle=False,
        num_workers=args.n_iter_processes,
        pin_memory=True,
    )
    test_loader = DataLoader(
        testset,
        batch_size=1,
        collate_fn=collate,
        shuffle=False,
        num_workers=args.n_iter_processes,
        pin_memory=True,
    )
    return (train_loader, dev_loader, test_loader), (idim, odim_dict)
def load_token_list(token_file):
    """Load a token vocabulary file and wrap it with ESPnet's special symbols.

    Args:
        token_file (str): path to a file with one token per line; the token is
            the first whitespace-separated field of each line.

    Returns:
        list[str]: ``["<blank>", token_1, ..., token_N, "<eos>"]`` — index 0 is
        the CTC blank, the last entry is the attention-decoder <eos>.
    """
    # Explicit UTF-8: token files routinely contain non-ASCII graphemes and the
    # platform default encoding is not reliable.
    with open(token_file, "r", encoding="utf-8") as f:
        # Skip blank lines so a trailing newline does not raise IndexError
        # from ``"".split()[0]``.
        token_list = [entry.split()[0] for entry in f if entry.strip()]
    token_list.insert(0, "<blank>")
    token_list.append("<eos>")
    return token_list
def load_bpemodel(root_path, dataset):
    """Build a SentencePiece processor for *dataset*.

    The BPE model path is taken from the ``template100``/``template150`` entry
    of ``data_config`` (low-resource languages use the 100-unit template), with
    the literal substring ``template`` replaced by the dataset name.
    """
    key = "template100" if dataset in low_resource_languages else "template150"
    template_path = os.path.join(root_path, data_config[key]["bpemodel"])
    processor = spm.SentencePieceProcessor()
    processor.Load(template_path.replace("template", dataset))
    return processor
| 13,293 | 35.223433 | 113 | py |
NeuralSpeech | NeuralSpeech-master/AdapterASR/train.py | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import logging
import os
import collections
from espnet.bin.asr_train import get_parser
from espnet.utils.dynamic_import import dynamic_import
from espnet.utils.deterministic_utils import set_deterministic_pytorch
from espnet.asr.pytorch_backend.asr_init import freeze_modules
from torch.nn.parallel import data_parallel
from torch.nn.utils.clip_grad import clip_grad_norm_
import torch
import numpy as np
import data_load
import random
import json
import sys
from utils import setup_logging, str2bool, dict_average
from utils import load_pretrained_model, load_head_from_pretrained_model, torch_save, torch_load
from utils import recognize_and_evaluate
import math
from e2e_asr_adaptertransformer import E2E as E2EAdapterTransformer
import matplotlib.pyplot as plt
import copy
def add_custom_arguments(parser):
    """Register EasyEspnet/adapter-specific CLI options on an ESPnet argparser.

    Mutates *parser* in place; returns nothing.  Options are grouped below by
    feature area (core paths, adapters, SimAdapter fusion, MAML meta-training).
    """
    # EasyEspnet arguments
    parser.add_argument('--data_file', type=str, default=None)
    parser.add_argument("--root_path", type=str, required=True, help="Path to the ESPnet features, e.g.: <espnet_path>/egs/commonvoice/asr1/")
    #parser.add_argument("--root_path", type=str, default="/opt/espnet/egs/an4/asr1/")
    parser.add_argument('--dataset', type=str, required=True, help="Dataset name to be referred in data_load, e.g.: an4")
    #parser.add_argument("--dataset", type=str, default="an4")
    parser.add_argument("--exp", type=str, default="exp")
    parser.add_argument("--decoding_mode", type=str2bool, default=False, help="if true, then only perform decoding test")
    parser.add_argument("--load_pretrained_model", type=str, default="", nargs="?",
                        help="<model_path>:<load_modules>:<exclude_modules>")
    parser.add_argument("--compute_cer", type=str2bool, default=True)
    parser.add_argument("--decoding_config", type=str, default=None)
    parser.add_argument(
        "--bpemodel", type=bool, default=True
    ) # Set to true when testing CER/WERs
    parser.add_argument("--dist_train", type=str2bool, default=False)
    parser.add_argument("--local_rank", type=int, default=0)
    parser.add_argument("--result_label", type=str, default=None)
    parser.add_argument("--recog_json", type=str, default=None)
    parser.add_argument("--adam_lr", type=float, default=1e-3)
    # Adapter-related
    parser.add_argument("--use_adapters", type=str2bool, default=True, help="whether to inject adapters into the model")
    parser.add_argument("--shared_adapter", type=str, default=None, help="Share one adapter across all languages")
    parser.add_argument('--adapter_train_languages', type=str, default=None,
                        help="Splitted by _, e.g., en_zh_jp")
    parser.add_argument("--train_adapter_with_head", type=str2bool,
                        default=False, help="whether to train adapter with language-specific head jointly")
    # SimAdapter-related
    parser.add_argument("--sim_adapter", type=str2bool, default=False)
    parser.add_argument("--fusion_languages", type=str, default=None)
    parser.add_argument("--guide_loss_weight", type=float, default=0.1)
    parser.add_argument("--guide_loss_weight_decay_steps", type=int, default=0)
    # MAML-related
    parser.add_argument("--meta_train", type=str2bool, default=False)
    parser.add_argument("--meta_lr", type=float, default=1.0, help="used for meta-learning outer step")
    # Others
    parser.add_argument("--load_head_from_pretrained_model", type=str, default="", nargs="?",
                        help="<model_path>")
def train_epoch(dataloader, model, optimizer, epoch=None):
    """Run one standard (non-meta) training epoch.

    Args:
        dataloader: yields ``(fbank, seq_lens, tokens, language)`` batches.
        model: E2E ASR model; returns ``(ctc_att_loss, sim_adapter_guide_loss)``.
        optimizer: either a single optimizer, or a dict mapping a language id
            to a per-language optimizer (per-language adapter training).
        epoch (int, optional): 1-based epoch index, used only for the guide-loss
            weight decay schedule.

    Returns:
        dict: per-key averages of the accumulated ``*_lst`` statistics.

    Relies on module-level globals: ``args``, ``data_parallel``,
    ``clip_grad_norm_``, ``dict_average``.
    """
    model.train()
    stats = collections.defaultdict(list)
    for batch_idx, data in enumerate(dataloader):
        fbank, seq_lens, tokens, language = data
        fbank, seq_lens, tokens = fbank.cuda(), seq_lens.cuda(), tokens.cuda()
        # Per-language optimizers are keyed by the batch's language id.
        if isinstance(optimizer, dict):
            optimizer[language].zero_grad()
        else:
            optimizer.zero_grad()
        model.zero_grad()
        if args.ngpu <= 1 or args.dist_train:
            ctc_att_loss, sim_adapter_guide_loss = model(fbank, seq_lens, tokens, language)# .mean() # / self.accum_grad
        else:
            # apex does not support torch.nn.DataParallel
            ctc_att_loss, sim_adapter_guide_loss = (
                data_parallel(model, (fbank, seq_lens, tokens, language), range(args.ngpu))# .mean() # / self.accum_grad
            )
        loss = ctc_att_loss.mean()
        if args.sim_adapter:
            # SimAdapter adds an attention-fusion regularizer ...
            if hasattr(model, "module"):
                sim_adapter_reg_loss = model.module.get_fusion_regularization_loss()
            else:
                sim_adapter_reg_loss = model.get_fusion_regularization_loss()
            loss = loss + sim_adapter_reg_loss
            stats["sim_adapter_reg_loss_lst"].append(sim_adapter_reg_loss.item())
            # ... plus an optional guide loss whose weight can decay linearly
            # to zero over guide_loss_weight_decay_steps iterations.
            if args.guide_loss_weight > 0:
                if args.guide_loss_weight_decay_steps > 0:
                    n_batch = len(dataloader)
                    current_iter = float(batch_idx + (epoch - 1) * n_batch)
                    frac_done = 1.0 * float(current_iter) / args.guide_loss_weight_decay_steps
                    current_weight = args.guide_loss_weight * max(0., 1. - frac_done)
                    stats["sim_adapter_guide_loss_weight"] = current_weight
                else:
                    current_weight = args.guide_loss_weight
                sim_adapter_guide_loss = sim_adapter_guide_loss.mean()
                loss = loss + current_weight * sim_adapter_guide_loss
                stats["sim_adapter_guide_loss_lst"].append(sim_adapter_guide_loss.item())
        # Harvest the attention accuracy the model stashed on itself during
        # forward, then reset it so stale values are never re-counted.
        if not hasattr(model, "module"):
            if hasattr(model, "acc") and model.acc is not None:
                stats["acc_lst"].append(model.acc)
                model.acc = None
        else:
            # NOTE(review): this checks hasattr(model, "acc") but reads
            # model.module.acc — presumably intended hasattr(model.module, "acc");
            # confirm against the wrapped model class.
            if hasattr(model, "acc") and model.module.acc is not None:
                stats["acc_lst"].append(model.module.acc)
                model.module.acc = None
        loss.backward()
        grad_norm = clip_grad_norm_(model.parameters(), args.grad_clip)
        if math.isnan(grad_norm):
            logging.warning("grad norm is nan. Do not update model.")
        else:
            if isinstance(optimizer, dict):
                optimizer[language].step()
            else:
                optimizer.step()
        stats["loss_lst"].append(loss.item())
        logging.warning(f"Training batch: {batch_idx+1}/{len(dataloader)}")
    return dict_average(stats)
def train_maml_epoch(dataloader, model, optimizer, epoch=None):
    """Run one first-order (Reptile-like) meta-training epoch.

    Each dataloader item is a list of per-language batches; every batch is split
    in half into a meta-train and a meta-valid sub-batch.  For each language the
    model takes two inner optimizer steps, the weight delta produced by the
    second (meta-valid) step is recorded as that language's meta-gradient, and
    the model is reset to its pre-batch weights.  The averaged meta-gradient is
    then applied with an outer step size that decays linearly over the whole run.

    Returns:
        dict: per-key averages of the accumulated ``*_lst`` statistics
        (losses/accuracies are recorded on the meta-valid step only).
    """
    model.train()
    stats = collections.defaultdict(list)
    for batch_idx, total_batches in enumerate(dataloader):
        i = batch_idx # current iteration in epoch
        len_dataloader = len(dataloader) # total iteration in epoch
        meta_iters = args.epochs * len_dataloader
        current_iter = float(i + (epoch - 1) * len_dataloader)
        frac_done = 1.0 * float(current_iter) / meta_iters
        # Outer step size anneals linearly from meta_lr to 0 across training.
        current_outerstepsize = args.meta_lr * (1. - frac_done)
        weights_original = copy.deepcopy(model.state_dict())
        new_weights = []
        for total_batch in total_batches: # Iter by languages
            in_batch_size = int(total_batch[0].shape[0] / 2) # In-language batch size
            for meta_step in range(2): # Meta-train & meta-valid
                if meta_step == 1:
                    # Snapshot weights before the meta-valid update; the delta
                    # against this snapshot is the meta-gradient.
                    last_backup = copy.deepcopy(model.state_dict())
                else:
                    last_backup = None
                batch = list(copy.deepcopy(total_batch))
                # Slice every tensor field (all but the trailing language id)
                # into the meta-train (first half) or meta-valid (second half).
                for i_batch in range(len(batch)-1):
                    batch[i_batch] = batch[i_batch][meta_step*in_batch_size:(1+meta_step)*in_batch_size]
                batch = tuple(batch)
                fbank, seq_lens, tokens, language = batch
                fbank, seq_lens, tokens = fbank.cuda(), seq_lens.cuda(), tokens.cuda()
                optimizer.zero_grad()
                model.zero_grad()
                if args.ngpu <= 1 or args.dist_train:
                    loss = model(fbank, seq_lens, tokens, language).mean() # / self.accum_grad
                else:
                    # apex does not support torch.nn.DataParallel
                    loss = (
                        data_parallel(model, (fbank, seq_lens, tokens, language), range(args.ngpu)).mean() # / self.accum_grad
                    )
                # print(loss.item())
                loss.backward()
                grad_norm = clip_grad_norm_(model.parameters(), args.grad_clip)
                if math.isnan(grad_norm):
                    logging.warning("grad norm is nan. Do not update model.")
                else:
                    optimizer.step()
                if meta_step == 1: # Record meta valid
                    if not hasattr(model, "module"):
                        if hasattr(model, "acc") and model.acc is not None:
                            stats["acc_lst"].append(model.acc)
                            model.acc = None
                    else:
                        if hasattr(model, "acc") and model.module.acc is not None:
                            stats["acc_lst"].append(model.module.acc)
                            model.module.acc = None
                    stats["loss_lst"].append(loss.item())
                    stats["meta_lr"] = current_outerstepsize
            optimizer.zero_grad()
            for name in last_backup:
                # Compute meta-gradient
                last_backup[name] = model.state_dict()[name] - last_backup[name]
            # Change back to the original parameters for the new language
            new_weights.append(last_backup) # updates.append(subtract_vars(self._model_state.export_variables(), last_backup))
            model.load_state_dict({ name: weights_original[name] for name in weights_original})
        ws = len(new_weights)
        # Compute average meta-gradient
        fweights = { name : new_weights[0][name]/float(ws) for name in new_weights[0] }
        for i in range(1, ws):
            for name in new_weights[i]:
                fweights[name] = fweights[name] + new_weights[i][name] / float(ws)
        # Outer update: move the original weights along the averaged delta.
        model.load_state_dict({name : weights_original[name] + (fweights[name] * current_outerstepsize) for name in weights_original})
        logging.warning(f"Training batch: {batch_idx+1}/{len(dataloader)}")
    return dict_average(stats)
def test(epoch, dataloader, model, model_path=None, language=None, visualize_sim_adapter=False):
    """Evaluate *model* on *dataloader*; optionally plot SimAdapter attentions.

    Args:
        epoch: tag used only in the attention-plot filename (``att_{epoch}.png``).
        dataloader: yields either 4-tuples (with language id) or 3-tuples,
            in which case *language* must be supplied by the caller.
        model_path (str, optional): if given, weights are loaded first.
        visualize_sim_adapter (bool): also average and plot the per-layer
            SimAdapter attention scores.

    Returns:
        dict: averaged loss/accuracy statistics.
    """
    if model_path:
        torch_load(model_path, model)
    # Unwrap DataParallel/DDP so attribute access (acc, fusion helpers) works;
    # the wrapper is restored before returning.
    orig_model = None
    if hasattr(model, "module"):
        orig_model = model
        model = model.module
    model.eval()
    stats = collections.defaultdict(list)
    for batch_idx, data in enumerate(dataloader):
        logging.warning(f"Testing batch: {batch_idx+1}/{len(dataloader)}")
        if len(data) == 4:
            fbank, seq_lens, tokens, language = data
        else:
            assert language is not None
            fbank, seq_lens, tokens = data
        fbank, seq_lens, tokens = fbank.cuda(), seq_lens.cuda(), tokens.cuda()
        with torch.no_grad():
            loss = model(fbank, seq_lens, tokens, language)
            if visualize_sim_adapter:
                atts = model.calculate_sim_adapter_attentions(fbank, seq_lens, tokens, language)
                # NOTE(review): avg_atts/count are re-created on every batch,
                # so the plot below reflects only the LAST batch — presumably
                # these were meant to be initialized before the loop; confirm.
                init_mat = lambda: np.zeros((len(model.fusion_languages),))
                avg_atts = collections.defaultdict(init_mat)
                count = collections.defaultdict(int)
                for key in atts.keys():
                    avg_atts[key] = avg_atts[key] + atts[key].sum(axis=(0, 1))
                    count[key] = count[key] + atts[key].shape[0] * atts[key].shape[1]
        # NOTE(review): loss.item() assumes the model returns a single tensor
        # in eval mode (train_epoch unpacks a 2-tuple) — verify model.forward.
        stats["loss_lst"].append(loss.item())
        if not hasattr(model, "module"):
            if model.acc is not None:
                stats["acc_lst"].append(model.acc)
                model.acc = None
        else:
            if model.module.acc is not None:
                stats["acc_lst"].append(model.module.acc)
                model.module.acc = None
    if visualize_sim_adapter:
        # Normalize the accumulated scores and render a labelled heatmap.
        for key in avg_atts.keys():
            avg_atts[key] = avg_atts[key] / count[key]
            logging.warning(f"Attention scores of {key}: {avg_atts[key]}")
        fig = plt.figure(figsize=(16, 8))
        ax = fig.subplots()
        atts, labels = [], []
        for key in avg_atts.keys():
            atts.append(avg_atts[key])
            labels.append(key)
        atts = np.stack(atts)
        tick_marks = np.arange(len(labels))
        ax.set_yticks(tick_marks)
        ax.set_yticklabels(labels)
        x_labels = list(sorted(model.fusion_languages))
        ax.set_xticks(np.arange(len(x_labels)))
        ax.set_xticklabels(x_labels)
        ax.imshow(atts)
        import itertools
        # Overlay the numeric value on each heatmap cell.
        for i, j in itertools.product(range(atts.shape[0]), range(atts.shape[1])):
            plt.text(j, i, "{:0.2f}".format(atts[i, j]),
                     horizontalalignment="center",
                     color="white")
        fig.tight_layout()
        fig.savefig(f"{args.outdir}/att_{epoch}.png")
        plt.close()
    if orig_model is not None:
        model = orig_model
    return dict_average(stats)
def train(dataloaders, model, optimizer, save_path):
    """Full training loop with dev-loss model selection and early stopping.

    Args:
        dataloaders: ``(train_loader, val_loader, test_loader)`` triple.
        save_path (str): where the best-dev-loss checkpoint is written.

    Side effects: writes ``snapshot.ep.N`` checkpoints and an ESPnet-style
    JSON ``log`` file into ``args.outdir`` every epoch.
    """
    train_loader, val_loader, test_loader = dataloaders
    best_loss = float("inf")
    early_stop = 0  # epochs since the dev loss last improved
    log_json = []
    for epoch in range(args.start_epoch, args.epochs + 1):
        early_stop += 1
        epoch_stats = collections.OrderedDict(epoch=epoch)
        train_stats = train_epoch(train_loader, model, optimizer, epoch)
        valid_stats = test(f"val_{epoch}", val_loader, model, visualize_sim_adapter=args.sim_adapter)
        if best_loss > valid_stats["loss"]: # Save loss best model
            best_loss = valid_stats["loss"]
            torch_save(model, save_path)
            early_stop = 0
        test_stats = test(f"test_{epoch}", test_loader, model)
        logging.warning(
            f"Epoch: {epoch}, Iteration: {epoch * len(train_loader)}, "
            + f"train loss: {train_stats['loss']:.4f}, dev loss: {valid_stats['loss']:.3f}, test loss: {test_stats['loss']:.3f}"
        )
        torch_save(model, f"{args.outdir}/snapshot.ep.{epoch}", optimizer=optimizer)
        # Collect scalar stats (list-valued "*_lst" keys are excluded) under
        # ESPnet's main/ validation/ test/ prefixes.
        for key in sorted(list(set(list(train_stats.keys()) + list(test_stats.keys())))):
            if not key.endswith("_lst"):
                if key in train_stats:
                    epoch_stats[f"main/{key}"] = train_stats[key]
                if key in valid_stats:
                    epoch_stats[f"validation/main/{key}"] = valid_stats[key]
                if key in test_stats:
                    epoch_stats[f"test/main/{key}"] = test_stats[key]
        log_json.append(epoch_stats)
        # Rewrite the whole log each epoch so a crash loses at most one epoch.
        with open(f"{args.outdir}/log", "w") as f:
            json.dump(log_json, f,
                      indent=4,
                      ensure_ascii=False,
                      separators=(",", ": "),
                      )
        logging.warning(f"Log saved at {args.outdir}/log")
        if args.patience > 0 and early_stop >= args.patience:
            # Reload the best checkpoint for a final test pass before stopping.
            test_stats = test("test_best", test_loader, model, save_path)
            logging.warning(f"=====Early stop! Final best test loss: {test_stats['loss']}")
            break
if __name__ == "__main__":
    # To run multi-GPU distributed training, e.g. on 4 GPUs:
    # CUDA_VISIBLE_DEVICES=0,1,2,3 python -m torch.distributed.launch --nproc_per_node=2 train.py
    setup_logging(verbose=0) # Should come first before other package import logging
    parser = get_parser()
    add_custom_arguments(parser)
    # ESPnet's parser requires --dict; pass an empty one (per-language token
    # lists are loaded separately below).
    arg_list = sys.argv[1:] + [
        "--dict", '',
        #"--dataset", "_".join("cv mt cnh ky dv sl el lv fyNL sah".split()),
    ]
    if "--config" not in arg_list:
        arg_list += ["--config", "config/train.yaml"]
    if "--outdir" not in arg_list:
        arg_list += ["--outdir", '']
    args, _ = parser.parse_known_args(arg_list)
    # Use all GPUs
    ngpu = torch.cuda.device_count() if args.ngpu is None else args.ngpu
    os.environ["CUDA_VISIBLE_DEVICES"] = ','.join(
        [str(item) for item in range(ngpu)])
    logging.warning(f"ngpu: {ngpu}")
    # set random seed
    logging.info("random seed = %d" % args.seed)
    random.seed(args.seed)
    np.random.seed(args.seed)
    set_deterministic_pytorch(args)
    torch.cuda.manual_seed(args.seed)
    if ngpu > 1:
        torch.cuda.manual_seed_all(args.seed) # multi-gpu setting
    model_module = "e2e_asr_adaptertransformer:E2E"
    model_class = E2EAdapterTransformer
    # Model-specific options must be registered before the final parse.
    model_class.add_arguments(parser)
    args = parser.parse_args(arg_list)
    setattr(args, "conf_name", ".".join(os.path.basename(args.config).split(".")[:-1]))
    if not args.outdir:
        args.outdir = f"./outputs/results_{args.dataset}/{args.conf_name}"
    if not os.path.exists(args.outdir):
        os.makedirs(args.outdir)
    setattr(args, "ngpu", ngpu)
    if args.data_file is not None:
        args.root_path = args.data_file
    if args.ngpu > 1:
        # Linear-scaling heuristic: grow the LR with sqrt(#GPUs).
        if args.opt == "noam" and hasattr(args, "transformer_lr"):
            logging.warning(f"Multi-GPU training: increase transformer lr {args.transformer_lr} --> {args.transformer_lr * np.sqrt(args.ngpu)}")
            args.transformer_lr = args.transformer_lr * np.sqrt(args.ngpu)
        elif (args.opt == "adam" or args.meta_train) and hasattr(args, "adam_lr"):
            logging.warning(f"Multi-GPU training: increase adam lr {args.adam_lr} --> {args.adam_lr * np.sqrt(args.ngpu)}")
            args.adam_lr = args.adam_lr * np.sqrt(args.ngpu)
        if args.dist_train:
            torch.distributed.init_process_group(backend="nccl")
            local_rank = torch.distributed.get_rank()
            args.local_rank = local_rank
            torch.cuda.set_device(local_rank)
            device = torch.device("cuda", local_rank)
        else:
            # DataParallel path: one process, bigger effective batch.
            logging.warning(
                "Training batch size is automatically increased (%d -> %d)"
                % (args.batch_size, args.batch_size * args.ngpu)
            )
            args.batch_size *= args.ngpu
    if args.accum_grad > 1:
        logging.warning(
            "gradient accumulation is not implemented. batch size is increased (%d -> %d)"
            % (args.batch_size, args.batch_size * args.accum_grad)
        )
        args.batch_size *= args.accum_grad
        args.accum_grad = 1
    dataloaders = {}
    token_dict = {}
    idim, odim_dict = None, {}
    # "--dataset en_zh_jp" style: underscore-separated language/dataset list.
    args.dataset = args.dataset.split("_")
    languages = args.dataset
    data_load_languages = languages
    if args.adapter_train_languages is not None:
        args.adapter_train_languages = args.adapter_train_languages.split("_")
        data_load_languages = args.adapter_train_languages
    else:
        logging.warning("adapter_train_languages is None, will use all datasets for training")
        args.adapter_train_languages = args.dataset
    dataloaders, (idim, odim_dict) = data_load.load_multilingual_data(args.root_path, args.dataset, args, data_load_languages)
    for idx, data_set in enumerate(args.dataset):
        if languages[idx] not in data_load_languages:
            continue
        token_dict[languages[idx]] = data_load.load_token_list(
            os.path.join(args.root_path, data_load.data_config[data_set]["token"])
        )
    setattr(args, "char_list", token_dict)
    model = model_class(idim, odim_dict, args, languages)
    # Persist the full (idim, odim, args) config next to the checkpoints.
    model_conf = args.outdir + "/model.json"
    with open(model_conf, "wb") as f:
        logging.info("writing a model config file to " + model_conf)
        f.write(
            json.dumps(
                (idim, odim_dict, vars(args)),
                indent=4,
                ensure_ascii=False,
                sort_keys=True,
            ).encode("utf_8")
        )
    model.cuda()
    if args.freeze_mods:
        model, model_params = freeze_modules(model, args.freeze_mods)
    else:
        model_params = model.parameters()
    logging.warning("Trainable parameters:")
    for name, parameter in model.named_parameters():
        if parameter.requires_grad:
            logging.warning(name)
    # Setup an optimizer
    if args.meta_train:
        logging.warning(f"Use Adam optimizer with lr={args.adam_lr}, beta0=0 for meta-training inner step")
        optimizer = torch.optim.Adam(model_params, lr=args.adam_lr, betas=(0, 0.999), weight_decay=args.weight_decay)
    else:
        if args.opt == "adadelta":
            optimizer = torch.optim.Adadelta(
                model_params, rho=0.95, eps=args.eps, weight_decay=args.weight_decay
            )
        elif args.opt == "adam":
            logging.warning(f"Using Adam optimizer with lr={args.adam_lr}")
            optimizer = torch.optim.Adam(model_params, lr=args.adam_lr, weight_decay=args.weight_decay)
        elif args.opt == "noam":
            from espnet.nets.pytorch_backend.transformer.optimizer import get_std_opt
            optimizer = get_std_opt(
                model_params, args.adim, args.transformer_warmup_steps, args.transformer_lr
            )
        # Per-language adapter training: replace the single optimizer with a
        # dict of per-language Adam optimizers over each language's own
        # parameters (selected by the language id appearing in the param name).
        if len(args.adapter_train_languages) > 1 and not args.sim_adapter and not args.shared_adapter:
            model_params = collections.defaultdict(list)
            optimizer = {}
            for lang in args.adapter_train_languages:
                for name, parameter in model.named_parameters():
                    if parameter.requires_grad and lang in name.split("."):
                        model_params[lang].append(parameter)
                logging.warning(f"Number of trainable parameters for language {lang} " + str(sum(p.numel() for p in model_params[lang])))
                optimizer[lang] = torch.optim.Adam(model_params[lang], lr=args.adam_lr, weight_decay=args.weight_decay)
    # Resume from a snapshot
    if args.resume:
        logging.warning("resumed from %s" % args.resume)
        torch_load(args.resume, model, optimizer)
        # Snapshot names end in ".ep.N" -> resume at epoch N+1.
        setattr(args, "start_epoch", int(args.resume.split('.')[-1]) + 1)
    else:
        setattr(args, "start_epoch", 1)
    if args.load_pretrained_model:
        model_path, modules_to_load, exclude_modules = args.load_pretrained_model.split(":")
        logging.warning("load pretrained model from %s" % args.load_pretrained_model)
        load_pretrained_model(model=model, model_path=model_path,
                              modules_to_load=modules_to_load, exclude_modules=exclude_modules)
    if args.load_head_from_pretrained_model:
        logging.warning("load pretrained model head from %s" % args.load_head_from_pretrained_model)
        load_head_from_pretrained_model(model=model, model_path=args.load_head_from_pretrained_model)
    logging.warning(
        "Total parameter of the model = "
        + str(sum(p.numel() for p in model.parameters()))
    )
    logging.warning(
        "Trainable parameter of the model = "
        + str(sum(p.numel() for p in filter(lambda x: x.requires_grad, model.parameters())))
    )
    if args.ngpu > 1 and args.dist_train:
        model = torch.nn.parallel.DistributedDataParallel(model,
                                                          device_ids=[local_rank],
                                                          output_device=local_rank,
                                                          find_unused_parameters=True,
                                                          )
    save_path = f"{args.outdir}/model.loss.best"
    if args.meta_train:
        # Rebind the module-level name so train() runs the MAML epoch instead.
        train_epoch = train_maml_epoch
    if not args.decoding_mode:
        train(dataloaders, model, optimizer, save_path)
    if args.compute_cer and args.local_rank == 0:
        # For CER/WER computing
        for idx, dataset in enumerate(args.dataset):
            language = languages[idx]
            if args.adapter_train_languages and not (language in args.adapter_train_languages):
                continue
            if args.bpemodel and "bpemodel" in data_load.data_config[dataset]:
                logging.warning(f"load bpe model for dataset {dataset}")
                args.bpemodel = data_load.load_bpemodel(args.root_path, dataset)
            dataloaders, _ = data_load.load_data(args.root_path, dataset, args)
            splits = ["test", "val"]
            for split in splits:
                logging.warning(f"---------Recognizing {dataset} {split}----------")
                args.result_label = f"{args.outdir}/{dataset}_{split}_recog.json"
                if not data_load.data_config[dataset][split]:
                    split_path = os.path.join(args.root_path, f"{args.root_path}/tmp_dev_set_{dataset}.json")
                else:
                    split_path = data_load.data_config[dataset][split]
                args.recog_json = os.path.join(args.root_path, split_path)
                idx = ["train", "val", "test"].index(split)
                test_stats = test(f"{split}_best", dataloaders[idx],
                                  model,
                                  save_path,
                                  language=language,
                                  visualize_sim_adapter=args.sim_adapter)
                logging.warning(f"Loss: {test_stats['loss']}")
                err_dict = recognize_and_evaluate(dataloaders[idx], model, args, language,
                                                  model_path=save_path, wer=True, write_to_json=True)
                logging.warning(f"CER: {err_dict['cer']['err']}")
                logging.warning(f"WER: {err_dict['wer']['err']}")
NeuralSpeech | NeuralSpeech-master/PriorGrad-acoustic/modules/diffusion.py | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# The diffusion acoustic decoder module is based on the DiffWave architecture: https://github.com/lmnt-com/diffwave
# Copyright 2020 LMNT, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from math import sqrt
Linear = nn.Linear
ConvTranspose2d = nn.ConvTranspose2d
def Conv1d(*args, **kwargs):
    """Factory for ``nn.Conv1d`` layers with Kaiming-normal weight init."""
    conv = nn.Conv1d(*args, **kwargs)
    # Overwrite the default weight init in place; the bias keeps its default.
    nn.init.kaiming_normal_(conv.weight)
    return conv
@torch.jit.script
def silu(x):
    """SiLU (swish) activation, ``x * sigmoid(x)``, compiled with TorchScript."""
    return torch.sigmoid(x) * x
class DiffusionEmbedding(nn.Module):
    """Sinusoidal embedding of the diffusion step index.

    A fixed sin/cos table over ``max_steps`` steps is projected through two
    Linear+SiLU layers into a 512-dim conditioning vector.  Non-integer step
    values (used by fast sampling) are linearly interpolated between table rows.
    """

    def __init__(self, max_steps):
        super().__init__()
        # Fixed lookup table; excluded from the state dict (persistent=False).
        self.register_buffer('embedding', self._build_embedding(max_steps), persistent=False)
        self.projection1 = Linear(128, 512)
        self.projection2 = Linear(512, 512)

    def forward(self, diffusion_step):
        # Integer steps index the table directly; fractional steps interpolate.
        if diffusion_step.dtype in [torch.int32, torch.int64]:
            emb = self.embedding[diffusion_step]
        else:
            emb = self._lerp_embedding(diffusion_step)
        emb = silu(self.projection1(emb))
        return silu(self.projection2(emb))

    def _lerp_embedding(self, t):
        # Linear interpolation between the two neighbouring table rows.
        lo_idx = torch.floor(t).long()
        hi_idx = torch.ceil(t).long()
        lo = self.embedding[lo_idx]
        hi = self.embedding[hi_idx]
        return lo + (hi - lo) * (t - lo_idx)

    def _build_embedding(self, max_steps):
        steps = torch.arange(max_steps).unsqueeze(1)   # [T,1]
        dims = torch.arange(64).unsqueeze(0)           # [1,64]
        angles = steps * 10.0**(dims * 4.0 / 63.0)     # [T,64]
        return torch.cat([torch.sin(angles), torch.cos(angles)], dim=1)
class ResidualBlock(nn.Module):
    """WaveNet-style gated residual block conditioned on the diffusion step
    and an external conditioner sequence.

    ``n_mels`` is accepted for interface compatibility but unused here.
    Returns ``(residual_out, skip_out)``; the residual path is scaled by
    1/sqrt(2) to keep activations bounded across stacked blocks.
    """

    def __init__(self, n_mels, residual_channels, conditioner_channels, dilation):
        super().__init__()
        # Layer creation order is kept stable so seeded inits reproduce.
        self.dilated_conv = Conv1d(residual_channels, 2 * residual_channels, 3, padding=dilation, dilation=dilation)
        self.diffusion_projection = Linear(512, residual_channels)
        self.conditioner_projection = Conv1d(conditioner_channels, 2 * residual_channels, 1)
        self.output_projection = Conv1d(residual_channels, 2 * residual_channels, 1)

    def forward(self, x, conditioner, diffusion_step):
        step_bias = self.diffusion_projection(diffusion_step).unsqueeze(-1)
        cond = self.conditioner_projection(conditioner)
        pre_act = self.dilated_conv(x + step_bias) + cond
        gate, filt = torch.chunk(pre_act, 2, dim=1)
        activated = torch.sigmoid(gate) * torch.tanh(filt)
        projected = self.output_projection(activated)
        residual, skip = torch.chunk(projected, 2, dim=1)
        return (x + residual) / sqrt(2.0), skip
class DiffDecoder(nn.Module):
    """Diffusion-based mel-spectrogram decoder (DiffWave-style).

    ``forward`` predicts the noise component of a noised mel given the text
    encoder output, and ``sample`` runs the reverse diffusion process.  When
    phone-level statistics are enabled, noise is drawn from N(0, target_std)
    and the predicted sample is shifted by target_mean at the end (PriorGrad).
    """

    def __init__(self, params):
        super().__init__()
        self.params = params
        self.use_phone_stat = params['use_phone_stat']
        self.condition_phone_stat = params['condition_phone_stat'] if 'condition_phone_stat' in params else False
        if self.use_phone_stat and self.condition_phone_stat:
            self.input_projection = Conv1d(params.n_mels * 3, params.residual_channels, 1) # to concat target_mean and target_std
        else:
            self.input_projection = Conv1d(params.n_mels, params.residual_channels, 1)
        self.diffusion_embedding = DiffusionEmbedding(len(params.noise_schedule))
        self.residual_layers = nn.ModuleList([
            ResidualBlock(params.n_mels, params.residual_channels, params.conditioner_channels, 2**(i % params.dilation_cycle_length))
            for i in range(params.residual_layers)
        ])
        self.skip_projection = Conv1d(params.residual_channels, params.residual_channels, 1)
        # Zero-init the last projection so the model starts by predicting zeros.
        self.output_projection = Conv1d(params.residual_channels, params.n_mels, 1)
        nn.init.zeros_(self.output_projection.weight)

    def forward(self, input, decoder_inp, target_mean, target_std, mel2ph, diffusion_step):
        """Predict the noise in *input* ([B, frame, n_mels]) at *diffusion_step*.

        mel2ph == 0 marks padding frames; all activations are masked there.
        """
        x = input.permute(0, 2, 1)
        decoder_inp = decoder_inp.permute(0, 2, 1) # make it [B, 256, frame]
        mask = (mel2ph != 0).float().unsqueeze(1) # [B, 1, frame]
        if self.use_phone_stat:
            assert target_mean is not None and target_std is not None
            if self.condition_phone_stat:
                # Concatenate the phone-level prior stats as extra channels.
                target_mean = target_mean.permute(0, 2, 1)
                target_std = target_std.permute(0, 2, 1)
                x = torch.cat([x, target_mean, target_std], dim=1)
        x = self.input_projection(x)
        x = F.relu(x) * mask
        diffusion_step = self.diffusion_embedding(diffusion_step)
        skip = []
        for layer in self.residual_layers:
            x, skip_connection = layer(x, decoder_inp, diffusion_step)
            # apply mask
            x, skip_connection = x * mask, skip_connection * mask
            skip.append(skip_connection)
        x = torch.sum(torch.stack(skip), dim=0) / sqrt(len(self.residual_layers))
        x = self.skip_projection(x)
        x = F.relu(x) * mask
        x = self.output_projection(x) * mask
        x = x.permute(0, 2, 1) # back to [B, frame, 80]
        return x

    def sample(self, decoder_inp, target_mean, target_std, mel2ph, device=torch.device('cuda'), fast_sampling=False, return_all=False):
        """Run reverse diffusion and return ``(mel, mel_list_or_None)``.

        With ``fast_sampling`` the short inference schedule is mapped onto
        fractional steps of the training schedule; if the mapping fails the
        initial noise is returned unchanged (with a warning).
        """
        with torch.no_grad():
            # Change in notation from the DiffWave paper for fast sampling.
            # DiffWave paper -> Implementation below
            # --------------------------------------
            # alpha -> talpha
            # beta -> training_noise_schedule
            # gamma -> alpha
            # eta -> beta
            training_noise_schedule = np.array(self.params.noise_schedule)
            inference_noise_schedule = np.array(
                self.params.inference_noise_schedule) if fast_sampling else training_noise_schedule
            talpha = 1 - training_noise_schedule
            talpha_cum = np.cumprod(talpha)
            beta = inference_noise_schedule
            alpha = 1 - beta
            alpha_cum = np.cumprod(alpha)
            # Map each inference step to a fractional training step whose
            # cumulative alpha brackets it.
            T = []
            for s in range(len(inference_noise_schedule)):
                for t in range(len(training_noise_schedule) - 1):
                    if talpha_cum[t + 1] <= alpha_cum[s] <= talpha_cum[t]:
                        twiddle = (talpha_cum[t] ** 0.5 - alpha_cum[s] ** 0.5) / (talpha_cum[t] ** 0.5 - talpha_cum[t + 1] ** 0.5)
                        T.append(t + twiddle)
                        break
            T = np.array(T, dtype=np.float32)
            # Expand rank 2 tensors by adding a batch dimension.
            if len(decoder_inp.shape) == 2:
                decoder_inp = decoder_inp.unsqueeze(0)
            decoder_inp = decoder_inp.to(device)
            mel_list = []
            if target_mean is not None and target_std is not None: # start from N(0, sigma)
                mel = torch.randn(decoder_inp.shape[0], decoder_inp.shape[1], self.params.n_mels, device=device) * target_std
            else: # start from N(0, I)
                mel = torch.randn(decoder_inp.shape[0], decoder_inp.shape[1], self.params.n_mels, device=device)
            mel_list.append(mel.clone() + target_mean if target_mean is not None else mel.clone())
            # return "failed" mel if the target fast inference schedule is "unsupported" by the algorithm
            if len(T) != len(inference_noise_schedule):
                print("WARNING: given fast inference schedule {} is not supported. returning noise as output!".format(inference_noise_schedule))
                if return_all:
                    return mel, mel_list
                else:
                    return mel, None
            for n in range(len(alpha) - 1, -1, -1):
                c1 = 1 / alpha[n] ** 0.5
                c2 = beta[n] / (1 - alpha_cum[n]) ** 0.5
                # NOTE(review): the two branches below are byte-identical —
                # presumably the first was meant to differ (mean handling);
                # confirm against the reference implementation.
                if target_mean is not None and target_std is not None:
                    mel = c1 * (mel - c2 * self.forward(mel, decoder_inp, target_mean, target_std, mel2ph, torch.tensor([T[n]], device=mel.device))) # mean prediction will be same as the original
                else:
                    mel = c1 * (mel - c2 * self.forward(mel, decoder_inp, target_mean, target_std, mel2ph, torch.tensor([T[n]], device=mel.device)))
                if n > 0:
                    # Add scaled posterior noise for all but the final step.
                    if target_mean is not None and target_std is not None:
                        noise = torch.randn_like(mel) * target_std
                    else:
                        noise = torch.randn_like(mel)
                    sigma = ((1.0 - alpha_cum[n - 1]) / (1.0 - alpha_cum[n]) * beta[n]) ** 0.5
                    mel += sigma * noise
                mel_list.append(mel.clone() + target_mean if target_mean is not None else mel.clone())
            if target_mean is not None:
                mel = mel + target_mean # recover mean from the denoised sample
            mel_list.append(mel.clone())
            if return_all:
                return mel, mel_list
            else:
                return mel, None
NeuralSpeech | NeuralSpeech-master/PriorGrad-acoustic/modules/base.py | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import numpy as np
import torch
class BaseModule(torch.nn.Module):
    """Thin ``torch.nn.Module`` base with parameter-count and device helpers."""

    def __init__(self):
        super(BaseModule, self).__init__()

    @property
    def nparams(self):
        """Number of trainable (requires_grad) parameters of the module."""
        return sum(
            np.prod(p.detach().cpu().numpy().shape)
            for _, p in self.named_parameters()
            if p.requires_grad
        )

    def relocate_input(self, x: list):
        """Move any tensors in *x* onto this module's device (in place)."""
        target = next(self.parameters()).device
        for idx, item in enumerate(x):
            if isinstance(item, torch.Tensor) and item.device != target:
                x[idx] = item.to(target)
        return x
| 893 | 26.090909 | 73 | py |
NeuralSpeech | NeuralSpeech-master/PriorGrad-acoustic/modules/tts_modules.py | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import logging
import torch
import torch.nn as nn
from torch.nn import functional as F
from modules.operations import SinusoidalPositionalEmbedding, OPERATIONS_ENCODER, OPERATIONS_DECODER
from tts_utils.hparams import hparams
DEFAULT_MAX_SOURCE_POSITIONS = 2000
DEFAULT_MAX_TARGET_POSITIONS = 2000
class TransformerEncoderLayer(nn.Module):
    """Encoder layer selected from the OPERATIONS_ENCODER registry by id.

    The wrapped op receives (hidden_size, dropout); op id 13 additionally
    takes the Gaussian-bias attention hyperparameters from ``hparams``.
    """

    def __init__(self, layer, hidden_size, dropout):
        super().__init__()
        self.layer = layer
        self.hidden_size = hidden_size
        self.dropout = dropout
        if layer == 13:
            # Op 13 has an extended constructor signature.
            self.op = OPERATIONS_ENCODER[layer](hidden_size, dropout, hparams['gaus_bias'], hparams['gaus_tao'])
        else:
            self.op = OPERATIONS_ENCODER[layer](hidden_size, dropout)

    def forward(self, x, **kwargs):
        # Delegate straight to the selected op.
        return self.op(x, **kwargs)
class TransformerDecoderLayer(nn.Module):
    """Decoder layer selected from the OPERATIONS_DECODER registry by id.

    Also proxies the op's incremental-decoding buffer management
    (``clear_buffer``/``set_buffer``).
    """

    def __init__(self, layer, hidden_size, dropout):
        super().__init__()
        self.layer = layer
        self.hidden_size = hidden_size
        self.dropout = dropout
        self.op = OPERATIONS_DECODER[layer](hidden_size, dropout)

    def forward(self, x, **kwargs):
        return self.op(x, **kwargs)

    def clear_buffer(self, *args):
        # Forward buffer management to the wrapped op.
        return self.op.clear_buffer(*args)

    def set_buffer(self, *args):
        return self.op.set_buffer(*args)
######################
# fastspeech modules
######################
class LayerNorm(torch.nn.LayerNorm):
    """``torch.nn.LayerNorm`` that can normalize an arbitrary dimension.

    Args:
        nout (int): size of the normalized dimension.
        dim (int): dimension to normalize.  ``-1`` behaves exactly like the
            stock LayerNorm; any other value swaps dim 1 with the last dim
            before normalizing and swaps back afterwards.
    """

    def __init__(self, nout, dim=-1):
        super().__init__(nout, eps=1e-12)
        self.dim = dim

    def forward(self, x):
        if self.dim != -1:
            # Bring the target dim to the end, normalize, then restore layout.
            return super().forward(x.transpose(1, -1)).transpose(1, -1)
        return super().forward(x)
class DurationPredictor(torch.nn.Module):
    """Duration predictor from `FastSpeech: Fast, Robust and Controllable Text to Speech`_.

    Predicts a per-token duration from encoder hidden embeddings with a
    stack of Conv1d -> ReLU -> LayerNorm -> Dropout groups.

    .. _`FastSpeech: Fast, Robust and Controllable Text to Speech`:
        https://arxiv.org/pdf/1905.09263.pdf

    Note:
        The calculation domain of outputs differs between `forward` and
        `inference`: `forward` returns log-domain durations, `inference`
        returns rounded linear-domain durations.
    """

    def __init__(self, idim, n_layers=2, n_chans=384, kernel_size=3, dropout_rate=0.1, offset=1.0, padding='SAME'):
        """Initialize the duration predictor.

        Args:
            idim (int): Input dimension.
            n_layers (int, optional): Number of convolutional layers.
            n_chans (int, optional): Number of channels of convolutional layers.
            kernel_size (int, optional): Kernel size of convolutional layers.
            dropout_rate (float, optional): Dropout rate.
            offset (float, optional): Offset value to avoid nan in log domain.
            padding (str, optional): 'SAME' (centered) or 'LEFT' (causal).
        """
        super(DurationPredictor, self).__init__()
        self.offset = offset
        self.kernel_size = kernel_size
        self.padding = padding
        self.conv = torch.nn.ModuleList([
            torch.nn.Sequential(
                torch.nn.Conv1d(idim if i == 0 else n_chans, n_chans, kernel_size, stride=1, padding=0),
                torch.nn.ReLU(),
                LayerNorm(n_chans, dim=1),
                torch.nn.Dropout(dropout_rate),
            )
            for i in range(n_layers)
        ])
        self.linear = torch.nn.Linear(n_chans, 1)

    def _pad(self, xs):
        # Manual time-axis padding so SAME and causal (LEFT) modes can share
        # the same Conv1d modules (which are built with padding=0).
        if self.padding == 'SAME':
            return F.pad(xs, [self.kernel_size // 2, self.kernel_size // 2])
        if self.padding == 'LEFT':
            return F.pad(xs, [self.kernel_size - 1, 0])
        return xs

    def _forward(self, xs, x_masks=None, is_inference=False):
        xs = xs.transpose(1, -1)  # (B, idim, Tmax)
        for block in self.conv:
            xs = block(self._pad(xs))  # (B, C, Tmax)
            if x_masks is not None:
                # Zero out padded frames after every layer.
                xs = xs * (1 - x_masks.float())[:, None, :]
        # NOTE: calculate in log domain
        xs = self.linear(xs.transpose(1, -1)).squeeze(-1)  # (B, Tmax)
        if is_inference:
            # NOTE: convert to linear domain; clamp away negative durations
            xs = torch.clamp(torch.round(xs.exp() - self.offset), min=0).long()
        if x_masks is not None:
            xs = xs.masked_fill(x_masks, 0.0)
        return xs

    def forward(self, xs, x_masks=None):
        """Calculate forward propagation.

        Args:
            xs (Tensor): Batch of input sequences (B, Tmax, idim).
            x_masks (ByteTensor, optional): Batch of masks indicating padded part (B, Tmax).

        Returns:
            Tensor: Batch of predicted durations in log domain (B, Tmax).
        """
        return self._forward(xs, x_masks, False)

    def inference(self, xs, x_masks=None):
        """Inference duration.

        Args:
            xs (Tensor): Batch of input sequences (B, Tmax, idim).
            x_masks (ByteTensor, optional): Batch of masks indicating padded part (B, Tmax).

        Returns:
            LongTensor: Batch of predicted durations in linear domain (B, Tmax).
        """
        return self._forward(xs, x_masks, True)
class DurationPredictorLoss(torch.nn.Module):
    """MSE loss for the duration predictor.

    The loss is computed in log domain to make the duration distribution
    closer to Gaussian.
    """

    def __init__(self, offset=1.0, reduction="none"):
        """Initialize the duration predictor loss.

        Args:
            offset (float, optional): Offset value to avoid nan in log domain.
            reduction (str): Reduction type passed to ``torch.nn.MSELoss``.
        """
        super(DurationPredictorLoss, self).__init__()
        self.criterion = torch.nn.MSELoss(reduction=reduction)
        self.offset = offset

    def forward(self, outputs, targets, nonpadding):
        """Compute the masked MSE between predictions and log-domain targets.

        Args:
            outputs (Tensor): Predicted durations in log domain (B, T).
            targets (LongTensor): Ground-truth durations in linear domain (B, T).
            nonpadding (Tensor): 1/0 mask of valid positions (B, T).

        Returns:
            Tensor: Scalar mean squared error over non-padded positions.

        Note:
            ``outputs`` is in log domain while ``targets`` is linear; targets
            are shifted by ``offset`` and logged before comparison.
        """
        log_targets = torch.log(targets.float() + self.offset)
        elementwise = self.criterion(outputs, log_targets.float())
        return (elementwise * nonpadding).sum() / nonpadding.sum()
def pad_list(xs, pad_value, max_len=None):
    """Pad a list of tensors into one batched tensor.

    Args:
        xs (List): List of Tensors [(T_1, `*`), (T_2, `*`), ..., (T_B, `*`)].
        pad_value (float): Value used for padding.
        max_len (int, optional): Target length; defaults to the longest input.
            Inputs longer than ``max_len`` are truncated.

    Returns:
        Tensor: Padded tensor (B, Tmax, `*`).

    Examples:
        >>> x = [torch.ones(4), torch.ones(2), torch.ones(1)]
        >>> pad_list(x, 0)
        tensor([[1., 1., 1., 1.],
                [1., 1., 0., 0.],
                [1., 0., 0., 0.]])
    """
    if max_len is None:
        max_len = max(x.size(0) for x in xs)
    batch = len(xs)
    # Allocate on the same device/dtype as the inputs, pre-filled with pad_value.
    out = xs[0].new(batch, max_len, *xs[0].size()[1:]).fill_(pad_value)
    for idx, item in enumerate(xs):
        length = min(item.size(0), max_len)
        out[idx, :length] = item[:max_len]
    return out
class LengthRegulator(torch.nn.Module):
    """Length regulator module for feed-forward Transformer.

    This is the length regulator described in `FastSpeech: Fast, Robust and
    Controllable Text to Speech`_: token-level features are expanded to frame
    level by repeating each position according to its duration.  This variant
    returns the 1-based token index for every output frame (``mel2ph``)
    rather than the expanded features themselves.

    .. _`FastSpeech: Fast, Robust and Controllable Text to Speech`:
        https://arxiv.org/pdf/1905.09263.pdf
    """

    def __init__(self, pad_value=0.0):
        """Initilize length regulator module.

        Args:
            pad_value (float, optional): Value used for padding.
        """
        super(LengthRegulator, self).__init__()
        self.pad_value = pad_value

    def forward(self, ds, ilens, alpha=1.0, max_len=None):
        """Expand token indices according to durations.

        Args:
            ds (LongTensor): Batch of durations of each token (B, T).
            ilens (LongTensor): Batch of input lengths (B,).
            alpha (float, optional): Speed-control factor; durations are
                scaled by ``alpha`` before expansion (must be > 0).
            max_len (int, optional): Length to pad the output to.

        Returns:
            LongTensor: 1-based token index for each frame, zero-padded (B, T*).
        """
        assert alpha > 0
        if alpha != 1.0:
            ds = torch.round(ds.float() * alpha).long()
        ds = [d[:ilen] for d, ilen in zip(ds, ilens)]
        # +1 so that index 0 remains reserved for padding frames.
        mel2ph = [self._repeat_one_sequence(torch.arange(len(d)).to(d.device), d) + 1 for d in ds]
        return pad_list(mel2ph, 0, max_len).long()

    def _repeat_one_sequence(self, x, d):
        """Repeat each element of ``x`` according to its duration in ``d``.

        Examples:
            >>> x = torch.tensor([1, 2, 3])
            >>> d = torch.tensor([1, 2, 3])
            >>> LengthRegulator()._repeat_one_sequence(x, d).flatten()
            tensor([1, 2, 2, 3, 3, 3])
        """
        if d.sum() == 0:
            # All-zero durations would yield an empty sequence; fall back to
            # one frame per token so downstream code keeps working.
            # (fix: logging.warn is a deprecated alias of logging.warning)
            logging.warning("all of the predicted durations are 0. fill 0 with 1.")
            d = d.fill_(1)
        return torch.cat([x_.repeat(int(d_), 1) for x_, d_ in zip(x, d) if d_ != 0], dim=0)
class PitchPredictor(torch.nn.Module):
    """Convolutional pitch predictor with sinusoidal position conditioning.

    Adds (learnably scaled) sinusoidal position embeddings to the input,
    runs a stack of Conv1d -> ReLU -> LayerNorm -> Dropout groups, and
    projects each frame to ``odim`` outputs.
    """

    def __init__(self, idim, n_layers=5, n_chans=384, odim=2, kernel_size=5,
                 dropout_rate=0.1, padding='SAME'):
        """Initialize the pitch predictor.

        Args:
            idim (int): Input dimension.
            n_layers (int, optional): Number of convolutional layers.
            n_chans (int, optional): Number of channels of convolutional layers.
            odim (int, optional): Output dimension per frame.
            kernel_size (int, optional): Kernel size of convolutional layers.
            dropout_rate (float, optional): Dropout rate.
            padding (str, optional): 'SAME' (centered) or 'LEFT' (causal).
        """
        super(PitchPredictor, self).__init__()
        self.kernel_size = kernel_size
        self.padding = padding
        blocks = []
        for i in range(n_layers):
            blocks.append(torch.nn.Sequential(
                torch.nn.Conv1d(idim if i == 0 else n_chans, n_chans, kernel_size, stride=1, padding=0),
                torch.nn.ReLU(),
                LayerNorm(n_chans, dim=1),
                torch.nn.Dropout(dropout_rate),
            ))
        self.conv = torch.nn.ModuleList(blocks)
        self.linear = torch.nn.Linear(n_chans, odim)
        self.embed_positions = SinusoidalPositionalEmbedding(idim, 0, init_size=4096)
        # Learnable scale on the position embeddings.
        self.pos_embed_alpha = nn.Parameter(torch.Tensor([1]))

    def forward(self, xs):
        """
        :param xs: [B, T, H]
        :return: [B, T, odim]
        """
        xs = xs + self.pos_embed_alpha * self.embed_positions(xs[..., 0])
        xs = xs.transpose(1, -1)  # (B, idim, Tmax)
        for block in self.conv:
            # Pad manually so the Conv1d modules (padding=0) keep frame count.
            if self.padding == 'SAME':
                xs = F.pad(xs, [self.kernel_size // 2, self.kernel_size // 2])
            elif self.padding == 'LEFT':
                xs = F.pad(xs, [self.kernel_size - 1, 0])
            xs = block(xs)  # (B, C, Tmax)
        return self.linear(xs.transpose(1, -1))  # (B, Tmax, odim)
class EnergyPredictor(PitchPredictor):
    """Energy predictor; identical architecture to :class:`PitchPredictor`."""
    pass
class FastspeechDecoder(nn.Module):
    """Stack of Transformer encoder-style layers used as the FastSpeech decoder.

    Adds scaled sinusoidal position embeddings, runs ``len(arch)``
    :class:`TransformerEncoderLayer` layers (op codes given by ``arch``),
    applies a final LayerNorm, and optionally returns per-layer attention
    weights.
    """
    def __init__(self, arch, hidden_size=None, dropout=None):
        super().__init__()
        self.arch = arch  # arch = encoder op code, one entry per layer
        self.num_layers = len(arch)
        # Fall back to globally configured hparams when not given explicitly.
        if hidden_size is not None:
            embed_dim = self.hidden_size = hidden_size
        else:
            embed_dim = self.hidden_size = hparams['hidden_size']
        if dropout is not None:
            self.dropout = dropout
        else:
            self.dropout = hparams['dropout']
        self.max_source_positions = DEFAULT_MAX_TARGET_POSITIONS
        self.padding_idx = 0
        # Learnable scale applied to the sinusoidal position embeddings.
        self.pos_embed_alpha = nn.Parameter(torch.Tensor([1]))
        self.embed_positions = SinusoidalPositionalEmbedding(
            embed_dim, self.padding_idx,
            init_size=self.max_source_positions + self.padding_idx + 1,
        )
        self.layers = nn.ModuleList([])
        self.layers.extend([
            TransformerEncoderLayer(self.arch[i], self.hidden_size, self.dropout)
            for i in range(self.num_layers)
        ])
        self.layer_norm = nn.LayerNorm(embed_dim)
    def forward(self, x, require_w=False):
        """
        :param x: [B, T, C]
        :param require_w: True if this module needs to return weight matrix
        :return: [B, T, C] (or a tuple ([B, T, C], attn_w list) when require_w)
        """
        # Frames whose feature vector is all zero are treated as padding.
        padding_mask = x.abs().sum(-1).eq(0).data
        positions = self.pos_embed_alpha * self.embed_positions(x[..., 0])
        x = x + positions
        x = F.dropout(x, p=self.dropout, training=self.training)
        # B x T x C -> T x B x C
        x = x.transpose(0, 1)
        # encoder layers
        attn_w = []
        if require_w:
            for layer in self.layers:
                x, attn_w_i = layer(x, encoder_padding_mask=padding_mask, require_w=require_w)
                attn_w.append(attn_w_i)
        else:
            # modules/operations.py:122, modules.operations.EncSALayer
            for layer in self.layers:
                x = layer(x, encoder_padding_mask=padding_mask)  # remember to assign back to x
        x = self.layer_norm(x)
        x = x.transpose(0, 1)
        return (x, attn_w) if require_w else x
| 14,686 | 37.447644 | 116 | py |
NeuralSpeech | NeuralSpeech-master/PriorGrad-acoustic/modules/operations.py | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import math
import torch
from torch import nn
from torch.nn import Parameter
import torch.onnx.operators
import torch.nn.functional as F
import tts_utils
from tts_utils.hparams import hparams
class SelfAttention(nn.Module):
    """Multi-head self-attention with an optional learnable Gaussian locality bias.

    Inputs and outputs are (L, B, C).  When ``gaussian_bias`` is set, a
    per-head bias of -(i-j)^2 / (2 * tao^4) is added to the attention logits,
    with ``tao`` a learnable per-head temperature.
    """
    def __init__(self, hid_dim, n_heads, dropout=0.1, gaussian_bias=False, gaussian_tao=None,
                 gaus_init_l=3000):
        super().__init__()
        self.hid_dim = hid_dim
        self.n_heads = n_heads
        assert hid_dim % n_heads == 0
        self.w_q = Linear(hid_dim, hid_dim)
        self.w_k = Linear(hid_dim, hid_dim)
        self.w_v = Linear(hid_dim, hid_dim)
        self.gaussian_bias = gaussian_bias
        if gaussian_bias:
            # One temperature parameter per head; sigma = tao^2.
            self.tao = nn.Parameter(torch.FloatTensor(n_heads))
            nn.init.constant_(self.tao, gaussian_tao)  # sigma = tao^2
            # Pre-build the -(i-j)^2/2 distance matrix (division by sigma^2
            # is applied later with the current tao values).
            self.bias_matrix = torch.Tensor([[-abs(i - j) ** 2 / 2.
                                             for i in range(gaus_init_l)] for j in range(gaus_init_l)])
        self.fc = Linear(hid_dim, hid_dim)
        self.dropout = nn.Dropout(dropout)
        self.sqrt_d = (hid_dim // n_heads) ** -0.5
    def forward(self, query, key, value, mask=None, require_w=False):
        # Compute and reshape Q, K, V; input is (L, B, C).
        # NOTE(review): transpose_ mutates the caller's tensors in place —
        # callers must not rely on query/key/value keeping (L, B, C) layout.
        for m in [query, key, value]:
            m.transpose_(0, 1)  # convert to B L C
        bsz, length, emb_dim = query.shape
        Q = self.w_q(query)  # B, L, hid_emb
        K = self.w_k(key)
        V = self.w_v(value)
        # -1 means the dim should be inferred
        # B, L, n, C//n: split the embedding into n_heads chunks
        Q = Q.view(bsz, -1, self.n_heads, self.hid_dim // self.n_heads)
        Q = Q.permute(0, 2, 1, 3)  # B, n, L, C//n
        K = K.view(bsz, -1, self.n_heads, self.hid_dim // self.n_heads).permute(0, 2, 1, 3)
        V = V.view(bsz, -1, self.n_heads, self.hid_dim // self.n_heads).permute(0, 2, 1, 3)
        QK = torch.matmul(Q, K.transpose(2, 3)) * self.sqrt_d  # Transpose last two dim, out = B, n, L, L
        if self.gaussian_bias:
            # gaussian distribution: L*L
            L = QK.size(-1)
            # get matrix of -(i-j)^2 / 2: i, j \in [0, L-1]
            if L <= self.bias_matrix.size(0):
                gaussian_mask = self.bias_matrix[:L, :L].repeat(self.n_heads, 1, 1).to(QK.device)  # n. L. L.
            else:  # sequence longer than the pre-built matrix: rebuild on the fly
                gaussian_mask = torch.tensor([[-abs(i - j) ** 2 / 2.
                                               for i in range(L)] for j in range(L)]
                                             ).repeat(self.n_heads, 1, 1).to(QK.device)
                print("Tensor is too long, size:", L)
            # divide gaussian matrix by tao^4 (multiply by tao^(-4))
            # tao: nn.Parameters, size [n_head]
            gaussian_mask = torch.mul(gaussian_mask, torch.pow(self.tao, -4)[:, None, None])  # out shape n,L,L
            QK += gaussian_mask.repeat(bsz, 1, 1, 1).to(QK.device)  # expand mask n,L,L to B, n, L, L
        if mask is not None:
            '''
            attn weight size: b*n_h, L, L
            mask size: b, L -> b, 1, 1, L +
            attn_weight(b,n_head,L,L).masked_fill(mask)
            '''
            QK = QK.masked_fill(mask[:, None, None, :], float('-inf'))
        # softmax over the key axis to obtain attention weights
        attn_weight = torch.softmax(QK, dim=-1)
        attention = self.dropout(attn_weight)
        # weighted sum of the values
        x = torch.matmul(attention, V)  # B, n, L, C//n
        # re-assemble the heads into a single embedding
        x = x.permute(0, 2, 1, 3).contiguous()  # B, L, n, C//n
        x = x.view(bsz, -1, self.n_heads * (self.hid_dim // self.n_heads))  # B, L, C
        x = self.fc(x)
        x.transpose_(0, 1)  # return L B C
        # return to operations.py: EncGausSALayer.forward()
        return (x, attn_weight[:1, :1, ...]) if require_w else (x, None)  # remember to add bracket
class EncGausSALayer(nn.Module):
    """Pre-norm Transformer encoder layer with Gaussian-biased self-attention.

    Structure: LN -> SelfAttention (+residual) -> LN -> FFN (+residual).
    """

    def __init__(self, hid_dim, num_heads, dropout, attention_dropout=0.1, relu_dropout=0.1, gaus_bias=False,
                 gaus_tao=10):
        super().__init__()
        self.dropout = dropout
        self.layer_norm1 = LayerNorm(hid_dim)
        # SelfAttention(hid_dim, n_heads, dropout, gaussian_bias, gaussian_tao)
        self.self_attn_gaus_bias = SelfAttention(hid_dim, num_heads, attention_dropout, gaus_bias, gaus_tao)
        self.layer_norm2 = LayerNorm(hid_dim)
        self.ffn = TransformerFFNLayer(hid_dim, 4 * hid_dim, kernel_size=9, dropout=relu_dropout)

    def forward(self, x, encoder_padding_mask=None, require_w=False, **kwargs):
        layer_norm_training = kwargs.get('layer_norm_training', None)
        if layer_norm_training is not None:
            # Allow callers to force train/eval behaviour of the norms.
            self.layer_norm1.training = layer_norm_training
            self.layer_norm2.training = layer_norm_training
        # --- self-attention sub-block (pre-norm + residual) ---
        attn_in = self.layer_norm1(x)
        attn_out, attn_w = self.self_attn_gaus_bias(
            query=attn_in, key=attn_in, value=attn_in,
            mask=encoder_padding_mask, require_w=require_w)
        x = x + F.dropout(attn_out, self.dropout, training=self.training)
        # --- position-wise FFN sub-block (pre-norm + residual) ---
        ffn_out = self.ffn(self.layer_norm2(x))
        x = x + F.dropout(ffn_out, self.dropout, training=self.training)
        return (x, attn_w) if require_w else x
class CyclicalPositionEmb(nn.Module):
    """Project a K-dimensional cyclical position encoding to ``emb_size``.

    NOTE: ``forward`` is not implemented yet (returns None).
    """
    def __init__(self, K, emb_size):
        super(CyclicalPositionEmb, self).__init__()
        self.fc = Linear(K, emb_size)
    def forward(self, x):
        '''
        :param x: B * T * 1
        :return: x
        '''
        pass  # todo: projection via self.fc not implemented yet
def LayerNorm(normalized_shape, eps=1e-5, elementwise_affine=True, export=False):
    """Build a LayerNorm, preferring apex's fused CUDA kernel when usable.

    Falls back to ``torch.nn.LayerNorm`` when exporting, when CUDA is not
    available, or when apex is not installed.
    """
    use_fused = torch.cuda.is_available() and not export
    if use_fused:
        try:
            from apex.normalization import FusedLayerNorm
        except ImportError:
            pass
        else:
            return FusedLayerNorm(normalized_shape, eps, elementwise_affine)
    return torch.nn.LayerNorm(normalized_shape, eps, elementwise_affine)
def Linear(in_features, out_features, bias=True):
    """``nn.Linear`` with Xavier-uniform weights and zero-initialized bias."""
    layer = nn.Linear(in_features, out_features, bias)
    nn.init.xavier_uniform_(layer.weight)
    if bias:
        nn.init.constant_(layer.bias, 0.)
    return layer
class SinusoidalPositionalEmbedding(nn.Module):
    """This module produces sinusoidal positional embeddings of any length.

    Padding symbols are ignored.  The embedding table is cached and grown
    lazily when a longer sequence is seen.
    """
    def __init__(self, embedding_dim, padding_idx, init_size=1024):
        super().__init__()
        self.embedding_dim = embedding_dim
        self.padding_idx = padding_idx
        self.weights = SinusoidalPositionalEmbedding.get_embedding(
            init_size,
            embedding_dim,
            padding_idx,
        )
        # Dummy buffer used only to track the module's device/dtype.
        self.register_buffer('_float_tensor', torch.FloatTensor(1))
    @staticmethod
    def get_embedding(num_embeddings, embedding_dim, padding_idx=None):
        """Build sinusoidal embeddings.

        This matches the implementation in tensor2tensor, but differs slightly
        from the description in Section 3.5 of "Attention Is All You Need".
        """
        half_dim = embedding_dim // 2
        emb = math.log(10000) / (half_dim - 1)
        emb = torch.exp(torch.arange(half_dim, dtype=torch.float) * -emb)
        emb = torch.arange(num_embeddings, dtype=torch.float).unsqueeze(1) * emb.unsqueeze(0)
        emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1).view(num_embeddings, -1)
        if embedding_dim % 2 == 1:
            # zero pad the odd dimension
            emb = torch.cat([emb, torch.zeros(num_embeddings, 1)], dim=1)
        if padding_idx is not None:
            emb[padding_idx, :] = 0
        return emb
    def forward(self, input, incremental_state=None, timestep=None, **kwargs):
        """Input is expected to be of size [bsz x seqlen]."""
        bsz, seq_len = input.shape[:2]
        max_pos = self.padding_idx + 1 + seq_len
        if self.weights is None or max_pos > self.weights.size(0):
            # recompute/expand embeddings if needed
            self.weights = SinusoidalPositionalEmbedding.get_embedding(
                max_pos,
                self.embedding_dim,
                self.padding_idx,
            )
        self.weights = self.weights.to(self._float_tensor)
        if incremental_state is not None:
            # positions is the same for every token when decoding a single step
            pos = timestep.view(-1)[0] + 1 if timestep is not None else seq_len
            return self.weights[self.padding_idx + pos, :].expand(bsz, 1, -1)
        positions = tts_utils.make_positions(input, self.padding_idx)
        return self.weights.index_select(0, positions.view(-1)).view(bsz, seq_len, -1).detach()
    def max_positions(self):
        """Maximum number of supported positions."""
        return int(1e5)  # an arbitrary large number
class ConvTBC(nn.Module):
    """1-D convolution over (Time, Batch, Channel)-ordered inputs.

    Thin wrapper around ``torch.conv_tbc`` which avoids the transposes that
    ``nn.Conv1d`` would need for time-first data.
    """

    def __init__(self, in_channels, out_channels, kernel_size, padding=0):
        super(ConvTBC, self).__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.kernel_size = kernel_size
        self.padding = padding
        # Weight layout required by torch.conv_tbc: (K, C_in, C_out).
        self.weight = torch.nn.Parameter(torch.Tensor(
            self.kernel_size, in_channels, out_channels))
        self.bias = torch.nn.Parameter(torch.Tensor(out_channels))
        # Fix: the parameters were previously left as uninitialized memory,
        # which can contain NaN/Inf if a caller forgets to initialize them.
        # Give them sane defaults; callers such as EncConvLayer that apply
        # their own initialization simply overwrite these values.
        nn.init.xavier_uniform_(self.weight)
        nn.init.constant_(self.bias, 0.)

    def forward(self, input):
        """Apply the convolution to ``input`` of shape (T, B, C_in)."""
        return torch.conv_tbc(input.contiguous(), self.weight, self.bias, self.padding)
class EncConvLayer(nn.Module):
    """Pre-norm convolutional encoder layer with residual connection.

    Structure: mask padding -> LayerNorm -> weight-normed ConvTBC -> ReLU
    -> dropout -> +residual.  Operates on (T, B, C) tensors.
    """

    def __init__(self, c, kernel_size, dropout):
        super().__init__()
        self.layer_norm = LayerNorm(c)
        conv = ConvTBC(c, c, kernel_size, padding=kernel_size // 2)
        # Variance-scaled init (as in convolutional seq2seq models) so the
        # activations stay stable under dropout.
        std = math.sqrt((4 * (1.0 - dropout)) / (kernel_size * c))
        nn.init.normal_(conv.weight, mean=0, std=std)
        nn.init.constant_(conv.bias, 0)
        self.conv = nn.utils.weight_norm(conv, dim=2)
        self.dropout = dropout

    def forward(self, x, encoder_padding_mask=None, **kwargs):
        layer_norm_training = kwargs.get('layer_norm_training', None)
        if layer_norm_training is not None:
            self.layer_norm.training = layer_norm_training
        residual = x
        if encoder_padding_mask is not None:
            # Zero out padded time steps before normalization.
            x = x.masked_fill(encoder_padding_mask.t().unsqueeze(-1), 0)
        h = self.layer_norm(x)
        h = self.conv(h)
        h = F.relu(h)
        h = F.dropout(h, self.dropout, self.training)
        return h + residual
class MultiheadAttention(nn.Module):
    """Multi-head scaled dot-product attention (fairseq-style).

    Supports self-attention and encoder-decoder attention, optional
    key/value bias vectors, an optional all-zero attention slot, and
    incremental decoding via a per-module saved-state buffer.  Inputs and
    outputs are (Time, Batch, Channel).
    """
    def __init__(self, embed_dim, num_heads, kdim=None, vdim=None, dropout=0., bias=True,
                 add_bias_kv=False, add_zero_attn=False, self_attention=False,
                 encoder_decoder_attention=False):
        super().__init__()
        self.embed_dim = embed_dim
        self.kdim = kdim if kdim is not None else embed_dim
        self.vdim = vdim if vdim is not None else embed_dim
        self.qkv_same_dim = self.kdim == embed_dim and self.vdim == embed_dim
        self.num_heads = num_heads
        self.dropout = dropout
        self.head_dim = embed_dim // num_heads
        assert self.head_dim * num_heads == self.embed_dim, "embed_dim must be divisible by num_heads"
        self.scaling = self.head_dim ** -0.5
        self.self_attention = self_attention
        self.encoder_decoder_attention = encoder_decoder_attention
        assert not self.self_attention or self.qkv_same_dim, 'Self-attention requires query, key and ' \
                                                            'value to be of the same size'
        # Q/K/V projections share one packed weight matrix when all dims agree.
        if self.qkv_same_dim:
            self.in_proj_weight = Parameter(torch.Tensor(3 * embed_dim, embed_dim))
        else:
            self.k_proj_weight = Parameter(torch.Tensor(embed_dim, self.kdim))
            self.v_proj_weight = Parameter(torch.Tensor(embed_dim, self.vdim))
            self.q_proj_weight = Parameter(torch.Tensor(embed_dim, embed_dim))
        if bias:
            self.in_proj_bias = Parameter(torch.Tensor(3 * embed_dim))
        else:
            self.register_parameter('in_proj_bias', None)
        self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
        if add_bias_kv:
            self.bias_k = Parameter(torch.Tensor(1, 1, embed_dim))
            self.bias_v = Parameter(torch.Tensor(1, 1, embed_dim))
        else:
            self.bias_k = self.bias_v = None
        self.add_zero_attn = add_zero_attn
        self.reset_parameters()
        # Use PyTorch's fused kernel when available (non-incremental path only).
        self.enable_torch_version = False
        if hasattr(F, "multi_head_attention_forward"):
            self.enable_torch_version = True
        else:
            self.enable_torch_version = False
    def reset_parameters(self):
        """Xavier-initialize projection weights and zero the biases."""
        if self.qkv_same_dim:
            nn.init.xavier_uniform_(self.in_proj_weight)
        else:
            nn.init.xavier_uniform_(self.k_proj_weight)
            nn.init.xavier_uniform_(self.v_proj_weight)
            nn.init.xavier_uniform_(self.q_proj_weight)
        nn.init.xavier_uniform_(self.out_proj.weight)
        if self.in_proj_bias is not None:
            nn.init.constant_(self.in_proj_bias, 0.)
            nn.init.constant_(self.out_proj.bias, 0.)
        if self.bias_k is not None:
            nn.init.xavier_normal_(self.bias_k)
        if self.bias_v is not None:
            nn.init.xavier_normal_(self.bias_v)
    def forward(
            self,
            query, key, value,
            key_padding_mask=None,
            incremental_state=None,
            need_weights=True,
            static_kv=False,
            attn_mask=None,
            before_softmax=False,
            need_head_weights=False,
            enc_dec_attn_constraint_mask=None
    ):
        """Input shape: Time x Batch x Channel

        Args:
            key_padding_mask (ByteTensor, optional): mask to exclude
                keys that are pads, of shape `(batch, src_len)`, where
                padding elements are indicated by 1s.
            need_weights (bool, optional): return the attention weights,
                averaged over heads (default: False).
            static_kv (bool, optional): reuse cached keys/values unchanged
                (encoder-decoder attention during incremental decoding).
            attn_mask (ByteTensor, optional): typically used to
                implement causal attention, where the mask prevents the
                attention from looking forward in time (default: None).
            before_softmax (bool, optional): return the raw attention
                weights and values before the attention softmax.
            need_head_weights (bool, optional): return the attention
                weights for each head. Implies *need_weights*. Default:
                return the average attention weights over all heads.
            enc_dec_attn_constraint_mask (Tensor, optional): per-head mask
                (bs x head x L_kv) forbidding attention to selected keys.
        """
        if need_head_weights:
            need_weights = True
        tgt_len, bsz, embed_dim = query.size()
        assert embed_dim == self.embed_dim
        assert list(query.size()) == [tgt_len, bsz, embed_dim]
        # Fast path: fall through to PyTorch's fused implementation when no
        # incremental state is involved.
        if self.enable_torch_version and incremental_state is None and not static_kv:
            if self.qkv_same_dim:
                return F.multi_head_attention_forward(query, key, value,
                                                      self.embed_dim, self.num_heads,
                                                      self.in_proj_weight,
                                                      self.in_proj_bias, self.bias_k, self.bias_v,
                                                      self.add_zero_attn, self.dropout,
                                                      self.out_proj.weight, self.out_proj.bias,
                                                      self.training, key_padding_mask, need_weights,
                                                      attn_mask)
            else:
                return F.multi_head_attention_forward(query, key, value,
                                                      self.embed_dim, self.num_heads,
                                                      torch.empty([0]),
                                                      self.in_proj_bias, self.bias_k, self.bias_v,
                                                      self.add_zero_attn, self.dropout,
                                                      self.out_proj.weight, self.out_proj.bias,
                                                      self.training, key_padding_mask, need_weights,
                                                      attn_mask, use_separate_proj_weight=True,
                                                      q_proj_weight=self.q_proj_weight,
                                                      k_proj_weight=self.k_proj_weight,
                                                      v_proj_weight=self.v_proj_weight)
        if incremental_state is not None:
            saved_state = self._get_input_buffer(incremental_state)
            if 'prev_key' in saved_state:
                # previous time steps are cached - no need to recompute
                # key and value if they are static
                if static_kv:
                    assert self.encoder_decoder_attention and not self.self_attention
                    key = value = None
        else:
            saved_state = None
        # --- project inputs to Q/K/V ---
        if self.self_attention:
            # self-attention
            q, k, v = self.in_proj_qkv(query)
        elif self.encoder_decoder_attention:
            # encoder-decoder attention
            q = self.in_proj_q(query)
            if key is None:
                assert value is None
                k = v = None
            else:
                k = self.in_proj_k(key)
                v = self.in_proj_v(key)
        else:
            q = self.in_proj_q(query)
            k = self.in_proj_k(key)
            v = self.in_proj_v(value)
        q *= self.scaling
        if self.bias_k is not None:
            assert self.bias_v is not None
            k = torch.cat([k, self.bias_k.repeat(1, bsz, 1)])
            v = torch.cat([v, self.bias_v.repeat(1, bsz, 1)])
            if attn_mask is not None:
                attn_mask = torch.cat([attn_mask, attn_mask.new_zeros(attn_mask.size(0), 1)], dim=1)
            if key_padding_mask is not None:
                key_padding_mask = torch.cat(
                    [key_padding_mask, key_padding_mask.new_zeros(key_padding_mask.size(0), 1)], dim=1)
        # Reshape to (bsz * heads, len, head_dim) for batched matmul.
        q = q.contiguous().view(tgt_len, bsz * self.num_heads, self.head_dim).transpose(0, 1)
        if k is not None:
            k = k.contiguous().view(-1, bsz * self.num_heads, self.head_dim).transpose(0, 1)
        if v is not None:
            v = v.contiguous().view(-1, bsz * self.num_heads, self.head_dim).transpose(0, 1)
        if saved_state is not None:
            # saved states are stored with shape (bsz, num_heads, seq_len, head_dim)
            if 'prev_key' in saved_state:
                prev_key = saved_state['prev_key'].view(bsz * self.num_heads, -1, self.head_dim)
                if static_kv:
                    k = prev_key
                else:
                    k = torch.cat((prev_key, k), dim=1)
            if 'prev_value' in saved_state:
                prev_value = saved_state['prev_value'].view(bsz * self.num_heads, -1, self.head_dim)
                if static_kv:
                    v = prev_value
                else:
                    v = torch.cat((prev_value, v), dim=1)
            if 'prev_key_padding_mask' in saved_state and saved_state['prev_key_padding_mask'] is not None:
                prev_key_padding_mask = saved_state['prev_key_padding_mask']
                if static_kv:
                    key_padding_mask = prev_key_padding_mask
                else:
                    key_padding_mask = torch.cat((prev_key_padding_mask, key_padding_mask), dim=1)
            saved_state['prev_key'] = k.view(bsz, self.num_heads, -1, self.head_dim)
            saved_state['prev_value'] = v.view(bsz, self.num_heads, -1, self.head_dim)
            saved_state['prev_key_padding_mask'] = key_padding_mask
            self._set_input_buffer(incremental_state, saved_state)
        src_len = k.size(1)
        # This is part of a workaround to get around fork/join parallelism
        # not supporting Optional types.
        if key_padding_mask is not None and key_padding_mask.shape == torch.Size([]):
            key_padding_mask = None
        if key_padding_mask is not None:
            assert key_padding_mask.size(0) == bsz
            assert key_padding_mask.size(1) == src_len
        if self.add_zero_attn:
            # Append an all-zero key/value slot the model may attend to.
            src_len += 1
            k = torch.cat([k, k.new_zeros((k.size(0), 1) + k.size()[2:])], dim=1)
            v = torch.cat([v, v.new_zeros((v.size(0), 1) + v.size()[2:])], dim=1)
            if attn_mask is not None:
                attn_mask = torch.cat([attn_mask, attn_mask.new_zeros(attn_mask.size(0), 1)], dim=1)
            if key_padding_mask is not None:
                key_padding_mask = torch.cat(
                    [key_padding_mask, torch.zeros(key_padding_mask.size(0), 1).type_as(key_padding_mask)], dim=1)
        attn_weights = torch.bmm(q, k.transpose(1, 2))
        attn_weights = self.apply_sparse_mask(attn_weights, tgt_len, src_len, bsz)
        assert list(attn_weights.size()) == [bsz * self.num_heads, tgt_len, src_len]
        if attn_mask is not None:
            attn_mask = attn_mask.unsqueeze(0)
            attn_weights += attn_mask
        if enc_dec_attn_constraint_mask is not None:  # bs x head x L_kv
            attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
            attn_weights = attn_weights.masked_fill(
                enc_dec_attn_constraint_mask.unsqueeze(2).bool(),
                float('-inf'),
            )
            attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
        if key_padding_mask is not None:
            # don't attend to padding symbols
            attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
            attn_weights = attn_weights.masked_fill(
                key_padding_mask.unsqueeze(1).unsqueeze(2),
                float('-inf'),
            )
            attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
        # Pre-softmax logits, kept for callers that need them (e.g. losses).
        attn_logits = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
        if before_softmax:
            return attn_weights, v
        attn_weights_float = tts_utils.softmax(attn_weights, dim=-1)
        attn_weights = attn_weights_float.type_as(attn_weights)
        attn_probs = F.dropout(attn_weights_float.type_as(attn_weights), p=self.dropout, training=self.training)
        attn = torch.bmm(attn_probs, v)
        assert list(attn.size()) == [bsz * self.num_heads, tgt_len, self.head_dim]
        attn = attn.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim)
        attn = self.out_proj(attn)
        if need_weights:
            attn_weights = attn_weights_float.view(bsz, self.num_heads, tgt_len, src_len).transpose(1, 0)
            if not need_head_weights:
                # average attention weights over heads
                attn_weights = attn_weights.mean(dim=0)
        else:
            attn_weights = None
        return attn, (attn_weights, attn_logits)
    def in_proj_qkv(self, query):
        """Project query to packed Q, K, V (shared-weight path)."""
        return self._in_proj(query).chunk(3, dim=-1)
    def in_proj_q(self, query):
        """Project query to Q."""
        if self.qkv_same_dim:
            return self._in_proj(query, end=self.embed_dim)
        else:
            bias = self.in_proj_bias
            if bias is not None:
                bias = bias[:self.embed_dim]
            return F.linear(query, self.q_proj_weight, bias)
    def in_proj_k(self, key):
        """Project key to K."""
        if self.qkv_same_dim:
            return self._in_proj(key, start=self.embed_dim, end=2 * self.embed_dim)
        else:
            weight = self.k_proj_weight
            bias = self.in_proj_bias
            if bias is not None:
                bias = bias[self.embed_dim:2 * self.embed_dim]
            return F.linear(key, weight, bias)
    def in_proj_v(self, value):
        """Project value to V."""
        if self.qkv_same_dim:
            return self._in_proj(value, start=2 * self.embed_dim)
        else:
            weight = self.v_proj_weight
            bias = self.in_proj_bias
            if bias is not None:
                bias = bias[2 * self.embed_dim:]
            return F.linear(value, weight, bias)
    def _in_proj(self, input, start=0, end=None):
        # Slice the packed (3*embed_dim) projection for one of Q/K/V.
        weight = self.in_proj_weight
        bias = self.in_proj_bias
        weight = weight[start:end, :]
        if bias is not None:
            bias = bias[start:end]
        return F.linear(input, weight, bias)
    def _get_input_buffer(self, incremental_state):
        # Fetch (or default to empty) this module's incremental-decoding cache.
        return tts_utils.get_incremental_state(
            self,
            incremental_state,
            'attn_state',
        ) or {}
    def _set_input_buffer(self, incremental_state, buffer):
        tts_utils.set_incremental_state(
            self,
            incremental_state,
            'attn_state',
            buffer,
        )
    def apply_sparse_mask(self, attn_weights, tgt_len, src_len, bsz):
        """Hook for subclasses to sparsify attention; identity here."""
        return attn_weights
    def clear_buffer(self, incremental_state=None):
        """Drop cached keys/values from the incremental-decoding state."""
        if incremental_state is not None:
            saved_state = self._get_input_buffer(incremental_state)
            if 'prev_key' in saved_state:
                del saved_state['prev_key']
            if 'prev_value' in saved_state:
                del saved_state['prev_value']
            self._set_input_buffer(incremental_state, saved_state)
class TransformerFFNLayer(nn.Module):
    """Position-wise FFN whose first layer is a width-``kernel_size`` convolution.

    The convolution is decomposed into ``kernel_size`` Linear layers applied
    to shifted copies of the input, which makes incremental (step-by-step)
    decoding straightforward.  Supports centered ('SAME') and causal ('LEFT')
    padding; operates on (T, B, C) tensors.
    """
    def __init__(self, hidden_size, filter_size, padding="SAME", kernel_size=1, dropout=0.):
        super().__init__()
        self.kernel_size = kernel_size
        self.dropout = dropout
        if kernel_size == 1:
            self.ffn_1 = Linear(hidden_size, filter_size)
        else:
            # first_offset/last_offset are the time shifts of the first and
            # last convolution tap relative to the output position.
            if padding == 'SAME':
                assert kernel_size % 2 == 1
                self.first_offset = -((kernel_size - 1) // 2)
            else:
                assert padding == 'LEFT'
                self.first_offset = -(kernel_size - 1)
            self.last_offset = self.first_offset + kernel_size - 1
            # One Linear per tap; only tap 0 carries the bias so the sum has
            # a single bias term.
            self.ffn_1 = nn.ModuleList()
            for i in range(kernel_size):
                self.ffn_1.append(Linear(hidden_size, filter_size, bias=(i == 0)))
        self.ffn_2 = Linear(filter_size, hidden_size)
    def forward(self, x, incremental_state=None):
        # x: T x B x C
        if incremental_state is not None:
            # Keep the last kernel_size inputs so a single new frame can be
            # convolved against its history.
            saved_state = self._get_input_buffer(incremental_state)
            if 'prev_input' in saved_state:
                prev_input = saved_state['prev_input']
                x = torch.cat((prev_input, x), dim=0)
            x = x[-self.kernel_size:]
            saved_state['prev_input'] = x
            self._set_input_buffer(incremental_state, saved_state)
        if self.kernel_size == 1:
            x = self.ffn_1(x)
        else:
            # Zero-pad the time axis so every tap's slice has length T.
            padded = F.pad(x, (0, 0, 0, 0, -self.first_offset, self.last_offset))
            results = []
            for i in range(self.kernel_size):
                # NOTE(review): for i == 0 this uses the unshifted x, but
                # padded[0:T] equals x only when first_offset == 0; the
                # identity shortcut looks like it was meant for
                # i == -self.first_offset. Changing it would alter trained
                # models' numerics — confirm intent before fixing.
                shifted = padded[i:x.size(0) + i] if i else x
                results.append(self.ffn_1[i](shifted))
            res = sum(results)
            # Scale the tap sum to keep its variance independent of kernel size.
            x = res * self.kernel_size ** -0.5
        if incremental_state is not None:
            # Only the newest frame is a valid output in incremental mode.
            x = x[-1:]
        x = F.relu(x)
        x = F.dropout(x, self.dropout, training=self.training)
        x = self.ffn_2(x)
        return x
    def _get_input_buffer(self, incremental_state):
        # Fetch (or default to empty) this module's incremental cache.
        return tts_utils.get_incremental_state(
            self,
            incremental_state,
            'f',
        ) or {}
    def _set_input_buffer(self, incremental_state, buffer):
        tts_utils.set_incremental_state(
            self,
            incremental_state,
            'f',
            buffer,
        )
    def clear_buffer(self, incremental_state):
        """Drop the cached input history from the incremental state."""
        if incremental_state is not None:
            saved_state = self._get_input_buffer(incremental_state)
            if 'prev_input' in saved_state:
                del saved_state['prev_input']
            self._set_input_buffer(incremental_state, saved_state)
class NewTransformerFFNLayer(nn.Module):
    """Position-wise FFN using a real ``nn.Conv1d`` as its first layer.

    Functional sibling of :class:`TransformerFFNLayer` that keeps the
    convolution as one Conv1d instead of per-tap Linear layers.  Operates on
    (T, B, C) tensors and supports incremental decoding via a cached input
    window.
    """
    def __init__(self, hidden_size, filter_size, padding="SAME", kernel_size=1, dropout=0.):
        super().__init__()
        self.kernel_size = kernel_size
        self.dropout = dropout
        if padding == 'SAME':
            self.ffn_1 = nn.Conv1d(hidden_size, filter_size, kernel_size, padding=kernel_size // 2)
        elif padding == 'LEFT':
            # Causal convolution: pad only on the left of the time axis.
            self.ffn_1 = nn.Sequential(
                nn.ConstantPad1d((kernel_size - 1, 0), 0.0),
                nn.Conv1d(hidden_size, filter_size, kernel_size)
            )
        self.ffn_2 = Linear(filter_size, hidden_size)
    def forward(self, x, incremental_state=None):
        # x: T x B x C
        if incremental_state is not None:
            # Keep the last kernel_size inputs as convolution history.
            # NOTE(review): with 'SAME' padding the centered conv would also
            # see future frames, which cannot exist in incremental decoding;
            # presumably only 'LEFT' padding is used incrementally — confirm.
            saved_state = self._get_input_buffer(incremental_state)
            if 'prev_input' in saved_state:
                prev_input = saved_state['prev_input']
                x = torch.cat((prev_input, x), dim=0)
            x = x[-self.kernel_size:]
            saved_state['prev_input'] = x
            self._set_input_buffer(incremental_state, saved_state)
        # Conv1d wants (B, C, T); permute in and out around it.
        x = self.ffn_1(x.permute(1, 2, 0)).permute(2, 0, 1)
        x = x * self.kernel_size ** -0.5
        if incremental_state is not None:
            # Only the newest frame is a valid output in incremental mode.
            x = x[-1:]
        x = F.relu(x)
        x = F.dropout(x, self.dropout, training=self.training)
        x = self.ffn_2(x)
        return x
    def _get_input_buffer(self, incremental_state):
        # Fetch (or default to empty) this module's incremental cache.
        return tts_utils.get_incremental_state(
            self,
            incremental_state,
            'f',
        ) or {}
    def _set_input_buffer(self, incremental_state, buffer):
        tts_utils.set_incremental_state(
            self,
            incremental_state,
            'f',
            buffer,
        )
    def clear_buffer(self, incremental_state):
        """Drop the cached input history from the incremental state."""
        if incremental_state is not None:
            saved_state = self._get_input_buffer(incremental_state)
            if 'prev_input' in saved_state:
                del saved_state['prev_input']
            self._set_input_buffer(incremental_state, saved_state)
class EncSALayer(nn.Module):
    """Pre-norm Transformer encoder layer: multi-head self-attention followed
    by a convolutional FFN, each with a residual connection, with padded time
    steps zeroed after every sub-layer.
    """
    def __init__(self, c, num_heads, dropout, attention_dropout=0.1, relu_dropout=0.1,
                 kernel_size=9, padding='SAME'):
        super().__init__()
        self.c = c
        self.dropout = dropout
        self.layer_norm1 = LayerNorm(c)
        self.self_attn = MultiheadAttention(
            self.c, num_heads, self_attention=True, dropout=attention_dropout, bias=False,
        )
        self.layer_norm2 = LayerNorm(c)
        # hparams toggles between the Conv1d-based FFN and the per-tap Linear FFN.
        if hparams['use_new_ffn']:
            self.ffn = NewTransformerFFNLayer(c, 4 * c, kernel_size=kernel_size, dropout=relu_dropout, padding=padding)
        else:
            self.ffn = TransformerFFNLayer(c, 4 * c, kernel_size=kernel_size, dropout=relu_dropout, padding=padding)
    def forward(self, x, encoder_padding_mask=None, **kwargs):
        # x: T x B x C; encoder_padding_mask: B x T, truthy at padded positions.
        # NOTE(review): encoder_padding_mask defaults to None but is used
        # unconditionally below -- callers must always pass it.
        layer_norm_training = kwargs.get('layer_norm_training', None)
        if layer_norm_training is not None:
            # Caller may pin the norms' train/eval mode independently of the module.
            self.layer_norm1.training = layer_norm_training
            self.layer_norm2.training = layer_norm_training
        residual = x
        x = self.layer_norm1(x)
        x, _, = self.self_attn(
            query=x,
            key=x,
            value=x,
            key_padding_mask=encoder_padding_mask
        )
        x = F.dropout(x, self.dropout, training=self.training)
        x = residual + x
        # Zero out padded time steps after the residual add.
        x = x * (1 - encoder_padding_mask.float()).transpose(0, 1)[..., None]
        residual = x
        x = self.layer_norm2(x)
        x = self.ffn(x)
        x = F.dropout(x, self.dropout, training=self.training)
        x = residual + x
        x = x * (1 - encoder_padding_mask.float()).transpose(0, 1)[..., None]
        return x
class EncLocalSALayer(nn.Module):
    """Encoder layer with *local* (chunked) self-attention.

    The sequence is processed in query chunks of (chunk_size // 2 + 1) frames;
    each chunk attends only to keys within `chunk_size` frames around it,
    which bounds attention memory for long inputs. A banded additive mask
    enforces the local window inside each chunk.
    """
    def __init__(self, c, num_heads, dropout, attention_dropout=0.1, relu_dropout=0.1):
        super().__init__()
        self.c = c
        self.dropout = dropout
        self.layer_norm1 = LayerNorm(c)
        self.self_attn = MultiheadAttention(
            self.c, num_heads, self_attention=True, dropout=attention_dropout, bias=False,
        )
        self.layer_norm2 = LayerNorm(c)
        self.ffn = TransformerFFNLayer(c, 4 * c, kernel_size=9, dropout=relu_dropout)
        self.chunk_size = 101
    def forward(self, x, encoder_padding_mask=None, **kwargs):
        # x: T x B x C; encoder_padding_mask: B x T, truthy at padded positions.
        layer_norm_training = kwargs.get('layer_norm_training', None)
        if layer_norm_training is not None:
            self.layer_norm1.training = layer_norm_training
            self.layer_norm2.training = layer_norm_training
        residual = x
        x = self.layer_norm1(x)
        states = []
        T = x.shape[0]
        # Band mask: position t may attend to [t - half, t + half] only.
        all_neg_inf = tts_utils.fill_with_neg_inf2(x.new(T, T))
        half_chunk_size = self.chunk_size // 2
        attn_mask = torch.triu(all_neg_inf, half_chunk_size + 1) \
                    + torch.tril(all_neg_inf, -half_chunk_size - 1)
        encoder_padding_mask = encoder_padding_mask.data
        for i in range(0, x.shape[0], half_chunk_size + 1):
            # Keys cover the query chunk plus half a window on each side.
            k_start = max(0, i - half_chunk_size)
            k_end = min(x.shape[0], i + self.chunk_size)
            kv = x[k_start:k_end]
            q = x[i:i + half_chunk_size + 1]
            q_nonpadding = (1 - encoder_padding_mask[:, i:i + half_chunk_size + 1].float()).data
            encoder_padding_mask_ = encoder_padding_mask[:, k_start:k_end].data
            # If a batch item's whole query chunk is padding, un-mask its keys
            # so softmax over an all-masked row does not produce NaNs.
            encoder_padding_mask_[q_nonpadding.sum(-1) == 0, :] = 0
            x_, _, = self.self_attn(
                query=q,
                key=kv,
                value=kv,
                key_padding_mask=encoder_padding_mask_,
                attn_mask=attn_mask[i:i + half_chunk_size + 1, k_start:k_end]
            )
            # Keep only non-padded query positions. Bug fix: the mask was
            # previously inverted (`1 - q_nonpadding`), which zeroed the
            # attention output at every *valid* frame and kept the padded ones,
            # turning the attention sub-layer into a no-op on real content.
            x_ = x_ * q_nonpadding.T[:, :, None]
            states.append(x_)
        x = torch.cat(states)
        x = F.dropout(x, self.dropout, training=self.training)
        x = residual + x
        residual = x
        x = self.layer_norm2(x)
        x = self.ffn(x)
        x = F.dropout(x, self.dropout, training=self.training)
        x = residual + x
        return x
class EncLSTMLayer(nn.Module):
    """Pre-norm bidirectional-LSTM encoder layer with a residual connection.

    The BiLSTM doubles the channel count, so its output is projected back to
    `c` before dropout and the residual add.
    """
    def __init__(self, c, dropout):
        super().__init__()
        self.c = c
        self.dropout = dropout
        self.layer_norm = LayerNorm(c)
        self.lstm = nn.LSTM(c, c, 1, bidirectional=True)
        self.out_proj = Linear(2 * c, c)
    def forward(self, x, **kwargs):
        # Caller may pin the norm's train/eval mode independently of the module.
        ln_mode = kwargs.get('layer_norm_training', None)
        if ln_mode is not None:
            self.layer_norm.training = ln_mode
        self.lstm.flatten_parameters()
        skip = x
        h = self.layer_norm(x)
        h, _ = self.lstm(h)
        h = self.out_proj(h)
        h = F.dropout(h, self.dropout, training=self.training)
        return skip + h
class ConvAttentionLayer(nn.Module):
    """Dot-product encoder-decoder attention in the ConvS2S style: project the
    query, attend over encoder states, then rescale the context by the
    (per-item) source length.
    """
    def __init__(self, c, hidden_size, dropout=0.):
        super().__init__()
        self.in_projection = Linear(c, hidden_size)
        self.out_projection = Linear(hidden_size, c)
        self.dropout = dropout
    def forward(self, x, key, value, encoder_padding_mask=None, enc_dec_attn_constraint_mask=None):
        # x, key, value : T x B x C
        # attention
        query = self.in_projection(x)
        # scores: B x T_tgt x T_src
        attn_weights = torch.bmm(query.transpose(0, 1), key.transpose(0, 1).transpose(1, 2))
        # don't attend over padding
        if encoder_padding_mask is not None:
            attn_weights = attn_weights.masked_fill(
                encoder_padding_mask.unsqueeze(1),
                float('-inf')
            ).type_as(attn_weights)  # FP16 support: cast to float and back
        # Optional extra mask, e.g. to constrain attention to a monotonic band.
        if enc_dec_attn_constraint_mask is not None:
            attn_weights = attn_weights.masked_fill(
                enc_dec_attn_constraint_mask.bool(),
                float('-inf'),
            ).type_as(attn_weights)
        attn_logits = attn_weights
        # softmax over last dim
        sz = attn_weights.size()
        attn_scores = F.softmax(attn_weights.view(sz[0] * sz[1], sz[2]), dim=1)
        attn_scores = attn_scores.view(sz)
        attn_scores = F.dropout(attn_scores, p=self.dropout, training=self.training)
        attn = torch.bmm(attn_scores, value.transpose(0, 1)).transpose(0, 1)
        # scale attention output (respecting potentially different lengths)
        s = value.size(0)
        if encoder_padding_mask is None:
            # s * sqrt(1/s) == sqrt(s): compensates for the softmax averaging.
            attn = attn * (s * math.sqrt(1.0 / s))
        else:
            s = s - encoder_padding_mask.type_as(attn).sum(dim=1, keepdim=True)  # exclude padding
            s = s.transpose(0, 1).unsqueeze(-1)
            # s * rsqrt(s) == sqrt(s), computed per batch item.
            attn = attn * (s * s.rsqrt())
        # project back
        attn = self.out_projection(attn)
        return attn, attn_scores, attn_logits
class LinearizedConvolution(ConvTBC):
    """An optimized version of nn.Conv1d.
    At training time, this module uses ConvTBC, which is an optimized version
    of Conv1d. At inference time, it optimizes incremental generation (i.e.,
    one time step at a time) by replacing the convolutions with linear layers.
    Note that the input order changes from training to inference.
    """
    def __init__(self, in_channels, out_channels, kernel_size, **kwargs):
        super().__init__(in_channels, out_channels, kernel_size, **kwargs)
        # Flattened (out_channels x kw*in_channels) weight, built lazily and
        # invalidated whenever the conv weight receives a gradient.
        self._linearized_weight = None
        # NOTE(review): register_backward_hook is deprecated in recent PyTorch
        # in favor of register_full_backward_hook -- confirm before upgrading.
        self.register_backward_hook(self._clear_linearized_weight)
    def forward(self, input, incremental_state=None):
        """
        Args:
            incremental_state: Used to buffer signal; if not None, then input is
                expected to contain a single frame. If the input order changes
                between time steps, call reorder_incremental_state.
        Input:
            Time x Batch x Channel
        """
        if incremental_state is None:
            output = super().forward(input)
            if self.kernel_size > 1 and self.padding > 0:
                # remove future timesteps added by padding
                output = output[:-self.padding, :, :]
            return output
        # reshape weight
        weight = self._get_linearized_weight()
        kw = self.kernel_size
        input = input.transpose(0, 1)
        bsz = input.size(0)  # input: bsz x len x dim
        if kw > 1:
            # Maintain a rolling window of the last kw input frames per batch item.
            input = input.data
            input_buffer = self._get_input_buffer(incremental_state)
            if input_buffer is None:
                input_buffer = input.new(bsz, kw, input.size(2)).zero_()
                self._set_input_buffer(incremental_state, input_buffer)
            else:
                # shift buffer
                input_buffer[:, :-1, :] = input_buffer[:, 1:, :].clone()
            # append next input
            input_buffer[:, -1, :] = input[:, -1, :]
            input = input_buffer
        with torch.no_grad():
            # The conv over the kw-frame window collapses to a single matmul.
            output = F.linear(input.view(bsz, -1), weight, self.bias)
        return output.view(bsz, 1, -1).transpose(0, 1)
    def _get_input_buffer(self, incremental_state):
        return tts_utils.get_incremental_state(self, incremental_state, 'input_buffer')
    def _set_input_buffer(self, incremental_state, new_buffer):
        return tts_utils.set_incremental_state(self, incremental_state, 'input_buffer', new_buffer)
    def _get_linearized_weight(self):
        # Reorder conv weight from (kw, in, out) TBC layout to (out, kw, in)
        # and flatten for the single-step F.linear path.
        if self._linearized_weight is None:
            kw = self.kernel_size
            weight = self.weight.transpose(2, 1).transpose(1, 0).contiguous()
            assert weight.size() == (self.out_channels, kw, self.in_channels)
            self._linearized_weight = weight.view(self.out_channels, -1)
        return self._linearized_weight
    def _clear_linearized_weight(self, *args):
        # Invalidate the cache after a backward pass updates self.weight.
        self._linearized_weight = None
    def clear_buffer(self, input, incremental_state=None):
        # Reset the rolling input window for a new utterance.
        if incremental_state is not None:
            self._set_input_buffer(incremental_state, None)
class DecConvLayer(nn.Module):
    """Decoder layer: causal (left-padded) linearized convolution followed by
    single-head encoder-decoder attention, each with a residual connection.
    """
    def __init__(self, c, kernel_size, dropout, attention_dropout=0.1):
        super().__init__()
        self.layer_norm1 = LayerNorm(c)
        # padding = kernel_size - 1 makes the conv causal; the extra future
        # frames it produces are cut off inside LinearizedConvolution.forward.
        conv = LinearizedConvolution(c, c, kernel_size, padding=kernel_size - 1)
        # ConvS2S-style init: variance scaled by dropout keep-probability.
        std = math.sqrt((4 * (1.0 - dropout)) / (kernel_size * c))
        nn.init.normal_(conv.weight, mean=0, std=std)
        nn.init.constant_(conv.bias, 0)
        self.conv = nn.utils.weight_norm(conv, dim=2)
        self.layer_norm2 = LayerNorm(c)
        # self.attention = ConvAttentionLayer(c, c, dropout=attention_dropout)
        self.attention = MultiheadAttention(c, 1, dropout=attention_dropout, encoder_decoder_attention=True, bias=False)
        self.dropout = dropout
    def forward(self, x, encoder_out=None, encoder_padding_mask=None, incremental_state=None, **kwargs):
        # x: T x B x C; encoder_out: T_src x B x C.
        layer_norm_training = kwargs.get('layer_norm_training', None)
        if layer_norm_training is not None:
            # Caller may pin the norms' train/eval mode independently of the module.
            self.layer_norm1.training = layer_norm_training
            self.layer_norm2.training = layer_norm_training
        residual = x
        x = self.layer_norm1(x)
        x = self.conv(x, incremental_state=incremental_state)
        x = F.relu(x)
        x = F.dropout(x, self.dropout, training=self.training)
        x = residual + x
        x = self.layer_norm2(x)
        x, attn = self.attention(
            query=x,
            key=encoder_out,
            value=encoder_out,
            key_padding_mask=encoder_padding_mask,
            incremental_state=incremental_state,
            static_kv=True,
            enc_dec_attn_constraint_mask=tts_utils.get_incremental_state(self, incremental_state,
                                                                         'enc_dec_attn_constraint_mask')
        )
        x = F.dropout(x, self.dropout, training=self.training)
        x = residual + x
        # attn is a tuple; index 1 holds the pre-softmax attention logits.
        attn_logits = attn[1]
        # if len(attn_logits.size()) > 3:
        #     attn_logits = attn_logits[:, 0]
        return x, attn_logits
    def clear_buffer(self, input, encoder_out=None, encoder_padding_mask=None, incremental_state=None):
        # Reset the conv's incremental input window for a new utterance.
        self.conv.clear_buffer(input, incremental_state)
    def set_buffer(self, name, tensor, incremental_state):
        return tts_utils.set_incremental_state(self, incremental_state, name, tensor)
class DecSALayer(nn.Module):
    """Pre-norm Transformer decoder layer: masked self-attention,
    encoder-decoder attention, then a causal ('LEFT'-padded) FFN, each with a
    residual connection.
    """
    def __init__(self, c, num_heads, dropout, attention_dropout=0.1, relu_dropout=0.1, kernel_size=9):
        super().__init__()
        self.c = c
        self.dropout = dropout
        self.layer_norm1 = LayerNorm(c)
        self.self_attn = MultiheadAttention(
            c, num_heads, self_attention=True, dropout=attention_dropout, bias=False
        )
        self.layer_norm2 = LayerNorm(c)
        self.encoder_attn = MultiheadAttention(
            c, num_heads, encoder_decoder_attention=True, dropout=attention_dropout, bias=False,
        )
        self.layer_norm3 = LayerNorm(c)
        # hparams toggles between the Conv1d-based FFN and the per-tap Linear FFN;
        # 'LEFT' padding keeps the FFN causal for autoregressive decoding.
        if hparams['use_new_ffn']:
            self.ffn = NewTransformerFFNLayer(c, 4 * c, padding='LEFT', kernel_size=kernel_size, dropout=relu_dropout)
        else:
            self.ffn = TransformerFFNLayer(c, 4 * c, padding='LEFT', kernel_size=kernel_size, dropout=relu_dropout)
    def forward(
            self,
            x,
            encoder_out=None,
            encoder_padding_mask=None,
            incremental_state=None,
            self_attn_mask=None,
            self_attn_padding_mask=None,
            **kwargs,
    ):
        # x: T x B x C. Returns (x, attn_logits) where attn_logits are the
        # encoder-decoder attention's pre-softmax scores.
        layer_norm_training = kwargs.get('layer_norm_training', None)
        if layer_norm_training is not None:
            # Caller may pin the norms' train/eval mode independently of the module.
            self.layer_norm1.training = layer_norm_training
            self.layer_norm2.training = layer_norm_training
            self.layer_norm3.training = layer_norm_training
        residual = x
        x = self.layer_norm1(x)
        x, _ = self.self_attn(
            query=x,
            key=x,
            value=x,
            key_padding_mask=self_attn_padding_mask,
            incremental_state=incremental_state,
            attn_mask=self_attn_mask
        )
        x = F.dropout(x, self.dropout, training=self.training)
        x = residual + x
        residual = x
        x = self.layer_norm2(x)
        x, attn = self.encoder_attn(
            query=x,
            key=encoder_out,
            value=encoder_out,
            key_padding_mask=encoder_padding_mask,
            incremental_state=incremental_state,
            static_kv=True,
            enc_dec_attn_constraint_mask=tts_utils.get_incremental_state(self, incremental_state,
                                                                         'enc_dec_attn_constraint_mask')
        )
        x = F.dropout(x, self.dropout, training=self.training)
        x = residual + x
        residual = x
        x = self.layer_norm3(x)
        x = self.ffn(x, incremental_state=incremental_state)
        x = F.dropout(x, self.dropout, training=self.training)
        x = residual + x
        # attn is a tuple; index 1 holds the pre-softmax attention logits.
        attn_logits = attn[1]
        # if len(attn_logits.size()) > 3:
        #     indices = attn_logits.softmax(-1).max(-1).values.sum(-1).argmax(-1)
        #     attn_logits = attn_logits.gather(1,
        #                                      indices[:, None, None, None].repeat(1, 1, attn_logits.size(-2), attn_logits.size(-1))).squeeze(1)
        return x, attn_logits
    def clear_buffer(self, input, encoder_out=None, encoder_padding_mask=None, incremental_state=None):
        # Reset cached encoder K/V and the FFN's input window for a new utterance.
        self.encoder_attn.clear_buffer(incremental_state)
        self.ffn.clear_buffer(incremental_state)
    def set_buffer(self, name, tensor, incremental_state):
        return tts_utils.set_incremental_state(self, incremental_state, name, tensor)
class LSTMAttentionLayer(nn.Module):
    """Dot-product attention for the LSTM decoder: project the query into the
    source space, attend over encoder states, and fuse context and input
    through a tanh output projection.
    """
    def __init__(self, input_embed_dim, source_embed_dim, output_embed_dim, bias=False, dropout=0.):
        super().__init__()
        self.input_proj = Linear(input_embed_dim, source_embed_dim, bias=bias)
        self.output_proj = Linear(input_embed_dim + source_embed_dim, output_embed_dim, bias=bias)
        self.dropout = dropout
    def forward(self, input, source_hids, encoder_padding_mask=None, enc_dec_attn_constraint_mask=None):
        # input:       T_tgt x B x input_embed_dim
        # source_hids: T_src x B x source_embed_dim
        query = self.input_proj(input)                      # T_tgt x B x source dim
        keys = source_hids.transpose(0, 1)                  # B x T_src x C
        scores = torch.bmm(query.transpose(0, 1), keys.transpose(1, 2))  # B x T_tgt x T_src
        # Mask out padded source positions (cast to float for FP16 safety).
        if encoder_padding_mask is not None:
            scores = scores.float().masked_fill_(
                encoder_padding_mask.unsqueeze(1),
                float('-inf')
            ).type_as(scores)
        # Optional extra mask, e.g. to constrain attention to a monotonic band.
        if enc_dec_attn_constraint_mask is not None:
            scores = scores.float().masked_fill_(
                enc_dec_attn_constraint_mask.bool(),
                float('-inf')
            ).type_as(scores)
        attn_logits = scores
        b, t_tgt, t_src = scores.size()
        probs = F.softmax(scores.view(b * t_tgt, t_src), dim=1).view(b, t_tgt, t_src)
        probs = F.dropout(probs, p=self.dropout, training=self.training)
        # Weighted sum of source states, back to T_tgt x B x C.
        context = torch.bmm(probs, keys).transpose(0, 1)
        out = torch.tanh(self.output_proj(torch.cat((context, input), dim=-1)))
        return out, probs, attn_logits
class DecLSTMLayer(nn.Module):
    """Decoder layer: unidirectional LSTM (initialized from the mean encoder
    state) followed by single-head encoder-decoder attention, with the LSTM
    hidden/cell state cached across incremental steps.
    """
    def __init__(self, c, dropout, attention_dropout=0.1):
        super().__init__()
        self.c = c
        self.layer_norm1 = LayerNorm(c)
        # NOTE(review): nn.LSTM's `dropout` has no effect with num_layers=1
        # (PyTorch applies it only between stacked layers) -- confirm intent.
        self.lstm = nn.LSTM(c, c, 1, dropout=dropout)
        self.layer_norm2 = LayerNorm(c)
        # self.attention = LSTMAttentionLayer(c, c, c, dropout=attention_dropout)
        self.attention = MultiheadAttention(c, 1, dropout=attention_dropout, encoder_decoder_attention=True, bias=False)
        self.dropout = dropout
    def forward(self, x, encoder_out=None, encoder_padding_mask=None, incremental_state=None, **kwargs):
        # x: T x B x C; encoder_out: T_src x B x C.
        layer_norm_training = kwargs.get('layer_norm_training', None)
        if layer_norm_training is not None:
            # Caller may pin the norms' train/eval mode independently of the module.
            self.layer_norm1.training = layer_norm_training
            self.layer_norm2.training = layer_norm_training
        self.lstm.flatten_parameters()
        if incremental_state is not None:
            # Incremental decoding: process only the newest frame.
            x = x[-1:, :, :]
        # Resume from the cached LSTM state, or bootstrap it from the mean
        # encoder state for the first step.
        cached_state = tts_utils.get_incremental_state(self, incremental_state, 'cached_state')
        if cached_state is not None:
            prev_hiddens, prev_cells = cached_state
        else:
            prev_hiddens = encoder_out.mean(dim=0, keepdim=True)
            prev_cells = encoder_out.mean(dim=0, keepdim=True)
        residual = x
        x = self.layer_norm1(x)
        x, hidden = self.lstm(x, (prev_hiddens, prev_cells))
        hiddens, cells = hidden
        x = residual + x
        x = self.layer_norm2(x)
        x, attn = self.attention(
            query=x,
            key=encoder_out,
            value=encoder_out,
            key_padding_mask=encoder_padding_mask,
            incremental_state=incremental_state,
            static_kv=True,
            enc_dec_attn_constraint_mask=tts_utils.get_incremental_state(self, incremental_state,
                                                                         'enc_dec_attn_constraint_mask')
        )
        x = F.dropout(x, self.dropout, training=self.training)
        if incremental_state is not None:
            # prev_hiddens = torch.cat((prev_hiddens, hiddens), dim=0)
            # prev_cells = torch.cat((prev_cells, cells), dim=0)
            # Cache the final LSTM state for the next incremental step.
            prev_hiddens = hiddens
            prev_cells = cells
            tts_utils.set_incremental_state(
                self, incremental_state, 'cached_state',
                (prev_hiddens, prev_cells),
            )
        x = residual + x
        # attn is a tuple; index 1 holds the pre-softmax attention logits.
        attn_logits = attn[1]
        # if len(attn_logits.size()) > 3:
        #     attn_logits = attn_logits[:, 0]
        return x, attn_logits
    def clear_buffer(self, input, encoder_out=None, encoder_padding_mask=None, incremental_state=None):
        # Re-seed the cached LSTM state from the mean encoder state.
        if incremental_state is not None:
            prev_hiddens = encoder_out.mean(dim=0, keepdim=True)
            prev_cells = encoder_out.mean(dim=0, keepdim=True)
            tts_utils.set_incremental_state(
                self, incremental_state, 'cached_state',
                (prev_hiddens, prev_cells)
            )
    def set_buffer(self, name, tensor, incremental_state):
        return tts_utils.set_incremental_state(self, incremental_state, name, tensor)
# Registry mapping integer op codes (as used in `arch` strings / hparams) to
# encoder layer constructors. Codes 1-7: conv layers of growing kernel size;
# 8-10, 14-15: self-attention layers; 11: local (chunked) attention;
# 12: BiLSTM; 13: Gaussian-biased attention.
# NOTE(review): code 13's lambda takes extra args (g_bias, tao) -- callers
# must special-case it when instantiating from this table.
OPERATIONS_ENCODER = {  # c = hidden size
    1: lambda c, dropout: EncConvLayer(c, 1, dropout),  # h, num_heads, dropout
    2: lambda c, dropout: EncConvLayer(c, 5, dropout),
    3: lambda c, dropout: EncConvLayer(c, 9, dropout),
    4: lambda c, dropout: EncConvLayer(c, 13, dropout),
    5: lambda c, dropout: EncConvLayer(c, 17, dropout),
    6: lambda c, dropout: EncConvLayer(c, 21, dropout),
    7: lambda c, dropout: EncConvLayer(c, 25, dropout),
    8: lambda c, dropout: EncSALayer(c, 2, dropout=dropout,
                                     attention_dropout=0.0, relu_dropout=dropout,
                                     kernel_size=hparams['enc_ffn_kernel_size'],
                                     padding=hparams['ffn_padding']),
    9: lambda c, dropout: EncSALayer(c, 4, dropout),
    10: lambda c, dropout: EncSALayer(c, 8, dropout),
    11: lambda c, dropout: EncLocalSALayer(c, 2, dropout),
    12: lambda c, dropout: EncLSTMLayer(c, dropout),
    13: lambda c, dropout, g_bias, tao: EncGausSALayer(c, 1, dropout, gaus_bias=g_bias, gaus_tao=tao),
    14: lambda c, dropout: EncSALayer(c, 2, dropout, kernel_size=1),
    15: lambda c, dropout: EncSALayer(c, 2, dropout, kernel_size=15),
}
# Registry mapping integer op codes to decoder layer constructors.
# Codes 1-7: causal conv layers of growing kernel size; 8-10, 12:
# self-attention layers; 11: LSTM decoder layer.
OPERATIONS_DECODER = {
    1: lambda c, dropout: DecConvLayer(c, 1, dropout),
    2: lambda c, dropout: DecConvLayer(c, 5, dropout),
    3: lambda c, dropout: DecConvLayer(c, 9, dropout),
    4: lambda c, dropout: DecConvLayer(c, 13, dropout),
    5: lambda c, dropout: DecConvLayer(c, 17, dropout),
    6: lambda c, dropout: DecConvLayer(c, 21, dropout),
    7: lambda c, dropout: DecConvLayer(c, 25, dropout),
    8: lambda c, dropout: DecSALayer(c, 2, dropout=dropout,
                                     attention_dropout=0.0, relu_dropout=dropout,
                                     kernel_size=hparams['dec_ffn_kernel_size']),
    9: lambda c, dropout: DecSALayer(c, 4, dropout),
    10: lambda c, dropout: DecSALayer(c, 8, dropout),
    11: lambda c, dropout: DecLSTMLayer(c, dropout),
    12: lambda c, dropout: DecSALayer(c, 2, dropout, kernel_size=1),
}
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
from modules.operations import *
from modules.tts_modules import DurationPredictor, LengthRegulator, PitchPredictor, EnergyPredictor,\
TransformerEncoderLayer, DEFAULT_MAX_SOURCE_POSITIONS
from modules.diffusion import DiffDecoder
from tts_utils.world_utils import f0_to_coarse_torch, restore_pitch
import numpy as np
class AttrDict(dict):
    """A dict whose entries are also readable/writable as attributes."""
    def __init__(self, *args, **kwargs):
        super(AttrDict, self).__init__(*args, **kwargs)
        # Alias the attribute namespace to the mapping itself so that
        # d.key and d['key'] refer to the same storage.
        self.__dict__ = self
    def override(self, attrs):
        """Merge `attrs` into self and return self.

        `attrs` may be a dict, a (possibly nested) list/tuple/set of dicts,
        or None (no-op). Anything else raises NotImplementedError.
        """
        if attrs is None:
            return self
        if isinstance(attrs, dict):
            self.__dict__.update(**attrs)
            return self
        if isinstance(attrs, (list, tuple, set)):
            for item in attrs:
                self.override(item)
            return self
        raise NotImplementedError
class TransformerEncoder(nn.Module):
    """Phoneme-token Transformer encoder: scaled token embeddings plus
    sinusoidal positions, a stack of arch-selected layers, and an optional
    final LayerNorm with padding re-masked afterwards.
    """
    def __init__(self, arch, embed_tokens, last_ln=True):
        super().__init__()
        self.arch = arch  # per-layer op codes (see OPERATIONS_ENCODER)
        self.num_layers = hparams['enc_layers']
        self.hidden_size = hparams['hidden_size']
        self.embed_tokens = embed_tokens
        self.padding_idx = embed_tokens.padding_idx
        embed_dim = embed_tokens.embedding_dim
        self.dropout = hparams['dropout']
        # Standard Transformer embedding scale: sqrt(d_model).
        self.embed_scale = math.sqrt(embed_dim)
        self.max_source_positions = DEFAULT_MAX_SOURCE_POSITIONS
        self.embed_positions = SinusoidalPositionalEmbedding(
            embed_dim, self.padding_idx,
            init_size=self.max_source_positions + self.padding_idx + 1,
        )
        self.layers = nn.ModuleList([])
        self.layers.extend([
            TransformerEncoderLayer(self.arch[i], self.hidden_size, self.dropout)
            for i in range(self.num_layers)
        ])
        self.last_ln = last_ln
        if last_ln:
            self.layer_norm = LayerNorm(embed_dim)
    def forward_embedding(self, src_tokens):
        """Return (dropped-out embedding + positions, raw scaled embedding)."""
        # embed tokens and positions
        embed = self.embed_scale * self.embed_tokens(src_tokens)
        positions = self.embed_positions(src_tokens)
        # x = self.prenet(x)
        x = embed + positions
        x = F.dropout(x, p=self.dropout, training=self.training)
        return x, embed
    def forward(self, src_tokens):
        """
        :param src_tokens: [B, T]
        :return: {
            'encoder_out': [T x B x C]
            'encoder_padding_mask': [B x T]
            'encoder_embedding': [B x T x C]
            'attn_w': []
        }
        """
        x, encoder_embedding = self.forward_embedding(src_tokens)
        # B x T x C -> T x B x C
        x = x.transpose(0, 1)
        # compute padding mask (True at padding positions)
        encoder_padding_mask = src_tokens.eq(self.padding_idx).data
        # encoder layers
        for layer in self.layers:
            x = layer(x, encoder_padding_mask=encoder_padding_mask)
        if self.last_ln:
            x = self.layer_norm(x)
            # LayerNorm shifts padded frames away from zero; re-mask them.
            x = x * (1 - encoder_padding_mask.float()).transpose(0, 1)[..., None]
        return {
            'encoder_out': x,  # T x B x C
            'encoder_padding_mask': encoder_padding_mask,  # B x T
            'encoder_embedding': encoder_embedding,  # B x T x C
            'attn_w': []
        }
class PriorGrad(nn.Module):
def __init__(self, arch, dictionary, out_dims=None):
super().__init__()
self.dictionary = dictionary
self.padding_idx = dictionary.pad()
if isinstance(arch, str):
self.arch = list(map(int, arch.strip().split()))
else:
assert isinstance(arch, (list, tuple))
self.arch = arch
self.enc_layers = hparams['enc_layers']
self.enc_arch = self.arch[:self.enc_layers]
self.hidden_size = hparams['hidden_size']
self.encoder_embed_tokens = nn.Embedding(len(self.dictionary), self.hidden_size, self.padding_idx)
self.encoder = TransformerEncoder(self.enc_arch, self.encoder_embed_tokens)
self.decoder_proj_dim = hparams['decoder_proj_dim'] if 'decoder_proj_dim' in hparams else self.hidden_size
if self.decoder_proj_dim != self.hidden_size:
self.decoder_proj = Linear(self.hidden_size, hparams['decoder_proj_dim'], bias=True)
self.use_phone_stat = hparams['use_phone_stat'] if 'use_phone_stat' in hparams else False
self.condition_phone_stat = hparams['condition_phone_stat'] if 'condition_phone_stat' in hparams else False
self.diff_params = AttrDict(
n_mels = hparams['audio_num_mel_bins'],
residual_layers = hparams['diff_residual_layers'],
residual_channels = hparams['diff_residual_channels'],
conditioner_channels = self.decoder_proj_dim,
dilation_cycle_length = 1,
noise_schedule = np.linspace(hparams['diff_beta_start'], hparams['diff_beta_end'], hparams['diff_num_steps']).tolist(),
inference_noise_schedule = hparams['diff_inference_noise_schedule'],
use_phone_stat = self.use_phone_stat,
condition_phone_stat = self.condition_phone_stat
)
self.diff_beta = np.array(self.diff_params.noise_schedule)
noise_level = np.cumprod(1 - self.diff_beta)
self.diff_noise_level = torch.tensor(noise_level.astype(np.float32))
self.decoder = DiffDecoder(self.diff_params)
if hparams['use_spk_id']:
self.spk_embed_proj = nn.Embedding(hparams['num_spk'], self.hidden_size)
else:
self.spk_embed_proj = Linear(256, self.hidden_size, bias=True)
self.dur_predictor = DurationPredictor(
self.hidden_size,
n_chans=hparams['predictor_hidden'],
dropout_rate=0.5, padding=hparams['ffn_padding'],
kernel_size=hparams['dur_predictor_kernel'])
self.length_regulator = LengthRegulator()
if hparams['use_pitch_embed']:
self.pitch_embed = nn.Embedding(300, self.hidden_size, self.padding_idx)
self.pitch_predictor = PitchPredictor(
self.hidden_size, n_chans=hparams['predictor_hidden'], dropout_rate=0.5,
padding=hparams['ffn_padding'], odim=2)
self.pitch_do = nn.Dropout(0.5)
if hparams['use_energy_embed']:
self.energy_predictor = EnergyPredictor(
self.hidden_size, n_chans=hparams['predictor_hidden'], dropout_rate=0.5, odim=1,
padding=hparams['ffn_padding'])
self.energy_embed = nn.Embedding(256, self.hidden_size, self.padding_idx)
self.energy_do = nn.Dropout(0.5)
# encoder proj for MAS
if hparams['dur'] == 'mas':
print("INFO: using monotonic alignment search (MAS) for duration predictor training!")
self.encoder_proj = Linear(self.hidden_size, hparams['audio_num_mel_bins'], bias=True)
def forward(self, src_tokens, mel2ph, spk_embed=None,
ref_mels=None, target_mean=None, target_std=None, target_nonpadding=None, pitch=None, uv=None, energy=None,
is_training=True, fast_sampling=False, skip_decoder=False):
"""
:param src_tokens: [B, T]
:param mel2ph:
:param spk_embed:
:param ref_mels:
:param target_nonpadding: pre-computed mask for ref_mels
:return: {
'mel_out': [B, T_s, 80], 'dur': [B, T_t],
'w_st_pred': [heads, B, tokens], 'w_st': [heads, B, tokens],
'encoder_out_noref': [B, T_t, H]
}
"""
B, T_text = src_tokens.shape
ret = {}
encoder_outputs = self.encoder(src_tokens)
encoder_out = encoder_outputs['encoder_out'] # [T, B, C]
src_nonpadding = (src_tokens > 0).float().permute(1, 0)[:, :, None]
if hparams['use_spk_embed'] and spk_embed is not None:
spk_embed = self.spk_embed_proj(spk_embed)[None, :, :]
encoder_out += spk_embed
encoder_out = encoder_out * src_nonpadding # [T, B, C]
dur_input = encoder_out.transpose(0, 1)
if hparams['predictor_sg']:
dur_input = dur_input.detach()
if hparams['dur'] == 'mfa': # original FS2 with GT mel2ph
if mel2ph is None:
dur = self.dur_predictor.inference(dur_input, src_tokens == 0)
ret['phoneme_aligned'] = torch.repeat_interleave(src_tokens[0], dur[0]).unsqueeze(0)
if not hparams['sep_dur_loss']:
dur[src_tokens == self.dictionary.seg()] = 0
ret['mel2ph'] = mel2ph = self.length_regulator(dur, (src_tokens != 0).sum(-1))[..., 0]
else:
ret['dur'] = self.dur_predictor(dur_input, src_tokens == 0)
elif hparams['dur'] == 'mas': # modified FS2 with MAS, and without access to GT duration
import monotonic_align
if mel2ph is None:
dur = self.dur_predictor.inference(dur_input, src_tokens == 0)
ret['phoneme_aligned'] = torch.repeat_interleave(src_tokens[0], dur[0]).unsqueeze(0)
if not hparams['sep_dur_loss']:
dur[src_tokens == self.dictionary.seg()] = 0
ret['mel2ph'] = mel2ph = self.length_regulator(dur, (src_tokens != 0).sum(-1))[..., 0]
encoder_proj = self.encoder_proj(encoder_out)
encoder_proj_aligned = F.pad(encoder_proj, [0, 0, 0, 0, 1, 0])
mel2ph_ = mel2ph.permute([1, 0])[..., None].repeat([1, 1, encoder_proj_aligned.shape[-1]]).contiguous()
encoder_proj_aligned = torch.gather(encoder_proj_aligned, 0, mel2ph_).transpose(0, 1) # [B, T_mel, 80]
ret['encoder_proj_aligned'] = encoder_proj_aligned # for POC inference
else:
# make sure that we don't mistakenly use MFA mel2ph
mel2ph = None
# MAS requires [B, T_text, T_mel] "score-like" tensor to calculate optimal path
# build [B, T_text, T_mel] l2-distance tensor and run search
encoder_proj = self.encoder_proj(encoder_out) * src_nonpadding # [T_text, B, 80] * [T_text, B, 1]
# L2 distance btw [B, T_text, 80] and [B, T_mel, 80] with cdist
distance_matrix_l2 = torch.cdist(encoder_proj.transpose(0, 1), ref_mels, p=2) # [B, T_text, T_mel]
# search for optimal path btw encoder output & target mel using MAS, with negative distance as input
with torch.no_grad():
src_nonpadding_mas = (src_tokens > 0).float() # [B, T_text]
attn_mask = torch.unsqueeze(src_nonpadding_mas, -1) * torch.unsqueeze(target_nonpadding, 1) # [B, T_text, T_mel]
optimal_path = monotonic_align.maximum_path(-distance_matrix_l2, attn_mask) # [B, T_text, T_mel]
optimal_path = optimal_path.detach()
# get "GT" token duration from MAS: sum over T_mel dim to get linear duration & apply source mask
dur_gt_mas = torch.sum(optimal_path, -1).long() * src_nonpadding_mas.long()
# get mel2ph using dur_gt_mas with length regulator module, this overwrites MFA-given mel2ph
# duration loss will be computed with ret['dur'] and ret['mel2ph_mas']
ret['mel2ph_mas'] = mel2ph = self.length_regulator(dur_gt_mas, (src_tokens !=0).sum(-1))[..., 0]
encoder_proj_aligned = torch.matmul(optimal_path.transpose(1, 2), encoder_proj.transpose(0, 1)) # [B, T_mel, 80]
ret['encoder_proj_aligned'] = encoder_proj_aligned # to be used for encoder loss
ret['dur'] = self.dur_predictor(dur_input, src_tokens == 0)
# expand encoder out to make decoder inputs
decoder_inp = F.pad(encoder_out, [0, 0, 0, 0, 1, 0])
mel2ph_ = mel2ph.permute([1, 0])[..., None].repeat([1, 1, encoder_out.shape[-1]]).contiguous()
decoder_inp = torch.gather(decoder_inp, 0, mel2ph_).transpose(0, 1) # [B, T, H]
ret['decoder_inp_origin'] = decoder_inp_origin = decoder_inp
# add pitch embed
if hparams['use_pitch_embed']:
decoder_inp = decoder_inp + self.add_pitch(decoder_inp_origin, pitch, uv, mel2ph, ret)
# add energy embed
if hparams['use_energy_embed']:
decoder_inp = decoder_inp + self.add_energy(decoder_inp_origin, energy, ret)
if self.decoder_proj_dim != self.hidden_size:
decoder_inp = self.decoder_proj(decoder_inp) # [T, B, proj_dim]
decoder_inp = decoder_inp * (mel2ph != 0).float()[:, :, None]
ret['decoder_inp'] = decoder_inp
if skip_decoder:
return ret
# run diffusion
if self.use_phone_stat:
assert target_mean is not None and target_std is not None, "use_phone_stat is true but mean and std are None"
# expand target_mean and target_std accordingly
mel2ph__ = mel2ph.permute([1, 0])[..., None].repeat([1, 1, target_mean.shape[-1]]).contiguous()
target_mean_ = F.pad(target_mean.permute(1, 0, 2), [0, 0, 0, 0, 1, 0])
target_mean_ = torch.gather(target_mean_, 0, mel2ph__).transpose(0, 1)
target_std_ = F.pad(target_std.permute(1, 0, 2), [0, 0, 0, 0, 1, 0], value=1.)
target_std_ = torch.gather(target_std_, 0, mel2ph__).transpose(0, 1)
target_mean, target_std = target_mean_, target_std_
ret['target_mean_aligned'] = target_mean
ret['target_std_aligned'] = target_std
# use X-mu, zero-mean data for training, and add mean at the last step of diffusion
if is_training: # compute diffusion loss
t = torch.randint(0, len(self.diff_params.noise_schedule), [B])
noise_scale = self.diff_noise_level[t].unsqueeze(-1).unsqueeze(-1).to(src_tokens.device)
noise_scale_sqrt = noise_scale ** 0.5
if self.use_phone_stat:
noise = torch.randn_like(ref_mels) * target_std
noisy_mel = noise_scale_sqrt * (ref_mels - target_mean) + (1.0 - noise_scale) ** 0.5 * noise # use X - mu
else:
noise = torch.randn_like(ref_mels)
noisy_mel = noise_scale_sqrt * ref_mels + (1.0 - noise_scale) ** 0.5 * noise
noise_pred = self.decoder(noisy_mel, decoder_inp, target_mean, target_std, mel2ph, t) # we can still use mu and std as condition, this will predict N(0, Sigma)
ret['noise_pred'] = noise_pred
if target_mean is not None:
ret['noise_target'] = (noise) * (mel2ph != 0).float()[:, :, None] # apply mask to noise for correct loss, noise is already
else:
ret['noise_target'] = noise * (mel2ph != 0).float()[:, :, None]
else: # run reverse diffusion sampling
x_mel, x_mel_list = self.decoder.sample(decoder_inp, target_mean, target_std, mel2ph, fast_sampling=fast_sampling, return_all=True)
x_mel = x_mel * (mel2ph != 0).float()[:, :, None]
ret['mel_out'] = x_mel
return ret
def decode_with_pred_pitch(self, decoder_inp, mel2ph):
    """Decode a mel-spectrogram using pitch predicted by the model itself.

    :param decoder_inp: frame-level encoder output (B x T x C)
    :param mel2ph: frame-to-phoneme alignment; 0 marks padding frames
    :return: predicted mel-spectrogram with padded frames zeroed out
    """
    if hparams['use_ref_enc']:
        assert False
    # Mask of non-padding frames, broadcast over the channel dimension.
    frame_mask = (mel2ph != 0).float()[:, :, None]
    predicted_pitch = self.add_pitch(decoder_inp, None, None, mel2ph, {})
    hidden = (decoder_inp + self.pitch_do(predicted_pitch)) * frame_mask
    mel = self.mel_out(self.decoder(hidden))
    return mel * frame_mask
# run other modules
def add_energy(self, decoder_inp, energy, ret):
    """Predict frame energy (or use the ground-truth one) and embed it.

    Stores the raw prediction in ``ret['energy_pred']`` and returns the
    embedding of the quantized energy.
    """
    inp = decoder_inp.detach() if hparams['predictor_sg'] else decoder_inp
    energy_pred = self.energy_predictor(inp)[:, :, 0]
    ret['energy_pred'] = energy_pred
    if energy is None:
        energy = energy_pred
    # Quantize energy into 256 integer bins before the embedding lookup.
    bins = torch.div(energy * 256, 4, rounding_mode='floor')
    bins = torch.clamp(bins, min=0, max=255).long()
    return self.energy_embed(bins)
def add_pitch(self, decoder_inp_origin, pitch, uv, mel2ph, ret):
    # Predict pitch (f0) logits from the decoder input and return
    # dropout-regularized pitch embeddings. Ground-truth pitch/uv are used
    # when given (training); otherwise the prediction is used (inference).
    pp_inp = decoder_inp_origin
    if hparams['predictor_sg']:
        # Stop-gradient: keep the pitch predictor from updating the encoder.
        pp_inp = pp_inp.detach()
    ret['pitch_logits'] = pitch_logits = self.pitch_predictor(pp_inp)
    if pitch is not None:  # train
        # presumably -200 marks padded positions in ground-truth pitch — TODO confirm
        pitch_padding = pitch == -200
        pitch_restore = restore_pitch(pitch, uv if hparams['use_uv'] else None, hparams,
                                      pitch_padding=pitch_padding)
        ret['pitch'] = pitch_restore
        # Map continuous f0 to coarse bins for the embedding table.
        pitch_restore = f0_to_coarse_torch(pitch_restore)
        pitch_embed = self.pitch_embed(pitch_restore)
    else:  # test
        pitch_padding = (mel2ph == 0)
        # Channel 0: predicted pitch value; channel 1: voiced/unvoiced logit.
        pitch = pitch_logits[:, :, 0]
        uv = pitch_logits[:, :, 1] > 0
        if not hparams['use_uv']:
            # NOTE(review): low predicted pitch implies unvoiced — confirm threshold.
            uv = pitch < -3.5
        pitch_restore = restore_pitch(pitch, uv, hparams, pitch_padding=pitch_padding)
        ret['pitch'] = pitch_restore
        ret['uv'] = uv
        pitch_restore = f0_to_coarse_torch(pitch_restore)
        pitch_embed = self.pitch_embed(pitch_restore)
    return self.pitch_do(pitch_embed)
| 17,248 | 46.913889 | 171 | py |
NeuralSpeech | NeuralSpeech-master/PriorGrad-acoustic/tts_utils/stft.py | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import torch
import numpy as np
import torch.nn.functional as F
from torch.autograd import Variable
from scipy.signal import get_window
from librosa.util import pad_center, tiny
import librosa.util as librosa_util
from torchaudio.transforms import MelSpectrogram
def window_sumsquare(window, n_frames, hop_length=200, win_length=800,
                     n_fft=800, dtype=np.float32, norm=None):
    """
    # from librosa 0.6
    Compute the sum-square envelope of a window function at a given hop length.

    Used to estimate (and later undo) the modulation introduced by windowing
    in short-time Fourier transforms.

    Parameters
    ----------
    window : string, tuple, number, callable, or list-like
        Window specification, as in `get_window`
    n_frames : int > 0
        The number of analysis frames
    hop_length : int > 0
        The number of samples to advance between frames
    win_length : [optional]
        The length of the window function. By default, this matches `n_fft`.
    n_fft : int > 0
        The length of each analysis frame.
    dtype : np.dtype
        The data type of the output

    Returns
    -------
    wss : np.ndarray, shape=`(n_fft + hop_length * (n_frames - 1))`
        The sum-squared envelope of the window function
    """
    if win_length is None:
        win_length = n_fft
    total_len = n_fft + hop_length * (n_frames - 1)
    envelope = np.zeros(total_len, dtype=dtype)
    # Squared, normalized window, zero-padded to the FFT size.
    win_sq = get_window(window, win_length, fftbins=True)
    win_sq = librosa_util.normalize(win_sq, norm=norm) ** 2
    win_sq = librosa_util.pad_center(win_sq, n_fft)
    # Overlap-add the squared window at every frame offset.
    for frame in range(n_frames):
        start = frame * hop_length
        end = min(total_len, start + n_fft)
        envelope[start:end] += win_sq[:max(0, end - start)]
    return envelope
class STFT(torch.nn.Module):
    """adapted from Prem Seetharaman's https://github.com/pseeth/pytorch-stft"""

    def __init__(self, filter_length=800, hop_length=200, win_length=800,
                 window='hann'):
        super(STFT, self).__init__()
        self.filter_length = filter_length
        self.hop_length = hop_length
        self.win_length = win_length
        self.window = window
        self.forward_transform = None
        # Overlap factor; used to scale the pseudo-inverse basis below.
        scale = self.filter_length / self.hop_length
        # DFT of the identity gives the Fourier basis as explicit filters.
        fourier_basis = np.fft.fft(np.eye(self.filter_length))
        cutoff = int((self.filter_length / 2 + 1))
        # Stack real and imaginary parts as separate conv channels.
        fourier_basis = np.vstack([np.real(fourier_basis[:cutoff, :]),
                                   np.imag(fourier_basis[:cutoff, :])])
        forward_basis = torch.FloatTensor(fourier_basis[:, None, :])
        inverse_basis = torch.FloatTensor(
            np.linalg.pinv(scale * fourier_basis).T[:, None, :])
        if window is not None:
            # NOTE(review): pad_center pads the window up to filter_length,
            # which requires win_length <= filter_length; this assert looks
            # inverted for win_length > filter_length — confirm.
            assert (win_length >= filter_length)
            # get window and zero center pad it to filter_length
            fft_window = get_window(window, win_length, fftbins=True)
            fft_window = pad_center(fft_window, filter_length)
            fft_window = torch.from_numpy(fft_window).float()
            # window the bases
            forward_basis *= fft_window
            inverse_basis *= fft_window
        self.register_buffer('forward_basis', forward_basis.float())
        self.register_buffer('inverse_basis', inverse_basis.float())

    def transform(self, input_data):
        """Compute the STFT of ``input_data`` (B x T) via strided conv1d.

        Returns (magnitude, phase), each of shape B x (n_fft/2+1) x frames.
        """
        num_batches = input_data.size(0)
        num_samples = input_data.size(1)
        self.num_samples = num_samples
        # similar to librosa, reflect-pad the input
        input_data = input_data.view(num_batches, 1, num_samples)
        input_data = F.pad(
            input_data.unsqueeze(1),
            (int(self.filter_length / 2), int(self.filter_length / 2), 0, 0),
            mode='reflect')
        input_data = input_data.squeeze(1)
        # Convolution with the Fourier basis == windowed DFT per frame.
        forward_transform = F.conv1d(
            input_data,
            Variable(self.forward_basis, requires_grad=False),
            stride=self.hop_length,
            padding=0)
        cutoff = int((self.filter_length / 2) + 1)
        real_part = forward_transform[:, :cutoff, :]
        imag_part = forward_transform[:, cutoff:, :]
        magnitude = torch.sqrt(real_part ** 2 + imag_part ** 2)
        phase = torch.autograd.Variable(
            torch.atan2(imag_part.data, real_part.data))
        return magnitude, phase

    def inverse(self, magnitude, phase):
        """Reconstruct a waveform from (magnitude, phase) via transposed conv."""
        recombine_magnitude_phase = torch.cat(
            [magnitude * torch.cos(phase), magnitude * torch.sin(phase)], dim=1)
        inverse_transform = F.conv_transpose1d(
            recombine_magnitude_phase,
            Variable(self.inverse_basis, requires_grad=False),
            stride=self.hop_length,
            padding=0)
        if self.window is not None:
            # Undo the windowing modulation using the sum-square envelope.
            window_sum = window_sumsquare(
                self.window, magnitude.size(-1), hop_length=self.hop_length,
                win_length=self.win_length, n_fft=self.filter_length,
                dtype=np.float32)
            # remove modulation effects
            approx_nonzero_indices = torch.from_numpy(
                np.where(window_sum > tiny(window_sum))[0])
            window_sum = torch.autograd.Variable(
                torch.from_numpy(window_sum), requires_grad=False)
            window_sum = window_sum.cuda() if magnitude.is_cuda else window_sum
            inverse_transform[:, :, approx_nonzero_indices] /= window_sum[approx_nonzero_indices]
            # scale by hop ratio
            inverse_transform *= float(self.filter_length) / self.hop_length
        # Trim the reflect-padding added in transform().
        inverse_transform = inverse_transform[:, :, int(self.filter_length / 2):]
        inverse_transform = inverse_transform[:, :, :-int(self.filter_length / 2):]
        return inverse_transform

    def forward(self, input_data):
        # Round-trip STFT -> ISTFT; side effect: caches magnitude/phase on self.
        self.magnitude, self.phase = self.transform(input_data)
        reconstruction = self.inverse(self.magnitude, self.phase)
        return reconstruction
NeuralSpeech | NeuralSpeech-master/PriorGrad-acoustic/tts_utils/pl_utils.py | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import matplotlib
from torch.nn import DataParallel
from torch.nn.parallel import DistributedDataParallel
matplotlib.use('Agg')
import glob
import itertools
import subprocess
import threading
import traceback
from pytorch_lightning.callbacks import GradientAccumulationScheduler
from pytorch_lightning.callbacks import ModelCheckpoint
from functools import wraps
from torch.cuda._utils import _get_device_index
import numpy as np
import torch.optim
import torch.utils.data
import copy
import logging
import os
import re
import sys
import torch
import torch.distributed as dist
import torch.multiprocessing as mp
import tqdm
from torch.optim.optimizer import Optimizer
def get_a_var(obj):  # pragma: no cover
    """Return the first torch.Tensor found in *obj*, searching recursively
    through lists, tuples and dict items; None when there is no tensor."""
    if isinstance(obj, torch.Tensor):
        return obj
    if isinstance(obj, (list, tuple)):
        for candidate in map(get_a_var, obj):
            if isinstance(candidate, torch.Tensor):
                return candidate
    if isinstance(obj, dict):
        for candidate in map(get_a_var, obj.items()):
            if isinstance(candidate, torch.Tensor):
                return candidate
    return None
def data_loader(fn):
    """
    Decorator that turns a dataloader factory into a lazily-evaluated,
    memoized accessor: the wrapped ``fn`` runs once per instance and its
    result is cached on the instance under ``_lazy_<name>``.

    Test/val dataloaders returning a single non-list value are wrapped in a
    one-element list for a uniform interface.

    :param fn: zero-argument (besides ``self``) dataloader factory
    :return: the memoizing wrapper
    """
    attr_name = '_lazy_' + fn.__name__

    # BUGFIX: the original called ``wraps(fn)`` and discarded the result, so
    # the wrapper never received fn's __name__/__doc__. Apply it properly.
    @wraps(fn)
    def _get_data_loader(self):
        try:
            value = getattr(self, attr_name)
        except AttributeError:
            try:
                value = fn(self)  # Lazy evaluation, done only once.
                if (
                        value is not None and
                        not isinstance(value, list) and
                        fn.__name__ in ['test_dataloader', 'val_dataloader']
                ):
                    value = [value]
            except AttributeError as e:
                # Guard against AttributeError suppression. (Issue #142)
                traceback.print_exc()
                error = f'{fn.__name__}: An AttributeError was encountered: ' + str(e)
                raise RuntimeError(error) from e
            setattr(self, attr_name, value)  # Memoize evaluation.
        return value

    return _get_data_loader
def parallel_apply(modules, inputs, kwargs_tup=None, devices=None):  # pragma: no cover
    r"""Applies each `module` in :attr:`modules` in parallel on arguments
    contained in :attr:`inputs` (positional) and :attr:`kwargs_tup` (keyword)
    on each of :attr:`devices`.

    Unlike torch's version, this dispatches to Lightning's
    ``training_step``/``test_step``/``validation_step`` instead of ``forward``.

    Args:
        modules (Module): modules to be parallelized
        inputs (tensor): inputs to the modules
        devices (list of int or torch.device): CUDA devices
    :attr:`modules`, :attr:`inputs`, :attr:`kwargs_tup` (if given), and
    :attr:`devices` (if given) should all have same length. Moreover, each
    element of :attr:`inputs` can either be a single object as the only argument
    to a module, or a collection of positional arguments.
    """
    assert len(modules) == len(inputs)
    # Normalize kwargs/devices to one entry per module.
    if kwargs_tup is not None:
        assert len(modules) == len(kwargs_tup)
    else:
        kwargs_tup = ({},) * len(modules)
    if devices is not None:
        assert len(modules) == len(devices)
    else:
        devices = [None] * len(modules)
    devices = list(map(lambda x: _get_device_index(x, True), devices))
    lock = threading.Lock()
    results = {}
    # Propagate the caller's grad mode into the worker threads.
    grad_enabled = torch.is_grad_enabled()

    def _worker(i, module, input, kwargs, device=None):
        torch.set_grad_enabled(grad_enabled)
        if device is None:
            device = get_a_var(input).get_device()
        try:
            with torch.cuda.device(device):
                # this also avoids accidental slicing of `input` if it is a Tensor
                if not isinstance(input, (list, tuple)):
                    input = (input,)
                # ---------------
                # CHANGE
                if module.training:
                    output = module.training_step(*input, **kwargs)
                elif module.testing:
                    output = module.test_step(*input, **kwargs)
                else:
                    output = module.validation_step(*input, **kwargs)
                # ---------------
                with lock:
                    results[i] = output
        except Exception as e:
            # Store the exception; it is re-raised in the caller's thread below.
            with lock:
                results[i] = e

    # TODO: fix hack (maybe not a hack)
    # make sure each module knows what training state it's in...
    # fixes weird bug where copies are out of sync
    root_m = modules[0]
    for m in modules[1:]:
        m.training = root_m.training
        m.testing = root_m.testing
    if len(modules) > 1:
        # One thread per replica; joined before results are collected.
        threads = [threading.Thread(target=_worker,
                                    args=(i, module, input, kwargs, device))
                   for i, (module, input, kwargs, device) in
                   enumerate(zip(modules, inputs, kwargs_tup, devices))]
        for thread in threads:
            thread.start()
        for thread in threads:
            thread.join()
    else:
        _worker(0, modules[0], inputs[0], kwargs_tup[0], devices[0])
    # Collect outputs in input order, re-raising any worker exception.
    outputs = []
    for i in range(len(inputs)):
        output = results[i]
        if isinstance(output, Exception):
            raise output
        outputs.append(output)
    return outputs
def _find_tensors(obj): # pragma: no cover
r"""
Recursively find all tensors contained in the specified object.
"""
if isinstance(obj, torch.Tensor):
return [obj]
if isinstance(obj, (list, tuple)):
return itertools.chain(*map(_find_tensors, obj))
if isinstance(obj, dict):
return itertools.chain(*map(_find_tensors, obj.values()))
return []
class DDP(DistributedDataParallel):
    """
    Override the forward call in lightning so it goes to training and validation step respectively
    """

    def parallel_apply(self, replicas, inputs, kwargs):
        # Use the Lightning-aware parallel_apply defined in this module.
        return parallel_apply(replicas, inputs, kwargs, self.device_ids[:len(replicas)])

    def forward(self, *inputs, **kwargs):  # pragma: no cover
        # Keep replica parameters in sync before the step.
        self._sync_params()
        if self.device_ids:
            inputs, kwargs = self.scatter(inputs, kwargs, self.device_ids)
            if len(self.device_ids) == 1:
                # --------------
                # LIGHTNING MOD
                # --------------
                # normal
                # output = self.module(*inputs[0], **kwargs[0])
                # lightning: dispatch to the step matching the module's state.
                if self.module.training:
                    output = self.module.training_step(*inputs[0], **kwargs[0])
                elif self.module.testing:
                    output = self.module.test_step(*inputs[0], **kwargs[0])
                else:
                    output = self.module.validation_step(*inputs[0], **kwargs[0])
            else:
                outputs = self.parallel_apply(self._module_copies[:len(inputs)], inputs, kwargs)
                output = self.gather(outputs, self.output_device)
        else:
            # normal
            output = self.module(*inputs, **kwargs)
        if torch.is_grad_enabled():
            # We'll return the output object verbatim since it is a freeform
            # object. We need to find any tensors in this object, though,
            # because we need to figure out which parameters were used during
            # this forward pass, to ensure we short circuit reduction for any
            # unused parameters. Only if `find_unused_parameters` is set.
            if self.find_unused_parameters:
                self.reducer.prepare_for_backward(list(_find_tensors(output)))
            else:
                self.reducer.prepare_for_backward([])
        return output
class DP(DataParallel):
    """
    Override the forward call in lightning so it goes to training and validation step respectively
    """

    def forward(self, *inputs, **kwargs):
        if not self.device_ids:
            return self.module(*inputs, **kwargs)
        # All parameters/buffers must live on the source device before scatter.
        for t in itertools.chain(self.module.parameters(), self.module.buffers()):
            if t.device != self.src_device_obj:
                raise RuntimeError("module must have its parameters and buffers "
                                   "on device {} (device_ids[0]) but found one of "
                                   "them on device: {}".format(self.src_device_obj, t.device))
        inputs, kwargs = self.scatter(inputs, kwargs, self.device_ids)
        if len(self.device_ids) == 1:
            # lightning: single device — call the step method directly.
            if self.module.training:
                return self.module.training_step(*inputs[0], **kwargs[0])
            elif self.module.testing:
                return self.module.test_step(*inputs[0], **kwargs[0])
            else:
                return self.module.validation_step(*inputs[0], **kwargs[0])
        # Multi-device: replicate, run the Lightning-aware apply, gather.
        replicas = self.replicate(self.module, self.device_ids[:len(inputs)])
        outputs = self.parallel_apply(replicas, inputs, kwargs)
        return self.gather(outputs, self.output_device)

    def parallel_apply(self, replicas, inputs, kwargs):
        # Use the Lightning-aware parallel_apply defined in this module.
        return parallel_apply(replicas, inputs, kwargs, self.device_ids[:len(replicas)])
class GradientAccumulationScheduler:
    """Adjusts ``trainer.accumulate_grad_batches`` per epoch according to a
    ``{start_epoch: accumulation_factor}`` schedule (epochs indexed from 1)."""

    def __init__(self, scheduling: dict):
        if scheduling == {}:  # reject an empty schedule outright
            raise TypeError("Empty dict cannot be interpreted correct")
        # Both epochs and factors must be plain integers.
        if any(not isinstance(k, int) or not isinstance(v, int)
               for k, v in scheduling.items()):
            raise TypeError("All epoches and accumulation factor must be integers")
        minimal_epoch = min(scheduling)
        if minimal_epoch < 1:
            msg = f"Epochs indexing from 1, epoch {minimal_epoch} cannot be interpreted correct"
            raise IndexError(msg)
        if minimal_epoch != 1:
            # No factor given for epoch 1: default to no accumulation.
            scheduling.update({1: 1})
        self.scheduling = scheduling
        self.epochs = sorted(scheduling)

    def on_epoch_begin(self, epoch, trainer):
        current = epoch + 1  # epochs are 1-indexed in the schedule
        # Walk thresholds from largest down; the first one not exceeding
        # the current epoch determines the accumulation factor.
        for threshold in reversed(self.epochs):
            if current >= threshold:
                trainer.accumulate_grad_batches = self.scheduling.get(threshold)
                break
class LatestModelCheckpoint(ModelCheckpoint):
    """Checkpoint callback that keeps the newest step checkpoints plus one
    best-metric checkpoint, persisting the best value across restarts."""

    def __init__(self, filepath, monitor='val_loss', verbose=0, num_keep=5, save_weights_only=False,
                 mode='auto', period=1, prefix='model'):
        # NOTE: deliberately skips ModelCheckpoint.__init__ by calling the
        # grandparent initializer; all state is set up manually below.
        super(ModelCheckpoint, self).__init__()
        self.monitor = monitor
        self.verbose = verbose
        self.filepath = filepath
        os.makedirs(filepath, exist_ok=True)
        self.num_keep = num_keep
        self.save_weights_only = save_weights_only
        self.period = period
        self.epochs_since_last_check = 0
        self.prefix = prefix
        self.best_k_models = {}
        # {filename: monitor}
        self.kth_best_model = ''
        self.save_top_k = 1
        # Set externally by the training task before use.
        self.task = None
        # Resolve the comparison direction for the monitored metric.
        if mode == 'min':
            self.monitor_op = np.less
            self.best = np.Inf
            self.mode = 'min'
        elif mode == 'max':
            self.monitor_op = np.greater
            self.best = -np.Inf
            self.mode = 'max'
        else:
            # 'auto': infer direction from the metric name.
            if 'acc' in self.monitor or self.monitor.startswith('fmeasure'):
                self.monitor_op = np.greater
                self.best = -np.Inf
                self.mode = 'max'
            else:
                self.monitor_op = np.less
                self.best = np.Inf
                self.mode = 'min'
        # Restore the best metric value from a previous run, if present.
        if os.path.exists(f'{self.filepath}/best_valid.npy'):
            self.best = np.load(f'{self.filepath}/best_valid.npy')[0]

    def _save_model(self, filepath):
        dirpath = os.path.dirname(filepath)
        # make paths
        os.makedirs(dirpath, exist_ok=True)
        # delegate the saving to the model
        self.save_function(filepath)

    def get_all_ckpts(self):
        # All step checkpoints, newest (highest step) first.
        return sorted(glob.glob(f'{self.filepath}/{self.prefix}_ckpt_steps_*.ckpt'),
                      key=lambda x: -int(re.findall('.*steps\_(\d+)\.ckpt', x)[0]))

    def on_epoch_end(self, epoch, logs=None):
        logs = logs or {}
        self.epochs_since_last_check += 1
        best_filepath = f'{self.filepath}/{self.prefix}_ckpt_best.pt'
        if self.epochs_since_last_check >= self.period:
            self.epochs_since_last_check = 0
            filepath = f'{self.filepath}/{self.prefix}_ckpt_steps_{self.task.global_step}.ckpt'
            if self.verbose > 0:
                logging.info(f'Epoch {epoch:05d}@{self.task.global_step}: saving model to {filepath}')
            self._save_model(filepath)
            # NOTE(review): hard-coded [5:] ignores self.num_keep — confirm intent.
            for old_ckpt in self.get_all_ckpts()[5:]:
                subprocess.check_call(f'rm -rf "{old_ckpt}"', shell=True)
                if self.verbose > 0:
                    logging.info(f'Delete ckpt: {os.path.basename(old_ckpt)}')
            current = logs.get(self.monitor)
            if current is not None:
                if self.monitor_op(current, self.best):
                    self.best = current
                    if self.verbose > 0:
                        logging.info(
                            f'Epoch {epoch:05d}@{self.task.global_step}: {self.monitor} reached'
                            f' {current:0.5f} (best {self.best:0.5f}), saving model to'
                            f' {best_filepath} as top 1')
                    self._save_model(best_filepath)
                    # Persist the best value so it survives restarts.
                    np.save(f'{self.filepath}/best_valid.npy', [self.best])
class BaseTrainer:
def __init__(
        self,
        logger=True,
        checkpoint_callback=True,
        default_save_path=None,
        gradient_clip_val=0,
        process_position=0,
        gpus=-1,
        log_gpu_memory=None,
        show_progress_bar=True,
        track_grad_norm=-1,
        check_val_every_n_epoch=1,
        accumulate_grad_batches=1,
        max_updates=1000,
        min_epochs=1,
        val_check_interval=1.0,
        log_save_interval=100,
        row_log_interval=10,
        print_nan_grads=False,
        weights_summary='full',
        num_sanity_val_steps=5,
        resume_from_checkpoint=None,
):
    # Configure a stripped-down Lightning-style trainer. GPU availability
    # and device ids are derived from CUDA_VISIBLE_DEVICES below.
    self.log_gpu_memory = log_gpu_memory
    self.gradient_clip_val = gradient_clip_val
    self.check_val_every_n_epoch = check_val_every_n_epoch
    self.track_grad_norm = track_grad_norm
    self.on_gpu = True if (gpus and torch.cuda.is_available()) else False
    self.process_position = process_position
    self.weights_summary = weights_summary
    self.max_updates = max_updates
    self.min_epochs = min_epochs
    self.num_sanity_val_steps = num_sanity_val_steps
    self.print_nan_grads = print_nan_grads
    self.resume_from_checkpoint = resume_from_checkpoint
    self.default_save_path = default_save_path
    # training bookeeping
    self.total_batch_idx = 0
    self.running_loss = []
    self.avg_loss = 0
    self.batch_idx = 0
    self.tqdm_metrics = {}
    self.callback_metrics = {}
    self.num_val_batches = 0
    self.num_training_batches = 0
    self.num_test_batches = 0
    self.get_train_dataloader = None
    self.get_test_dataloaders = None
    self.get_val_dataloaders = None
    self.is_iterable_train_dataloader = False
    # training state
    self.model = None
    self.testing = False
    self.disable_validation = False
    self.lr_schedulers = []
    self.optimizers = None
    self.global_step = 0
    self.current_epoch = 0
    self.total_batches = 0
    # configure checkpoint callback; trainer supplies the save function.
    self.checkpoint_callback = checkpoint_callback
    self.checkpoint_callback.save_function = self.save_checkpoint
    self.weights_save_path = self.checkpoint_callback.filepath
    # accumulated grads (helper defined outside this view — TODO confirm)
    self.configure_accumulated_gradients(accumulate_grad_batches)
    # allow int, string and gpu list
    self.data_parallel_device_ids = [int(x) for x in os.environ["CUDA_VISIBLE_DEVICES"].split(",")]
    self.root_gpu = self.data_parallel_device_ids[0]
    # distributed backend choice: ddp for multi-GPU, dp otherwise.
    self.use_ddp = False
    self.use_dp = False
    self.single_gpu = False
    self.distributed_backend = 'ddp' if self.num_gpus > 0 else 'dp'
    self.set_distributed_mode(self.distributed_backend)
    self.proc_rank = 0
    self.world_size = 1
    self.node_rank = 0
    # can't init progress bar here because starting a new process
    # means the progress_bar won't survive pickling
    self.show_progress_bar = show_progress_bar
    # logging
    self.log_save_interval = log_save_interval
    self.val_check_interval = val_check_interval
    self.logger = logger
    self.logger.rank = 0
    self.row_log_interval = row_log_interval
@property
def num_gpus(self):
gpus = self.data_parallel_device_ids
if gpus is None:
return 0
else:
return len(gpus)
@property
def data_parallel(self):
return self.use_dp or self.use_ddp
def get_model(self):
    """Unwrap DP/DDP containers and return the underlying module."""
    if isinstance(self.model, (DDP, DP)):
        return self.model.module
    return self.model
# -----------------------------
# MODEL TRAINING
# -----------------------------
def fit(self, model):
    # Dispatch to the configured training entry point. DDP spawns one
    # process per GPU; DP and single-GPU run in-process.
    if self.use_ddp:
        mp.spawn(self.ddp_train, nprocs=self.num_gpus, args=(model,))
    elif self.use_dp:
        self.dp_train(model)
    elif self.single_gpu:
        self.single_gpu_train(model)
    else:
        # This trainer does not support CPU-only training.
        assert False, "GPU not found"
    return 1
def init_optimizers(self, optimizers):
    """Normalize the value returned by ``configure_optimizers`` into a
    ``(optimizer_list, lr_scheduler_list)`` pair."""
    # A bare optimizer instance.
    if isinstance(optimizers, Optimizer):
        return [optimizers], []
    # A ([optimizers], [schedulers]) pair.
    if len(optimizers) == 2 and isinstance(optimizers[0], list):
        opts, schedulers = optimizers
        return opts, schedulers
    # A plain list or tuple of optimizers.
    if isinstance(optimizers, (list, tuple)):
        return optimizers, []
def run_pretrain_routine(self, model):
    """Sanity check a few things before starting actual training.
    :param model:
    """
    ref_model = model
    if self.data_parallel:
        # Reach through the DP/DDP wrapper to the real module.
        ref_model = model.module
    # give model convenience properties
    ref_model.trainer = self
    # set local properties on the model
    self.copy_trainer_model_properties(ref_model)
    # link up experiment object
    if self.logger is not None:
        ref_model.logger = self.logger
        self.logger.save()
    if self.use_ddp:
        # Keep processes in lockstep before dataloader setup.
        dist.barrier()
    # set up checkpoint callback
    # self.configure_checkpoint_callback()
    # transfer data loaders from model
    # (get_dataloaders is defined outside this view — TODO confirm)
    self.get_dataloaders(ref_model)
    # track model now.
    # if cluster resets state, the model will update with the saved weights
    self.model = model
    # restore training and model before hpc call
    self.restore_weights(model)
    # when testing requested only run test and return
    if self.testing:
        self.run_evaluation(test=True)
        return
    # check if we should run validation during training
    self.disable_validation = self.num_val_batches == 0
    # run tiny validation (if validation defined)
    # to make sure program won't crash during val
    ref_model.on_sanity_check_start()
    ref_model.on_train_start()
    if not self.disable_validation and self.num_sanity_val_steps > 0:
        # init progress bars for validation sanity check
        pbar = tqdm.tqdm(desc='Validation sanity check',
                         total=self.num_sanity_val_steps * len(self.get_val_dataloaders()),
                         leave=False, position=2 * self.process_position,
                         disable=not self.show_progress_bar, dynamic_ncols=True, unit='batch')
        self.main_progress_bar = pbar
        # dummy validation progress bar
        self.val_progress_bar = tqdm.tqdm(disable=True)
        self.evaluate(model, self.get_val_dataloaders(), self.num_sanity_val_steps, self.testing)
        # close progress bars
        self.main_progress_bar.close()
        self.val_progress_bar.close()
    # init progress bar
    pbar = tqdm.tqdm(leave=True, position=2 * self.process_position,
                     disable=not self.show_progress_bar, dynamic_ncols=True, unit='batch',
                     file=sys.stdout)
    self.main_progress_bar = pbar
    # clear cache before training
    if self.on_gpu:
        torch.cuda.empty_cache()
    # CORE TRAINING LOOP
    self.train()
def test(self, model):
self.testing = True
self.fit(model)
@property
def training_tqdm_dict(self):
tqdm_dict = {
'step': '{}'.format(self.global_step),
}
tqdm_dict.update(self.tqdm_metrics)
return tqdm_dict
# --------------------
# restore ckpt
# --------------------
def restore_weights(self, model):
    """
    To restore weights we have two cases.
    First, attempt to restore hpc weights. If successful, don't restore
    other weights.
    Otherwise, try to restore actual weights
    :param model:
    :return:
    """
    # clear cache before restore
    if self.on_gpu:
        torch.cuda.empty_cache()
    if self.resume_from_checkpoint is not None:
        # Explicit checkpoint path takes precedence.
        self.restore(self.resume_from_checkpoint, on_gpu=self.on_gpu)
    else:
        # restore weights if same exp version
        self.restore_state_if_checkpoint_exists(model)
    # wait for all models to restore weights
    if self.use_ddp:
        # wait for all processes to catch up
        dist.barrier()
    # clear cache after restore
    if self.on_gpu:
        torch.cuda.empty_cache()
def restore_state_if_checkpoint_exists(self, model):
    # Find and restore the most recent step checkpoint in the checkpoint
    # directory, if any. Returns True when a restore happened.
    did_restore = False
    # do nothing if there's not dir or callback
    no_ckpt_callback = (self.checkpoint_callback is None) or (not self.checkpoint_callback)
    if no_ckpt_callback or not os.path.exists(self.checkpoint_callback.filepath):
        return did_restore
    # restore trainer state and model if there is a weight for this experiment
    last_steps = -1
    last_ckpt_name = None
    # find last epoch
    checkpoints = os.listdir(self.checkpoint_callback.filepath)
    print("available file lists:")
    print(checkpoints)
    for name in checkpoints:
        if '.ckpt' in name:
            if 'steps_' in name:
                # Parse the step count out of names like model_ckpt_steps_123.ckpt.
                steps = name.split('steps_')[1]
                steps = int(re.sub('[^0-9]', '', steps))
                if steps > last_steps:
                    last_steps = steps
                    last_ckpt_name = name
    # restore last checkpoint
    if last_ckpt_name is not None:
        last_ckpt_path = os.path.join(self.checkpoint_callback.filepath, last_ckpt_name)
        self.restore(last_ckpt_path, self.on_gpu)
        logging.info(f'model and trainer restored from checkpoint: {last_ckpt_path}')
        did_restore = True
    return did_restore
def restore(self, checkpoint_path, on_gpu):
    # Load a checkpoint into the model and the trainer (optimizers,
    # schedulers, step counters).
    checkpoint = torch.load(checkpoint_path, map_location='cpu')
    # load model state
    model = self.get_model()
    # load the state_dict on the model automatically
    model.load_state_dict(checkpoint['state_dict'])
    if on_gpu:
        model.cuda(self.root_gpu)
    # load training state (affects trainer only)
    self.restore_training_state(checkpoint)
    model.global_step = self.global_step
    # Free the (potentially large) checkpoint dict promptly.
    del checkpoint
    # NOTE(review): both branches below just return None; the rank check
    # appears vestigial — confirm intent.
    try:
        if dist.is_initialized() and dist.get_rank() > 0:
            return
    except Exception as e:
        print(e)
        return
def restore_training_state(self, checkpoint):
    """
    Restore trainer state.
    Model will get its change to update
    :param checkpoint:
    :return:
    """
    if self.checkpoint_callback is not None and self.checkpoint_callback is not False:
        # Restore the best monitored metric so checkpointing continues correctly.
        self.checkpoint_callback.best = checkpoint['checkpoint_callback_best']
    self.global_step = checkpoint['global_step']
    self.current_epoch = checkpoint['epoch']
    # restore the optimizers
    optimizer_states = checkpoint['optimizer_states']
    for optimizer, opt_state in zip(self.optimizers, optimizer_states):
        optimizer.load_state_dict(opt_state)
        # move optimizer to GPU 1 weight at a time
        # avoids OOM
        if self.root_gpu is not None:
            for state in optimizer.state.values():
                for k, v in state.items():
                    if isinstance(v, torch.Tensor):
                        state[k] = v.cuda(self.root_gpu)
    # restore the lr schedulers
    lr_schedulers = checkpoint['lr_schedulers']
    for scheduler, lrs_state in zip(self.lr_schedulers, lr_schedulers):
        scheduler.load_state_dict(lrs_state)
# --------------------
# MODEL SAVE CHECKPOINT
# --------------------
def _atomic_save(self, checkpoint, filepath):
"""Saves a checkpoint atomically, avoiding the creation of incomplete checkpoints.
This will create a temporary checkpoint with a suffix of ``.part``, then copy it to the final location once
saving is finished.
Args:
checkpoint (object): The object to save.
Built to be used with the ``dump_checkpoint`` method, but can deal with anything which ``torch.save``
accepts.
filepath (str|pathlib.Path): The path to which the checkpoint will be saved.
This points to the file that the checkpoint will be stored in.
"""
tmp_path = str(filepath) + ".part"
torch.save(checkpoint, tmp_path)
os.replace(tmp_path, filepath)
def save_checkpoint(self, filepath):
checkpoint = self.dump_checkpoint()
self._atomic_save(checkpoint, filepath)
def dump_checkpoint(self):
checkpoint = {
'epoch': self.current_epoch,
'global_step': self.global_step
}
if self.checkpoint_callback is not None and self.checkpoint_callback is not False:
checkpoint['checkpoint_callback_best'] = self.checkpoint_callback.best
# save optimizers
optimizer_states = []
for i, optimizer in enumerate(self.optimizers):
optimizer_states.append(optimizer.state_dict())
checkpoint['optimizer_states'] = optimizer_states
# save lr schedulers
lr_schedulers = []
for i, scheduler in enumerate(self.lr_schedulers):
lr_schedulers.append(scheduler.state_dict())
checkpoint['lr_schedulers'] = lr_schedulers
# add the hparams and state_dict from the model
model = self.get_model()
checkpoint['state_dict'] = model.state_dict()
# give the model a chance to add a few things
model.on_save_checkpoint(checkpoint)
return checkpoint
def copy_trainer_model_properties(self, model):
    """Mirror the trainer's runtime flags onto both the (possibly wrapped)
    model and the underlying module."""
    ref_model = model.module if isinstance(model, (DP, DDP)) else model
    for target in [model, ref_model]:
        target.trainer = self
        target.on_gpu = self.on_gpu
        target.use_dp = self.use_dp
        target.use_ddp = self.use_ddp
        target.testing = self.testing
        target.single_gpu = self.single_gpu
def transfer_batch_to_gpu(self, batch, gpu_id):
# base case: object can be directly moved using `cuda` or `to`
if callable(getattr(batch, 'cuda', None)):
return batch.cuda(gpu_id)
elif callable(getattr(batch, 'to', None)):
return batch.to(torch.device('cuda', gpu_id))
# when list
elif isinstance(batch, list):
for i, x in enumerate(batch):
batch[i] = self.transfer_batch_to_gpu(x, gpu_id)
return batch
# when tuple
elif isinstance(batch, tuple):
batch = list(batch)
for i, x in enumerate(batch):
batch[i] = self.transfer_batch_to_gpu(x, gpu_id)
return tuple(batch)
# when dict
elif isinstance(batch, dict):
for k, v in batch.items():
batch[k] = self.transfer_batch_to_gpu(v, gpu_id)
return batch
# nothing matches, return the value as is without transform
return batch
def single_gpu_train(self, model):
# CHOOSE OPTIMIZER
# allow for lr schedulers as well
self.optimizers, self.lr_schedulers = self.init_optimizers(model.configure_optimizers())
model.cuda(self.root_gpu)
self.run_pretrain_routine(model)
def dp_train(self, model):
    """Train with DataParallel across all configured device ids."""
    # CHOOSE OPTIMIZER — allow for lr schedulers as well.
    self.optimizers, self.lr_schedulers = self.init_optimizers(model.configure_optimizers())
    model.cuda(self.root_gpu)
    # Wrap the model over the list of device ids.
    model = DP(model, device_ids=self.data_parallel_device_ids)
    self.run_pretrain_routine(model)
def set_distributed_mode(self, distributed_backend):
    # Decide between single-GPU, DP, and DDP based on visible GPU count
    # and the requested backend.
    # skip for CPU
    if self.num_gpus == 0:
        return
    # single GPU case
    # in single gpu case we allow ddp so we can train on multiple
    # nodes, 1 gpu per node
    elif self.num_gpus == 1:
        self.single_gpu = True
        self.use_dp = False
        self.use_ddp = False
        self.root_gpu = 0
        self.data_parallel_device_ids = [0]
    else:
        if distributed_backend is not None:
            self.use_dp = distributed_backend == 'dp'
            self.use_ddp = distributed_backend == 'ddp'
        elif distributed_backend is None:
            # NOTE(review): unreachable from __init__ (backend is always
            # 'ddp' or 'dp' there) — confirm whether other callers pass None.
            self.use_dp = True
            self.use_ddp = False
    logging.info(f'gpu available: {torch.cuda.is_available()}, used: {self.on_gpu}')
def ddp_train(self, gpu_idx, model):
    """
    Entry point into a DP thread
    :param gpu_idx:
    :param model:
    :param cluster_obj:
    :return:
    """
    # otherwise default to node rank 0 (single-node setup only)
    self.node_rank = 0
    # show progressbar only on progress_rank 0
    self.show_progress_bar = self.show_progress_bar and self.node_rank == 0 and gpu_idx == 0
    # determine which process we are and world size
    if self.use_ddp:
        self.proc_rank = self.node_rank * self.num_gpus + gpu_idx
        self.world_size = self.num_gpus
    # let the exp know the rank to avoid overwriting logs
    if self.logger is not None:
        self.logger.rank = self.proc_rank
    # set up server using proc 0's ip address
    # try to init for 20 times at max in case ports are taken
    # where to store ip_table
    model.trainer = self
    model.init_ddp_connection(self.proc_rank, self.world_size)
    # CHOOSE OPTIMIZER
    # allow for lr schedulers as well
    self.optimizers, self.lr_schedulers = self.init_optimizers(model.configure_optimizers())
    # MODEL
    # copy model to each gpu
    if self.distributed_backend == 'ddp':
        torch.cuda.set_device(gpu_idx)
        model.cuda(gpu_idx)
    # set model properties before going into wrapper
    self.copy_trainer_model_properties(model)
    # override root GPU
    self.root_gpu = gpu_idx
    if self.distributed_backend == 'ddp':
        device_ids = [gpu_idx]
    else:
        device_ids = None
    # allow user to configure ddp
    model = model.configure_ddp(model, device_ids)
    # continue training routine
    self.run_pretrain_routine(model)
def resolve_root_node_address(self, root_node):
if '[' in root_node:
name = root_node.split('[')[0]
number = root_node.split(',')[0]
if '-' in number:
number = number.split('-')[0]
number = re.sub('[^0-9]', '', number)
root_node = name + number
return root_node
def log_metrics(self, metrics, grad_norm_dic, step=None):
"""Logs the metric dict passed in.
:param metrics:
:param grad_norm_dic:
"""
# added metrics by Lightning for convenience
metrics['epoch'] = self.current_epoch
# add norms
metrics.update(grad_norm_dic)
# turn all tensors to scalars
scalar_metrics = self.metrics_to_scalars(metrics)
step = step if step is not None else self.global_step
# log actual metrics
if self.proc_rank == 0 and self.logger is not None:
self.logger.log_metrics(scalar_metrics, step=step)
self.logger.save()
def add_tqdm_metrics(self, metrics):
for k, v in metrics.items():
if type(v) is torch.Tensor:
v = v.item()
self.tqdm_metrics[k] = v
def metrics_to_scalars(self, metrics):
new_metrics = {}
for k, v in metrics.items():
if isinstance(v, torch.Tensor):
v = v.item()
if type(v) is dict:
v = self.metrics_to_scalars(v)
new_metrics[k] = v
return new_metrics
  def process_output(self, output, train=False):
    """Reduces output according to the training mode.
    Separates loss from logging and tqdm metrics
    :param output: dict from a step hook (or, in training, possibly a bare loss tensor)
    :param train: True when processing a training step (enables DP reduction and loss extraction)
    :return: tuple (loss, progress_bar_metrics, log_metrics, callback_metrics, hiddens)
    """
    # ---------------
    # EXTRACT CALLBACK KEYS
    # ---------------
    # all keys not progress_bar or log are candidates for callbacks
    callback_metrics = {}
    for k, v in output.items():
      if k not in ['progress_bar', 'log', 'hiddens']:
        callback_metrics[k] = v
    if train and self.use_dp:
      num_gpus = self.num_gpus
      callback_metrics = self.reduce_distributed_output(callback_metrics, num_gpus)
    for k, v in callback_metrics.items():
      if isinstance(v, torch.Tensor):
        callback_metrics[k] = v.item()
    # ---------------
    # EXTRACT PROGRESS BAR KEYS
    # ---------------
    try:
      progress_output = output['progress_bar']
      # reduce progress metrics for tqdm when using dp
      if train and self.use_dp:
        num_gpus = self.num_gpus
        progress_output = self.reduce_distributed_output(progress_output, num_gpus)
      progress_bar_metrics = progress_output
    except Exception:
      # key absent (or output is a bare tensor) -> no progress-bar metrics
      progress_bar_metrics = {}
    # ---------------
    # EXTRACT LOGGING KEYS
    # ---------------
    # extract metrics to log to experiment
    try:
      log_output = output['log']
      # reduce progress metrics for tqdm when using dp
      if train and self.use_dp:
        num_gpus = self.num_gpus
        log_output = self.reduce_distributed_output(log_output, num_gpus)
      log_metrics = log_output
    except Exception:
      log_metrics = {}
    # ---------------
    # EXTRACT LOSS
    # ---------------
    # if output dict doesn't have the keyword loss
    # then assume the output=loss if scalar
    loss = None
    if train:
      try:
        loss = output['loss']
      except Exception:
        if type(output) is torch.Tensor:
          loss = output
        else:
          raise RuntimeError(
            'No `loss` value in the dictionary returned from `model.training_step()`.'
          )
      # when using dp need to reduce the loss
      if self.use_dp:
        loss = self.reduce_distributed_output(loss, self.num_gpus)
    # ---------------
    # EXTRACT HIDDEN
    # ---------------
    hiddens = output.get('hiddens')
    # use every metric passed in as a candidate for callback
    callback_metrics.update(progress_bar_metrics)
    callback_metrics.update(log_metrics)
    # convert tensors to numpy
    for k, v in callback_metrics.items():
      if isinstance(v, torch.Tensor):
        callback_metrics[k] = v.item()
    return loss, progress_bar_metrics, log_metrics, callback_metrics, hiddens
def reduce_distributed_output(self, output, num_gpus):
if num_gpus <= 1:
return output
# when using DP, we get one output per gpu
# average outputs and return
if type(output) is torch.Tensor:
return output.mean()
for k, v in output.items():
# recurse on nested dics
if isinstance(output[k], dict):
output[k] = self.reduce_distributed_output(output[k], num_gpus)
# do nothing when there's a scalar
elif isinstance(output[k], torch.Tensor) and output[k].dim() == 0:
pass
# reduce only metrics that have the same number of gpus
elif output[k].size(0) == num_gpus:
reduced = torch.mean(output[k])
output[k] = reduced
return output
def clip_gradients(self):
if self.gradient_clip_val > 0:
model = self.get_model()
torch.nn.utils.clip_grad_norm_(model.parameters(), self.gradient_clip_val)
def print_nan_gradients(self):
model = self.get_model()
for param in model.parameters():
if (param.grad is not None) and torch.isnan(param.grad.float()).any():
logging.info(param, param.grad)
def configure_accumulated_gradients(self, accumulate_grad_batches):
self.accumulate_grad_batches = None
if isinstance(accumulate_grad_batches, dict):
self.accumulation_scheduler = GradientAccumulationScheduler(accumulate_grad_batches)
elif isinstance(accumulate_grad_batches, int):
schedule = {1: accumulate_grad_batches}
self.accumulation_scheduler = GradientAccumulationScheduler(schedule)
else:
raise TypeError("Gradient accumulation supports only int and dict types")
def get_dataloaders(self, model):
self.init_train_dataloader(model)
self.init_test_dataloader(model)
self.init_val_dataloader(model)
if self.use_ddp:
dist.barrier()
self.get_train_dataloader()
self.get_test_dataloaders()
self.get_val_dataloaders()
def init_train_dataloader(self, model):
self.fisrt_epoch = True
self.get_train_dataloader = model.train_dataloader
if isinstance(self.get_train_dataloader(), torch.utils.data.DataLoader):
self.num_training_batches = len(self.get_train_dataloader())
self.num_training_batches = int(self.num_training_batches)
else:
self.num_training_batches = float('inf')
self.is_iterable_train_dataloader = True
if isinstance(self.val_check_interval, int):
self.val_check_batch = self.val_check_interval
else:
self._percent_range_check('val_check_interval')
self.val_check_batch = int(self.num_training_batches * self.val_check_interval)
self.val_check_batch = max(1, self.val_check_batch)
def init_val_dataloader(self, model):
self.get_val_dataloaders = model.val_dataloader
self.num_val_batches = 0
if self.get_val_dataloaders() is not None:
if isinstance(self.get_val_dataloaders()[0], torch.utils.data.DataLoader):
self.num_val_batches = sum(len(dataloader) for dataloader in self.get_val_dataloaders())
self.num_val_batches = int(self.num_val_batches)
else:
self.num_val_batches = float('inf')
def init_test_dataloader(self, model):
self.get_test_dataloaders = model.test_dataloader
if self.get_test_dataloaders() is not None:
if isinstance(self.get_test_dataloaders()[0], torch.utils.data.DataLoader):
self.num_test_batches = sum(len(dataloader) for dataloader in self.get_test_dataloaders())
self.num_test_batches = int(self.num_test_batches)
else:
self.num_test_batches = float('inf')
def evaluate(self, model, dataloaders, max_batches, test=False):
"""Run evaluation code.
:param model: PT model
:param dataloaders: list of PT dataloaders
:param max_batches: Scalar
:param test: boolean
:return:
"""
# enable eval mode
model.zero_grad()
model.eval()
# copy properties for forward overrides
self.copy_trainer_model_properties(model)
# disable gradients to save memory
torch.set_grad_enabled(False)
if test:
self.get_model().test_start()
# bookkeeping
outputs = []
# run training
for dataloader_idx, dataloader in enumerate(dataloaders):
dl_outputs = []
for batch_idx, batch in enumerate(dataloader):
if batch is None: # pragma: no cover
continue
# stop short when on fast_dev_run (sets max_batch=1)
if batch_idx >= max_batches:
break
# -----------------
# RUN EVALUATION STEP
# -----------------
output = self.evaluation_forward(model,
batch,
batch_idx,
dataloader_idx,
test)
# track outputs for collation
dl_outputs.append(output)
# batch done
if test:
self.test_progress_bar.update(1)
else:
self.val_progress_bar.update(1)
self.main_progress_bar.update(1)
outputs.append(dl_outputs)
# with a single dataloader don't pass an array
if len(dataloaders) == 1:
outputs = outputs[0]
# give model a chance to do something with the outputs (and method defined)
model = self.get_model()
if test:
eval_results_ = model.test_end(outputs)
else:
eval_results_ = model.validation_end(outputs)
if eval_results_ is not None:
eval_results = eval_results_
# enable train mode again
model.train()
# enable gradients to save memory
torch.set_grad_enabled(True)
return eval_results
  def run_evaluation(self, test=False):
    """Run one full validation (or test) pass: evaluate, log metrics, checkpoint."""
    # when testing make sure user defined a test step
    model = self.get_model()
    model.on_pre_performance_check()
    # select dataloaders
    if test:
      dataloaders = self.get_test_dataloaders()
      max_batches = self.num_test_batches
    else:
      # val
      dataloaders = self.get_val_dataloaders()
      max_batches = self.num_val_batches
    # init validation or test progress bar
    # main progress bar will already be closed when testing so initial position is free
    position = 2 * self.process_position + (not test)
    desc = 'Testing' if test else 'Validating'
    pbar = tqdm.tqdm(desc=desc, total=max_batches, leave=test, position=position,
                     disable=not self.show_progress_bar, dynamic_ncols=True,
                     unit='batch', file=sys.stdout)
    setattr(self, f'{"test" if test else "val"}_progress_bar', pbar)
    # run evaluation
    eval_results = self.evaluate(self.model,
                                 dataloaders,
                                 max_batches,
                                 test)
    _, prog_bar_metrics, log_metrics, callback_metrics, _ = self.process_output(
      eval_results)
    # add metrics to prog bar
    self.add_tqdm_metrics(prog_bar_metrics)
    # log metrics
    self.log_metrics(log_metrics, {})
    # track metrics for callbacks
    self.callback_metrics.update(callback_metrics)
    # hook
    model.on_post_performance_check()
    # add model specific metrics
    tqdm_metrics = self.training_tqdm_dict
    if not test:
      self.main_progress_bar.set_postfix(**tqdm_metrics)
    # close progress bar
    if test:
      self.test_progress_bar.close()
    else:
      self.val_progress_bar.close()
    # model checkpointing (rank 0 only; tests never checkpoint)
    if self.proc_rank == 0 and self.checkpoint_callback is not None and not test:
      self.checkpoint_callback.on_epoch_end(epoch=self.current_epoch,
                                            logs=self.callback_metrics)
def evaluation_forward(self, model, batch, batch_idx, dataloader_idx, test=False):
# make dataloader_idx arg in validation_step optional
args = [batch, batch_idx]
if test and len(self.get_test_dataloaders()) > 1:
args.append(dataloader_idx)
elif not test and len(self.get_val_dataloaders()) > 1:
args.append(dataloader_idx)
# handle DP, DDP forward
if self.use_ddp or self.use_dp:
output = model(*args)
return output
# single GPU
if self.single_gpu:
# for single GPU put inputs on gpu manually
root_gpu = 0
if isinstance(self.data_parallel_device_ids, list):
root_gpu = self.data_parallel_device_ids[0]
batch = self.transfer_batch_to_gpu(batch, root_gpu)
args[0] = batch
# CPU
if test:
output = model.test_step(*args)
else:
output = model.validation_step(*args)
return output
  def train(self):
    """Main fit loop: run epochs until run_training_epoch stops on max_updates."""
    model = self.get_model()
    # run all epochs (effectively unbounded; run_training_epoch exits on max_updates)
    for epoch in range(self.current_epoch, 1000000):
      # set seed for distributed sampler (enables shuffling for each epoch)
      if self.use_ddp and hasattr(self.get_train_dataloader().sampler, 'set_epoch'):
        self.get_train_dataloader().sampler.set_epoch(epoch)
      # get model
      model = self.get_model()
      # update training progress in trainer and model
      model.current_epoch = epoch
      self.current_epoch = epoch
      total_val_batches = 0
      if not self.disable_validation:
        # val can be checked multiple times in epoch
        is_val_epoch = (self.current_epoch + 1) % self.check_val_every_n_epoch == 0
        val_checks_per_epoch = self.num_training_batches // self.val_check_batch
        val_checks_per_epoch = val_checks_per_epoch if is_val_epoch else 0
        total_val_batches = self.num_val_batches * val_checks_per_epoch
      # total batches includes multiple val checks
      self.total_batches = self.num_training_batches + total_val_batches
      self.batch_loss_value = 0 # accumulated grads
      if self.is_iterable_train_dataloader:
        # for iterable train loader, the progress bar never ends
        num_iterations = None
      else:
        num_iterations = self.total_batches
      # reset progress bar
      # .reset() doesn't work on disabled progress bar so we should check
      if not self.main_progress_bar.disable:
        self.main_progress_bar.reset(num_iterations)
      desc = f'Epoch {epoch + 1}' if not self.is_iterable_train_dataloader else ''
      self.main_progress_bar.set_description(desc)
      # changing gradient according accumulation_scheduler
      self.accumulation_scheduler.on_epoch_begin(epoch, self)
      # -----------------
      # RUN TNG EPOCH
      # -----------------
      self.run_training_epoch()
      # update LR schedulers
      if self.lr_schedulers is not None:
        for lr_scheduler in self.lr_schedulers:
          lr_scheduler.step(epoch=self.current_epoch)
    self.main_progress_bar.close()
    model.on_train_end()
    if self.logger is not None:
      self.logger.finalize("success")
  def run_training_epoch(self):
    """Iterate the train dataloader for one epoch, interleaving val checks and logging."""
    # before epoch hook
    if self.is_function_implemented('on_epoch_start'):
      model = self.get_model()
      model.on_epoch_start()
    # run epoch
    for batch_idx, batch in enumerate(self.get_train_dataloader()):
      # stop epoch if we limited the number of training batches
      if batch_idx >= self.num_training_batches:
        break
      self.batch_idx = batch_idx
      model = self.get_model()
      model.global_step = self.global_step
      # ---------------
      # RUN TRAIN STEP
      # ---------------
      output = self.run_training_batch(batch, batch_idx)
      batch_result, grad_norm_dic, batch_step_metrics = output
      # when returning -1 from train_step, we end epoch early
      early_stop_epoch = batch_result == -1
      # ---------------
      # RUN VAL STEP
      # ---------------
      # skip the very first opportunity (fisrt_epoch flag set by init_train_dataloader)
      should_check_val = (
        not self.disable_validation and self.global_step % self.val_check_batch == 0 and not self.fisrt_epoch)
      self.fisrt_epoch = False
      if should_check_val:
        self.run_evaluation(test=self.testing)
      # when logs should be saved
      should_save_log = (batch_idx + 1) % self.log_save_interval == 0 or early_stop_epoch
      if should_save_log:
        if self.proc_rank == 0 and self.logger is not None:
          self.logger.save()
      # when metrics should be logged
      should_log_metrics = batch_idx % self.row_log_interval == 0 or early_stop_epoch
      if should_log_metrics:
        # logs user requested information to logger
        self.log_metrics(batch_step_metrics, grad_norm_dic)
      self.global_step += 1
      self.total_batch_idx += 1
      # end epoch early
      # stop when the flag is changed or we've gone past the amount
      # requested in the batches
      if early_stop_epoch:
        break
      if self.global_step > self.max_updates:
        # hard stop of the whole process once the update budget is exhausted
        print("| Training end..")
        exit()
    # epoch end hook
    if self.is_function_implemented('on_epoch_end'):
      model = self.get_model()
      model.on_epoch_end()
  def run_training_batch(self, batch, batch_idx):
    """Run one training step per optimizer for one batch, accumulating gradients.

    :return: tuple (signal, grad_norm_dic, all_log_metrics); signal is -1 when
      on_batch_start asked to stop the epoch early, else 0
    """
    # track grad norms
    grad_norm_dic = {}
    # track all metrics for callbacks
    all_callback_metrics = []
    # track metrics to log
    all_log_metrics = []
    if batch is None:
      return 0, grad_norm_dic, {}
    # hook
    if self.is_function_implemented('on_batch_start'):
      model_ref = self.get_model()
      response = model_ref.on_batch_start(batch)
      if response == -1:
        return -1, grad_norm_dic, {}
    splits = [batch]
    self.hiddens = None
    for split_idx, split_batch in enumerate(splits):
      self.split_idx = split_idx
      # call training_step once per optimizer
      for opt_idx, optimizer in enumerate(self.optimizers):
        # make sure only the gradients of the current optimizer's paramaters are calculated
        # in the training step to prevent dangling gradients in multiple-optimizer setup.
        if len(self.optimizers) > 1:
          for param in self.get_model().parameters():
            param.requires_grad = False
          for group in optimizer.param_groups:
            for param in group['params']:
              param.requires_grad = True
        # wrap the forward step in a closure so second order methods work
        def optimizer_closure():
          # forward pass
          output = self.training_forward(
            split_batch, batch_idx, opt_idx, self.hiddens)
          closure_loss = output[0]
          progress_bar_metrics = output[1]
          log_metrics = output[2]
          callback_metrics = output[3]
          self.hiddens = output[4]
          if closure_loss is None:
            return None
          # accumulate loss
          # (if accumulate_grad_batches = 1 no effect)
          closure_loss = closure_loss / self.accumulate_grad_batches
          # backward pass
          model_ref = self.get_model()
          if closure_loss.requires_grad:
            model_ref.backward(closure_loss, optimizer)
          # track metrics for callbacks
          all_callback_metrics.append(callback_metrics)
          # track progress bar metrics
          self.add_tqdm_metrics(progress_bar_metrics)
          all_log_metrics.append(log_metrics)
          # insert after step hook
          if self.is_function_implemented('on_after_backward'):
            model_ref = self.get_model()
            model_ref.on_after_backward()
          return closure_loss
        # calculate loss
        loss = optimizer_closure()
        if loss is None:
          continue
        # nan grads
        if self.print_nan_grads:
          self.print_nan_gradients()
        # track total loss for logging (avoid mem leaks)
        self.batch_loss_value += loss.item()
        # gradient update with accumulated gradients
        if (self.batch_idx + 1) % self.accumulate_grad_batches == 0:
          # track gradient norms when requested
          if batch_idx % self.row_log_interval == 0:
            if self.track_grad_norm > 0:
              model = self.get_model()
              grad_norm_dic = model.grad_norm(
                self.track_grad_norm)
          # clip gradients
          self.clip_gradients()
          # calls .step(), .zero_grad()
          # override function to modify this behavior
          model = self.get_model()
          model.optimizer_step(self.current_epoch, batch_idx, optimizer, opt_idx)
          # calculate running loss for display
          self.running_loss.append(self.batch_loss_value)
          self.batch_loss_value = 0
          self.avg_loss = np.mean(self.running_loss[-100:])
    # activate batch end hook
    if self.is_function_implemented('on_batch_end'):
      model = self.get_model()
      model.on_batch_end()
    # update progress bar
    self.main_progress_bar.update(1)
    self.main_progress_bar.set_postfix(**self.training_tqdm_dict)
    # collapse all metrics into one dict
    all_log_metrics = {k: v for d in all_log_metrics for k, v in d.items()}
    # track all metrics for callbacks
    self.callback_metrics.update({k: v for d in all_callback_metrics for k, v in d.items()})
    return 0, grad_norm_dic, all_log_metrics
  def training_forward(self, batch, batch_idx, opt_idx, hiddens):
    """
    Handle forward for each training case (distributed, single gpu, etc...)
    :param batch:
    :param batch_idx:
    :return: output of process_output(..., train=True)
    """
    # ---------------
    # FORWARD
    # ---------------
    # enable not needing to add opt_idx to training_step
    args = [batch, batch_idx, opt_idx]
    # distributed forward (DP/DDP wrapper dispatches to training_step)
    if self.use_ddp or self.use_dp:
      output = self.model(*args)
    # single GPU forward
    elif self.single_gpu:
      gpu_id = 0
      if isinstance(self.data_parallel_device_ids, list):
        gpu_id = self.data_parallel_device_ids[0]
      batch = self.transfer_batch_to_gpu(copy.copy(batch), gpu_id)
      args[0] = batch
      output = self.model.training_step(*args)
    # CPU forward
    else:
      output = self.model.training_step(*args)
    # allow any mode to define training_end
    model_ref = self.get_model()
    output_ = model_ref.training_end(output)
    if output_ is not None:
      output = output_
    # format and reduce outputs accordingly
    output = self.process_output(output, train=True)
    return output
# ---------------
# Utils
# ---------------
def is_function_implemented(self, f_name):
model = self.get_model()
f_op = getattr(model, f_name, None)
return callable(f_op)
def _percent_range_check(self, name):
value = getattr(self, name)
msg = f"`{name}` must lie in the range [0.0, 1.0], but got {value:.3f}."
if name == "val_check_interval":
msg += " If you want to disable validation set `val_percent_check` to 0.0 instead."
if not 0. <= value <= 1.:
raise ValueError(msg)
| 58,649 | 34.959534 | 122 | py |
NeuralSpeech | NeuralSpeech-master/PriorGrad-acoustic/tts_utils/audio.py | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import traceback
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import librosa
import librosa.filters
import numpy as np
import torch
from scipy import signal
from scipy.io import wavfile
def save_wav(wav, path, sr, norm=False):
    """Write a float waveform to *path* as 16-bit PCM.

    :param wav: 1-D float array (expected roughly in [-1, 1])
    :param path: output .wav file path
    :param sr: sample rate in Hz
    :param norm: if True, peak-normalize before writing
    """
    if norm:
        wav = wav / np.abs(wav).max()
    # bug fix: scale out-of-place — the original `wav *= 32767` mutated the
    # caller's array whenever norm=False
    wav = wav * 32767
    # proposed by @dsmiller
    wavfile.write(path, sr, wav.astype(np.int16))
def get_hop_size(hparams):
    """Return the hop size in samples, deriving it from frame_shift_ms when unset."""
    if hparams['hop_size'] is not None:
        return hparams['hop_size']
    assert hparams['frame_shift_ms'] is not None
    return int(hparams['frame_shift_ms'] / 1000 * hparams['audio_sample_rate'])
###########################################################################################
def griffin_lim(S, hparams):
    """Reconstruct a waveform from a magnitude spectrogram via Griffin-Lim.

    :param S: magnitude spectrogram (freq x frames)
    :param hparams: dict providing 'griffin_lim_iters' plus the STFT settings
    :return: time-domain signal
    """
    # start from random phases
    angles = np.exp(2j * np.pi * np.random.rand(*S.shape))
    # bug fix: `np.complex` was removed in NumPy 1.24; the builtin complex
    # type is the documented replacement
    S_complex = np.abs(S).astype(complex)
    y = _istft(S_complex * angles, hparams)
    for i in range(hparams['griffin_lim_iters']):
        # re-estimate phases from the current signal, keep the target magnitudes
        angles = np.exp(1j * np.angle(_stft(y, hparams)))
        y = _istft(S_complex * angles, hparams)
    return y
def preemphasis(wav, k, preemphasize=True):
    """Apply a first-order pre-emphasis filter: y[n] = x[n] - k*x[n-1]."""
    if not preemphasize:
        return wav
    return signal.lfilter([1, -k], [1], wav)
def inv_preemphasis(wav, k, inv_preemphasize=True):
    """Undo pre-emphasis via the inverse filter: y[n] = x[n] + k*y[n-1]."""
    if not inv_preemphasize:
        return wav
    return signal.lfilter([1], [1, -k], wav)
def _stft(y, hparams):
    # short-time Fourier transform; constant (zero) edge padding
    return librosa.stft(y=y, n_fft=hparams['n_fft'], hop_length=get_hop_size(hparams),
                        win_length=hparams['win_size'], pad_mode='constant')
def _istft(y, hparams):
    # inverse STFT; hop/window settings must match _stft for round-tripping
    return librosa.istft(y, hop_length=get_hop_size(hparams), win_length=hparams['win_size'])
##########################################################
# Those are only correct when using lws!!! (This was messing with Wavenet quality for a long time!)
def num_frames(length, fsize, fshift):
    """Compute number of time frames of spectrogram
    """
    pad = fsize - fshift
    padded = length + 2 * pad
    # one extra frame is needed when the signal is not shift-aligned
    extra = 1 if length % fshift == 0 else 2
    return (padded - fsize) // fshift + extra
def pad_lr(x, fsize, fshift):
    """Compute left and right padding
    """
    M = num_frames(len(x), fsize, fshift)
    pad = fsize - fshift
    # right pad includes whatever extra is needed so M frames exactly cover the signal
    covered = (M - 1) * fshift + fsize
    remainder = covered - (len(x) + 2 * pad)
    return pad, pad + remainder
##########################################################
def librosa_pad_lr(x, fsize, fshift, pad_sides=1):
    '''compute right padding (final frame) or both sides padding (first and final frames)
    '''
    assert pad_sides in (1, 2)
    # pad up to the next multiple of the hop length
    n = x.shape[0]
    pad = (n // fshift + 1) * fshift - n
    if pad_sides == 1:
        return 0, pad
    left = pad // 2
    return left, pad - left
# Conversions
_mel_basis = None
_inv_mel_basis = None
def _linear_to_mel(spectogram, hparams):
    # lazily build and cache the mel filterbank in a module-level global
    global _mel_basis
    if _mel_basis is None:
        _mel_basis = _build_mel_basis(hparams)
    # project linear-frequency bins onto mel bins
    return np.dot(_mel_basis, spectogram)
def _mel_to_linear(mel_spectrogram, hparams):
    # lazily cache the pseudo-inverse of the mel filterbank
    global _inv_mel_basis
    if _inv_mel_basis is None:
        _inv_mel_basis = np.linalg.pinv(_build_mel_basis(hparams))
    # floor at 1e-10 so downstream log/db conversions stay finite
    return np.maximum(1e-10, np.dot(_inv_mel_basis, mel_spectrogram))
def _build_mel_basis(hparams):
    # fmax cannot exceed the Nyquist frequency
    assert hparams['fmax'] <= hparams['audio_sample_rate'] // 2
    # NOTE(review): librosa >= 0.10 requires keyword arguments here
    # (sr=..., n_fft=...) — confirm against the pinned librosa version.
    return librosa.filters.mel(hparams['audio_sample_rate'], hparams['n_fft'], n_mels=hparams['audio_num_mel_bins'],
                               fmin=hparams['fmin'], fmax=hparams['fmax'])
def amp_to_db(x):
    """Convert amplitude to decibels, flooring at 1e-5 to avoid log(0)."""
    floored = np.clip(x, 1e-5, None)
    return 20 * np.log10(floored)
def db_to_amp(x):
    """Convert decibels back to linear amplitude (inverse of amp_to_db)."""
    return np.power(10.0, 0.05 * x)
def normalize(S, hparams):
    """Map dB values so that min_level_db -> 0 and 0 dB -> 1."""
    min_db = hparams['min_level_db']
    return (S - min_db) / -min_db
def denormalize(D, hparams):
    """Inverse of normalize: map [0, 1] back to dB."""
    min_db = hparams['min_level_db']
    return D * -min_db + min_db
def plot_spec(spec, path, info=None):
    """Render a spectrogram heatmap to *path* as a PNG (optional caption via *info*)."""
    fig = plt.figure(figsize=(14, 7))
    heatmap = plt.pcolor(spec)
    fig.colorbar(heatmap)
    xlabel = 'Time'
    if info is not None:
        xlabel += '\n\n' + info
    plt.xlabel(xlabel)
    plt.ylabel('Mel filterbank')
    plt.tight_layout()
    plt.savefig(path, format='png')
    # close explicitly so repeated calls don't accumulate open figures
    plt.close(fig)
def plot_curve(x, path, ymin=None, ymax=None):
    """Plot one curve (or a list of curves) to *path* as a PNG with optional y-limits."""
    fig = plt.figure(figsize=(14, 7))
    if isinstance(x, list):
        # overlay every curve on the same axes
        for x_ in x:
            plt.plot(x_)
    else:
        plt.plot(x)
    if ymin is not None:
        plt.ylim(ymin, ymax)
    plt.tight_layout()
    plt.savefig(path, format='png')
    # close explicitly so repeated calls don't accumulate open figures
    plt.close(fig)
# Compute the mel scale spectrogram from the wav
def dynamic_range_compression(x, C=1, clip_val=1e-5):
    """Log-compress *x*, flooring at *clip_val* to keep the log finite.

    :param C: compression factor
    """
    clipped = np.clip(x, a_min=clip_val, a_max=None)
    return np.log(clipped * C)
def dynamic_range_decompression(x, C=1):
    """Invert dynamic_range_compression.

    :param C: compression factor used to compress
    """
    return np.exp(x) / C
| 4,999 | 25.455026 | 116 | py |
NeuralSpeech | NeuralSpeech-master/PriorGrad-acoustic/tts_utils/__init__.py | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import glob
import logging
import re
import time
from collections import defaultdict
import os
import sys
import shutil
import types
import numpy as np
import torch
import torch.nn.functional as F
import torch.distributed as dist
def reduce_tensors(metrics):
    """All-reduce (average) every tensor in a possibly-nested metrics dict across workers."""
    new_metrics = {}
    for k, v in metrics.items():
        if isinstance(v, torch.Tensor):
            # in-place sum across all processes, then divide to get the mean
            dist.all_reduce(v)
            v = v / dist.get_world_size()
        if type(v) is dict:
            v = reduce_tensors(v)
        new_metrics[k] = v
    return new_metrics
def tensors_to_scalars(metrics):
    """Recursively replace tensor values in a metrics dict with Python scalars."""
    out = {}
    for key, val in metrics.items():
        if isinstance(val, torch.Tensor):
            val = val.item()
        if type(val) is dict:
            val = tensors_to_scalars(val)
        out[key] = val
    return out
def move_to_cpu(tensors):
    """Recursively move every tensor value in a (possibly nested) dict to the CPU."""
    moved = {}
    for key, val in tensors.items():
        if isinstance(val, torch.Tensor):
            val = val.cpu()
        if type(val) is dict:
            val = move_to_cpu(val)
        moved[key] = val
    return moved
def move_to_cuda(tensor):
    """Move *tensor* to the GPU when one is available, otherwise return it unchanged."""
    if not torch.cuda.is_available():
        return tensor
    return tensor.cuda(non_blocking=True)
def count_parameters(model):
    """Return the total number of elements across all named parameters.

    Uses a plain Python sum of `numel()`; the original passed a generator to
    `np.sum`, which hits NumPy's deprecated generator special case and returns
    a NumPy scalar instead of an int.
    """
    return sum(v.numel() for name, v in model.named_parameters())
class AvgrageMeter(object):
    """Tracks a running sum and count and exposes their average as `avg`."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Clear all statistics."""
        self.avg, self.sum, self.cnt = 0, 0, 0

    def update(self, val, n=1):
        """Fold in *n* observations of value *val* and refresh the average."""
        self.cnt += n
        self.sum += val * n
        self.avg = self.sum / self.cnt
def collate_1d(values, pad_idx=0, left_pad=False, max_len=None):
    """Convert a list of 1d tensors into a padded 2d tensor."""
    size = max_len if max_len is not None else max(v.size(0) for v in values)
    res = values[0].new(len(values), size).fill_(pad_idx)
    for i, v in enumerate(values):
        # left_pad places the content at the right edge of the row
        dst = res[i][size - len(v):] if left_pad else res[i][:len(v)]
        assert dst.numel() == v.numel()
        dst.copy_(v)
    return res
def collate_2d(values, pad_idx=0, left_pad=False, shift_right=False, max_len=None):
    """Convert a list of 2d tensors into a padded 3d tensor."""
    size = max_len if max_len is not None else max(v.size(0) for v in values)
    res = values[0].new(len(values), size, values[0].shape[1]).fill_(pad_idx)
    for i, v in enumerate(values):
        # left_pad places the content at the bottom of the slot
        dst = res[i][size - len(v):] if left_pad else res[i][:len(v)]
        assert dst.numel() == v.numel()
        if shift_right:
            # copy shifted by one frame: row 0 keeps pad, last source frame dropped
            dst[1:] = v[:-1]
        else:
            dst.copy_(v)
    return res
def _is_batch_full(batch, num_tokens, max_tokens, max_sentences):
if len(batch) == 0:
return 0
if len(batch) == max_sentences:
return 1
if num_tokens > max_tokens:
return 1
return 0
def batch_by_size(
        indices, num_tokens_fn, max_tokens=None, max_sentences=None,
        required_batch_size_multiple=1, distributed=False
):
    """
    Yield mini-batches of indices bucketed by size. Batches may contain
    sequences of different lengths.
    Args:
        indices (List[int]): ordered list of dataset indices
        num_tokens_fn (callable): function that returns the number of tokens at
            a given index
        max_tokens (int, optional): max number of tokens in each batch
            (default: None).
        max_sentences (int, optional): max number of sentences in each
            batch (default: None).
        required_batch_size_multiple (int, optional): require batch size to
            be a multiple of N (default: 1).
        distributed (bool, optional): unused here; kept for API compatibility.
    """
    max_tokens = max_tokens if max_tokens is not None else sys.maxsize
    max_sentences = max_sentences if max_sentences is not None else sys.maxsize
    bsz_mult = required_batch_size_multiple
    if isinstance(indices, types.GeneratorType):
        indices = np.fromiter(indices, dtype=np.int64, count=-1)
    sample_len = 0
    sample_lens = []
    batch = []
    batches = []
    for i in range(len(indices)):
        idx = indices[i]
        num_tokens = num_tokens_fn(idx)
        sample_lens.append(num_tokens)
        # batch cost is (batch size) * (longest sample in the batch)
        sample_len = max(sample_len, num_tokens)
        assert sample_len <= max_tokens, (
            "sentence at index {} of size {} exceeds max_tokens "
            "limit of {}!".format(idx, sample_len, max_tokens)
        )
        num_tokens = (len(batch) + 1) * sample_len
        if _is_batch_full(batch, num_tokens, max_tokens, max_sentences):
            # flush a prefix whose length is a multiple of bsz_mult
            mod_len = max(
                bsz_mult * (len(batch) // bsz_mult),
                len(batch) % bsz_mult,
            )
            batches.append(batch[:mod_len])
            batch = batch[mod_len:]
            sample_lens = sample_lens[mod_len:]
            sample_len = max(sample_lens) if len(sample_lens) > 0 else 0
        batch.append(idx)
    if len(batch) > 0:
        batches.append(batch)
    return batches
def make_positions(tensor, padding_idx):
    """Replace non-padding symbols with their position numbers.

    Position numbers begin at padding_idx+1. Padding symbols are ignored.
    """
    # work in ints (ONNX/XLA friendliness), cast back to long at the end
    mask = tensor.ne(padding_idx).int()
    positions = torch.cumsum(mask, dim=1).type_as(mask) * mask
    return positions.long() + padding_idx
def softmax(x, dim):
    # compute in float32 so fp16 inputs do not lose precision
    return F.softmax(x, dim=dim, dtype=torch.float32)
def sequence_mask(lengths, maxlen, dtype=torch.bool):
    """Return a (len(lengths), maxlen) mask, truthy where position index < length.

    :param lengths: 1-D tensor of sequence lengths
    :param maxlen: mask width; defaults to lengths.max() when None
    :param dtype: output dtype
    """
    if maxlen is None:
        maxlen = lengths.max()
    mask = ~(torch.ones((len(lengths), maxlen)).to(lengths.device).cumsum(dim=1).t() > lengths).t()
    # bug fix: `.type(dtype)` is not in-place — the original computed the cast
    # and discarded it, silently ignoring the dtype argument
    return mask.type(dtype)
INCREMENTAL_STATE_INSTANCE_ID = defaultdict(lambda: 0)
def _get_full_incremental_state_key(module_instance, key):
    """Build a per-instance key "<ClassName>.<instance_id>.<key>" for incremental state."""
    module_name = module_instance.__class__.__name__
    # lazily assign a unique per-class counter so two instances of the same
    # module class never share incremental state
    if not hasattr(module_instance, '_instance_id'):
        INCREMENTAL_STATE_INSTANCE_ID[module_name] += 1
        module_instance._instance_id = INCREMENTAL_STATE_INSTANCE_ID[module_name]
    return f'{module_name}.{module_instance._instance_id}.{key}'
def get_incremental_state(module, incremental_state, key):
    """Helper for getting incremental state for an nn.Module."""
    if incremental_state is None:
        return None
    full_key = _get_full_incremental_state_key(module, key)
    return incremental_state.get(full_key)
def set_incremental_state(module, incremental_state, key, value):
    """Helper for setting incremental state for an nn.Module."""
    if incremental_state is None:
        # nothing to store into (e.g. non-incremental decoding)
        return
    full_key = _get_full_incremental_state_key(module, key)
    incremental_state[full_key] = value
def fill_with_neg_inf(t):
    """FP16-compatible function that fills a tensor with -inf."""
    # fill in float32 first, then cast back to the original dtype
    filled = t.float().fill_(float('-inf'))
    return filled.type_as(t)
def fill_with_neg_inf2(t):
    """FP16-compatible function that fills a tensor with -1e9 (a finite stand-in for -inf)."""
    filled = t.float().fill_(-1e9)
    return filled.type_as(t)
def save_checkpoint(state, is_best, save):
    """Write *state* to checkpoint.pth.tar under *save*; mirror to model_best.pth.tar when best."""
    ckpt_path = os.path.join(save, 'checkpoint.pth.tar')
    torch.save(state, ckpt_path)
    if is_best:
        shutil.copyfile(ckpt_path, os.path.join(save, 'model_best.pth.tar'))
def get_all_ckpts(checkpoint_name):
    """Glob numbered checkpoints matching the pattern, newest (highest index) first."""
    matches = [
        p for p in glob.glob(checkpoint_name)
        if re.findall(r'.*/checkpoint(\d+).pt', p)
    ]
    return sorted(matches, key=lambda p: -int(re.findall(r'.*/checkpoint(\d+).pt', p)[0]))
def save(model_path, model, epoch, step, optimizer, best_valid_loss=None, is_best=True):
    """Save model/optimizer state as checkpoint{epoch}.pt, keep the 5 newest numbered
    checkpoints, and mirror to checkpoint_latest.pt (and checkpoint_best.pt when best)."""
    if isinstance(optimizer, dict):
        # multiple optimizers keyed by name
        optimizer_states = {k: x.state_dict() for k, x in optimizer.items()}
    else:
        optimizer_states = optimizer.state_dict()
    if isinstance(model, dict):
        # multiple sub-models; unwrap DataParallel-style `.module` wrappers
        model_states = {k: (x.state_dict() if not hasattr(x, 'module') else x.module.state_dict())
                        for k, x in model.items()}
    else:
        model_states = model.state_dict() if not hasattr(model, 'module') else model.module.state_dict()
    state_dict = {
        'model': model_states,
        'optimizer': optimizer_states,
        'epoch': epoch,
        'step': step,
        'best_valid_loss': best_valid_loss,
    }
    filename = os.path.join(model_path, 'checkpoint{}.pt'.format(epoch))
    all_ckpts = get_all_ckpts(os.path.join(model_path, 'checkpoint*.pt'))
    # prune everything but the 5 most recent numbered checkpoints
    for c in all_ckpts[5:]:
        logging.info(f"Remove ckpt: {c}")
        os.remove(c)
    torch.save(state_dict, filename)
    newest_filename = os.path.join(model_path, 'checkpoint_latest.pt')
    shutil.copyfile(filename, newest_filename)
    # NOTE(review): this log message looks truncated by tooling; presumably it
    # was meant to include the saved filename — confirm against upstream history.
    logging.info(f'Save ckpt: (unknown).')
    if is_best:
        best_filename = os.path.join(model_path, 'checkpoint_best.pt')
        shutil.copyfile(filename, best_filename)
        logging.info(f'Find best ckpt.')
def load(model_path):
    """Load the latest checkpoint from a directory (or an explicit file path).

    Returns (model_state, epoch, step, optimizer_state, best_valid_loss);
    when no checkpoint exists yet, returns (None, 0, 0, None, inf).
    """
    if os.path.isdir(model_path):
        newest_filename = os.path.join(model_path, 'checkpoint_latest.pt')
    else:
        assert os.path.isfile(model_path), model_path
        newest_filename = model_path
    if not os.path.exists(newest_filename):
        return None, 0, 0, None, float('inf')
    state_dict = torch.load(newest_filename, map_location="cpu")
    best = state_dict['best_valid_loss']
    return (
        state_dict['model'],
        state_dict['epoch'],
        state_dict['step'],
        state_dict['optimizer'],
        best if best is not None else float('inf'),
    )
def create_exp_dir(path, scripts_to_save=None):
    """Create an experiment directory, optionally snapshotting script files.

    Each file in ``scripts_to_save`` is copied into ``<path>/scripts`` so the
    exact code used for the run is preserved alongside its outputs.
    """
    if not os.path.exists(path):
        os.makedirs(path)
    print('Experiment dir : {}'.format(path))
    if scripts_to_save is None:
        return
    script_dir = os.path.join(path, 'scripts')
    os.makedirs(script_dir, exist_ok=True)
    for src in scripts_to_save:
        shutil.copyfile(src, os.path.join(script_dir, os.path.basename(src)))
def debug_log(fname, *args):
    """Append each positional argument to ``fname``, one per line."""
    with open(fname, 'a+') as f:
        f.writelines('{}\n'.format(a) for a in args)
def unpack_dict_to_list(samples):
    """Split a batched sample dict into a list of per-item dicts.

    The batch size is taken from ``samples['outputs']``. For every key whose
    value is indexable along the batch dimension, item ``i`` receives
    ``value[i]``; values that cannot be indexed (e.g. scalars) are skipped.
    """
    bsz = samples.get('outputs').size(0)
    unpacked = []
    for i in range(bsz):
        item = {}
        for k, v in samples.items():
            # bug fix: a bare `except:` also swallowed KeyboardInterrupt /
            # SystemExit; only indexing failures should be ignored here.
            try:
                item[k] = v[i]
            except (TypeError, KeyError, IndexError):
                pass
        unpacked.append(item)
    return unpacked
def get_focus_rate(attn, src_padding_mask=None, tgt_padding_mask=None):
    """Fraction of total attention mass placed on each target step's argmax.

    attn: bs x L_t x L_s
    """
    if src_padding_mask is not None:
        attn = attn * (1 - src_padding_mask.float()).unsqueeze(1)
    if tgt_padding_mask is not None:
        attn = attn * (1 - tgt_padding_mask.float()).unsqueeze(2)
    peak_mass = attn.max(-1).values.sum(-1)
    total_mass = attn.sum(-1).sum(-1)
    return peak_mass / total_mass
def get_word_coverage_rate(attn, src_padding_mask=None, src_seg_mask=None, tgt_padding_mask=None):
    '''
    attn: bs x L_t x L_s
    '''
    # Unimplemented stub: always returns None. Kept for interface parity with
    # get_phone_coverage_rate; callers must not rely on its result.
    return
def get_phone_coverage_rate(attn, src_padding_mask=None, src_seg_mask=None, tgt_padding_mask=None):
    """Average peak attention received by each non-masked source phone.

    attn: bs x L_t x L_s
    """
    # merge the two optional source masks (True = position excluded)
    src_mask = attn.new(attn.size(0), attn.size(-1)).bool().fill_(False)
    if src_padding_mask is not None:
        src_mask = src_mask | src_padding_mask
    if src_seg_mask is not None:
        src_mask = src_mask | src_seg_mask
    keep_src = 1 - src_mask.float()
    attn = attn * keep_src[:, None, :]
    if tgt_padding_mask is not None:
        attn = attn * (1 - tgt_padding_mask.float())[:, :, None]
    # per-source-position peak over target steps, normalized by #kept positions
    coverage = attn.max(1).values.sum(-1)
    return coverage / keep_src.sum(-1)
def get_diagonal_focus_rate(attn, attn_ks, target_len, src_padding_mask=None, tgt_padding_mask=None,
                            band_mask_factor=5, band_width=50):
    '''
    Share of attention mass falling inside a diagonal band around y = k * x.
    attn: bx x L_t x L_s
    attn_ks: shape: tensor with shape [batch_size], input_lens/output_lens
    diagonal: y=k*x (k=attn_ks, x:output, y:input)
    1 0 0
    0 1 0
    0 0 1
    y>=k*(x-width) and y<=k*(x+width):1
    else:0
    Returns (diagonal_focus_rate [B], band mask [B, L_t, L_s]).
    '''
    # width = min(target_len/band_mask_factor, 50)
    width1 = target_len / band_mask_factor
    width2 = target_len.new(target_len.size()).fill_(band_width)
    width = torch.where(width1 < width2, width1, width2).float()
    base = torch.ones(attn.size()).to(attn.device)
    zero = torch.zeros(attn.size()).to(attn.device)
    # x indexes target steps (rows), y indexes source steps (columns),
    # both broadcast to attn's full shape
    x = torch.arange(0, attn.size(1)).to(attn.device)[None, :, None].float() * base
    y = torch.arange(0, attn.size(2)).to(attn.device)[None, None, :].float() * base
    cond = (y - attn_ks[:, None, None] * x)
    # inside the band iff k*(x - width) <= y <= k*(x + width)
    cond1 = cond + attn_ks[:, None, None] * width[:, None, None]
    cond2 = cond - attn_ks[:, None, None] * width[:, None, None]
    mask1 = torch.where(cond1 < 0, zero, base)
    mask2 = torch.where(cond2 > 0, zero, base)
    mask = mask1 * mask2
    if src_padding_mask is not None:
        attn = attn * (1 - src_padding_mask.float())[:, None, :]
    if tgt_padding_mask is not None:
        attn = attn * (1 - tgt_padding_mask.float())[:, :, None]
    # ratio of in-band mass to total mass, per batch item
    diagonal_attn = attn * mask
    diagonal_focus_rate = diagonal_attn.sum(-1).sum(-1) / attn.sum(-1).sum(-1)
    return diagonal_focus_rate, mask
def generate_arch(n, layers, num_ops=10):
    """Sample ``n`` random architectures, each a list of ``layers`` op ids in [1, num_ops]."""
    def _random_arch():
        return [np.random.randint(1, num_ops + 1) for _ in range(layers)]
    return [_random_arch() for _ in range(n)]
def parse_arch_to_seq(arch):
    """Return the architecture's op ids as a fresh (shallow-copied) sequence."""
    return list(arch)
def parse_seq_to_arch(seq):
    """Return the sequence's op ids as a fresh (shallow-copied) architecture list."""
    return list(seq)
def pairwise_accuracy(la, lb):
    """Fraction of index pairs (i < j) that ``la`` and ``lb`` order consistently.

    A pair is concordant when both lists agree on '>=', or both agree on
    strict '<'.
    """
    n = len(la)
    assert n == len(lb)
    total = 0
    concordant = 0
    for j in range(1, n):
        for i in range(j):
            total += 1
            both_ge = la[i] >= la[j] and lb[i] >= lb[j]
            both_lt = la[i] < la[j] and lb[i] < lb[j]
            if both_ge or both_lt:
                concordant += 1
    return float(concordant) / total
def hamming_distance(la, lb):
    """Mean per-sequence Hamming distance between two equal-length lists of sequences."""
    N = len(la)
    assert N == len(lb)
    total = 0
    for s1, s2 in zip(la, lb):
        assert len(s1) == len(s2)
        total += sum(1 for a, b in zip(s1, s2) if a != b)
    return total / N
def sample_arch(arch_pool, prob=None):
    """Draw one architecture from ``arch_pool``, uniformly or by ``prob`` weights."""
    n = len(arch_pool)
    if prob is not None:
        weights = np.array(prob, dtype=np.float32)
        weights = weights / weights.sum()
        chosen = np.random.choice(n, p=weights)
    else:
        chosen = np.random.choice(n)
    return arch_pool[chosen]
def select_attn(attn_logits, type='best'):
    """Reduce per-layer/per-head attention logits to one attention map.

    :param attn_logits: [n_layers, B, n_head, T_sp, T_txt]
    :return: [B, T_sp, T_txt]; 'best' picks, per batch item, the head whose
        attention is most focused, while 'mean' averages all layers/heads.
    """
    stacked = torch.stack(attn_logits, 0).transpose(1, 2)
    # flatten layers/heads -> [n_layers * n_head, B, T_sp, T_txt]
    attn = stacked.reshape([-1, *stacked.shape[2:]]).softmax(-1)
    if type == 'mean':
        return attn.mean(0)
    elif type == 'best':
        best = attn.max(-1).values.sum(-1).argmax(0)
        idx = best[None, :, None, None].repeat(1, 1, attn.size(-2), attn.size(-1))
        return attn.gather(0, idx)[0]
def get_num_heads(arch):
    """Map each op id in ``arch`` to its attention-head count.

    Ops <= 7 and op 11 are single-head; ops 8/9/10 use 2/4/8 heads. Any
    other op id is silently skipped (matching the original dispatch).
    """
    multi_head = {8: 2, 9: 4, 10: 8}
    num_heads = []
    for op in arch:
        if op <= 7 or op == 11:
            num_heads.append(1)
        elif op in multi_head:
            num_heads.append(multi_head[op])
    return num_heads
def remove_padding(x, padding_idx=0):
    """Strip padding entries from a 1-D ([T]) or 2-D ([T, H]) array/tensor.

    For 2-D input a row is dropped when the sum of its absolute values equals
    ``padding_idx`` (i.e. an all-zero row for the default of 0).
    """
    if x is None:
        return None
    ndim = len(x.shape)
    assert ndim in [1, 2]
    if ndim == 1:  # [T]
        return x[x != padding_idx]
    return x[np.abs(x).sum(-1) != padding_idx]  # [T, H]
class Timer:
    """Context manager accumulating wall-clock time per named timer.

    Totals live in the class-level ``timer_map``, so repeated uses of the
    same name add up across instances.
    """
    timer_map = {}
    def __init__(self, name, print_time=False):
        Timer.timer_map.setdefault(name, 0)
        self.name = name
        self.print_time = print_time
    def __enter__(self):
        self.t = time.time()
    def __exit__(self, exc_type, exc_val, exc_tb):
        elapsed = time.time() - self.t
        Timer.timer_map[self.name] += elapsed
        if self.print_time:
            print(self.name, Timer.timer_map[self.name])
| 17,042 | 29.931034 | 114 | py |
NeuralSpeech | NeuralSpeech-master/PriorGrad-acoustic/tts_utils/world_utils.py | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
##########
# world
##########
import numpy as np
import pysptk
import copy
import torch
gamma = 0
mcepInput = 3 # 0 for dB, 3 for magnitude
alpha = 0.45
en_floor = 10 ** (-80 / 20)
FFT_SIZE = 2048
def code_harmonic(sp, order):
    """Encode a spectral envelope into mel-frequency spectral coefficients.

    Each frame is converted to mel-cepstrum via pysptk, the boundary bins are
    doubled, the sequence mirrored, and the real part of the rFFT returned.
    """
    # per-frame mel-cepstral analysis
    mceps = np.apply_along_axis(pysptk.mcep, 1, sp, order - 1, alpha, itype=mcepInput, threshold=en_floor)
    scaled = copy.copy(mceps)
    scaled[:, 0] *= 2
    scaled[:, -1] *= 2
    mirrored = np.hstack([scaled[:, :-1], scaled[:, -1:0:-1]])
    return np.fft.rfft(mirrored).real
def decode_harmonic(mfsc, fftlen=FFT_SIZE):
    """Invert code_harmonic: recover a spectral envelope from MFSC features."""
    # undo the rFFT/mirroring, keeping the first 60 cepstral bins
    mirrored = np.fft.irfft(mfsc)
    mceps = mirrored[:, :60]
    mceps[:, 0] /= 2
    mceps[:, -1] /= 2
    # mel-cepstrum back to a (magnitude) spectral envelope
    return np.exp(np.apply_along_axis(pysptk.mgc2sp, 1, mceps, alpha, gamma, fftlen=fftlen).real)
f0_bin = 256
f0_max = 1100.0
f0_min = 50.0
def f0_to_coarse(f0):
    """Quantize F0 (Hz) into ``f0_bin`` coarse mel-scaled bins.

    Unvoiced frames (f0 == 0) map to bin 0; voiced frames map to [1, f0_bin - 1].

    :param f0: numpy array of F0 values in Hz (modified in place)
    :return: integer numpy array of bin indices
    """
    f0_mel = 1127 * np.log(1 + f0 / 700)
    f0_mel_min = 1127 * np.log(1 + f0_min / 700)
    f0_mel_max = 1127 * np.log(1 + f0_max / 700)
    # voiced frames: linearly rescale mel-F0 into bins [1, f0_bin - 1]
    f0_mel[f0_mel > 0] = (f0_mel[f0_mel > 0] - f0_mel_min) * (f0_bin - 2) / (f0_mel_max - f0_mel_min) + 1
    f0_mel[f0_mel < 0] = 1
    f0_mel[f0_mel > f0_bin - 1] = f0_bin - 1
    # bug fix: np.int was removed in NumPy 1.24; use the builtin int dtype
    f0_coarse = np.rint(f0_mel).astype(int)
    assert (np.max(f0_coarse) <= 256 and np.min(f0_coarse) >= 0)
    return f0_coarse
def f0_to_coarse_torch(f0):
    """Torch version of f0_to_coarse: quantize F0 (Hz) into coarse mel bins.

    Note: unlike the numpy version, bins are obtained by truncation
    (``.long()``) rather than rounding. ``f0`` is modified in place.
    """
    mel = 1127 * (1 + f0 / 700).log()
    mel_min = 1127 * np.log(1 + f0_min / 700)
    mel_max = 1127 * np.log(1 + f0_max / 700)
    voiced = mel > 0
    # voiced frames: linearly rescale mel-F0 into bins [1, f0_bin - 1]
    mel[voiced] = (mel[voiced] - mel_min) * (f0_bin - 2) / (mel_max - mel_min) + 1
    mel[mel < 0] = 1
    mel[mel > f0_bin - 1] = f0_bin - 1
    coarse = mel.long()
    assert (coarse.max() <= 256 and coarse.min() >= 0)
    return coarse
def process_f0(f0, hparams):
    """Normalize F0 and interpolate over unvoiced frames.

    Returns the normalized/interpolated F0 as a FloatTensor together with a
    binary unvoiced mask (1.0 where the original f0 was 0).
    """
    normed = (f0 - hparams['f0_mean']) / hparams['f0_std']
    unvoiced_idx = np.where(f0 == 0)[0]
    voiced_idx = np.where(f0 > 0)[0]
    # fill unvoiced frames by linear interpolation between voiced neighbours
    normed[unvoiced_idx] = np.interp(unvoiced_idx, voiced_idx, normed[f0 > 0])
    uv = (torch.FloatTensor(f0) == 0).float()
    return torch.FloatTensor(normed), uv
def restore_pitch(pitch, uv, hparams, pitch_padding=None, min=None, max=None):
    """Undo F0 normalization; zero padded frames, set unvoiced frames to 1.

    :param pitch_padding: bool mask of padded frames; defaults to pitch == -200
    :param min: optional lower clamp in Hz (applied after de-normalization)
    :param max: optional upper clamp in Hz (applied after de-normalization)
    """
    if pitch_padding is None:
        pitch_padding = pitch == -200
    restored = pitch * hparams['f0_std'] + hparams['f0_mean']
    if min is not None:
        restored = restored.clamp(min=min)
    if max is not None:
        restored = restored.clamp(max=max)
    if uv is not None:
        restored[uv > 0] = 1
    restored[pitch_padding] = 0
    return restored
| 2,926 | 26.87619 | 106 | py |
NeuralSpeech | NeuralSpeech-master/PriorGrad-acoustic/tts_utils/preprocessor.py | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import json
import warnings
import torch
from skimage.transform import resize
from tts_utils.world_utils import f0_to_coarse
warnings.filterwarnings("ignore")
import struct
import webrtcvad
from scipy.ndimage.morphology import binary_dilation
import pyworld as pw
import librosa
import numpy as np
from tts_utils import audio
import pyloudnorm as pyln
from tts_utils.parse_textgrid import remove_empty_lines, TextGrid
from librosa.util import normalize
from scipy.io.wavfile import read
from librosa.filters import mel as librosa_mel_fn
from matplotlib import pyplot as plt
int16_max = (2 ** 15) - 1
def trim_long_silences(path, sr, return_raw_wav=False, norm=True):
    """
    Ensures that segments without voice in the waveform remain no longer than a
    threshold determined by the VAD parameters in params.py.
    :param path: path of the audio file to load
    :param sr: sample rate to load the raw waveform with
    :param return_raw_wav: if True, return the full (untrimmed) waveform plus
        the boolean voice mask instead of the trimmed waveform
    :param norm: if True, loudness-normalize to -20 LUFS (and rescale to <= 1.0)
    :return: (waveform, audio_mask); waveform trimmed unless return_raw_wav
    """
    ## Voice Activation Detection
    # Window size of the VAD. Must be either 10, 20 or 30 milliseconds.
    # This sets the granularity of the VAD. Should not need to be changed.
    sampling_rate = 16000
    wav_raw, sr = librosa.core.load(path, sr=sr)
    if norm:
        meter = pyln.Meter(sr)  # create BS.1770 meter
        loudness = meter.integrated_loudness(wav_raw)
        wav_raw = pyln.normalize.loudness(wav_raw, loudness, -20.0)
        if np.abs(wav_raw).max() > 1.0:
            wav_raw = wav_raw / np.abs(wav_raw).max()
    # compatibility fix: orig_sr/target_sr are keyword-only in librosa >= 0.10
    wav = librosa.resample(wav_raw, orig_sr=sr, target_sr=sampling_rate, res_type='kaiser_best')
    vad_window_length = 30  # In milliseconds
    # Number of frames to average together when performing the moving average smoothing.
    # The larger this value, the larger the VAD variations must be to not get smoothed out.
    vad_moving_average_width = 8
    # Maximum number of consecutive silent frames a segment can have.
    vad_max_silence_length = 12
    # Compute the voice detection window size
    samples_per_window = (vad_window_length * sampling_rate) // 1000
    # Trim the end of the audio to have a multiple of the window size
    wav = wav[:len(wav) - (len(wav) % samples_per_window)]
    # Convert the float waveform to 16-bit mono PCM
    pcm_wave = struct.pack("%dh" % len(wav), *(np.round(wav * int16_max)).astype(np.int16))
    # Perform voice activation detection
    voice_flags = []
    vad = webrtcvad.Vad(mode=3)
    for window_start in range(0, len(wav), samples_per_window):
        window_end = window_start + samples_per_window
        voice_flags.append(vad.is_speech(pcm_wave[window_start * 2:window_end * 2],
                                         sample_rate=sampling_rate))
    voice_flags = np.array(voice_flags)
    # Smooth the voice detection with a moving average
    def moving_average(array, width):
        array_padded = np.concatenate((np.zeros((width - 1) // 2), array, np.zeros(width // 2)))
        ret = np.cumsum(array_padded, dtype=float)
        ret[width:] = ret[width:] - ret[:-width]
        return ret[width - 1:] / width
    audio_mask = moving_average(voice_flags, vad_moving_average_width)
    # bug fix: np.bool was removed in NumPy 1.24 — use the builtin bool
    audio_mask = np.round(audio_mask).astype(bool)
    # Dilate the voiced regions
    audio_mask = binary_dilation(audio_mask, np.ones(vad_max_silence_length + 1))
    audio_mask = np.repeat(audio_mask, samples_per_window)
    # project the 16 kHz mask back onto the original-rate waveform
    audio_mask = resize(audio_mask, (len(wav_raw),)) > 0
    if return_raw_wav:
        return wav_raw, audio_mask
    return wav_raw[audio_mask], audio_mask
# hifi-gan-compatible mel processing
# borrowed from HiFi-GAN: https://github.com/jik876/hifi-gan/blob/master/meldataset.py
# MIT License
#
# Copyright (c) 2020 Jungil Kong
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
MAX_WAV_VALUE = 32768.0
def load_wav(full_path):
    """Read a wav file, returning (samples, sampling_rate) — note the swapped order."""
    rate_and_data = read(full_path)
    return rate_and_data[1], rate_and_data[0]
def dynamic_range_compression(x, C=1, clip_val=1e-5):
    """Log-compress magnitudes, clipping below ``clip_val`` to avoid log(0)."""
    clipped = np.clip(x, a_min=clip_val, a_max=None)
    return np.log(clipped * C)
def dynamic_range_decompression(x, C=1):
    """Invert dynamic_range_compression: exponentiate, then undo the gain C."""
    return np.exp(x) / C
def dynamic_range_compression_torch(x, C=1, clip_val=1e-5):
    """Torch version of dynamic_range_compression: clamp then log-compress."""
    clamped = torch.clamp(x, min=clip_val)
    return torch.log(clamped * C)
def dynamic_range_decompression_torch(x, C=1):
    """Torch version of dynamic_range_decompression: exp, then undo the gain C."""
    return torch.exp(x) / C
def spectral_normalize_torch(magnitudes):
    """Apply log dynamic-range compression to spectrogram magnitudes."""
    return dynamic_range_compression_torch(magnitudes)
def spectral_de_normalize_torch(magnitudes):
    """Invert spectral_normalize_torch (dynamic-range decompression)."""
    return dynamic_range_decompression_torch(magnitudes)
mel_basis = {}
hann_window = {}
def mel_spectrogram(y, n_fft, num_mels, sampling_rate, hop_size, win_size, fmin, fmax, center=False):
    """Compute a log-mel spectrogram the same way HiFi-GAN's meldataset does.

    :param y: waveform batch [B, T] with samples expected in [-1, 1]
    :return: log-mel spectrogram [B, num_mels, frames]
    """
    if torch.min(y) < -1.:
        print('min value is ', torch.min(y))
    if torch.max(y) > 1.:
        print('max value is ', torch.max(y))
    global mel_basis, hann_window
    # bug fix: the cache was keyed by f'{fmax}_{device}' but probed with the
    # bare fmax, so the filterbank and window were rebuilt on every call.
    # NOTE(review): hann_window is keyed by device only — it assumes win_size
    # never varies per device within one process.
    key = str(fmax) + '_' + str(y.device)
    if key not in mel_basis:
        # keyword arguments: librosa.filters.mel is keyword-only since librosa 0.10
        mel = librosa_mel_fn(sr=sampling_rate, n_fft=n_fft, n_mels=num_mels, fmin=fmin, fmax=fmax)
        mel_basis[key] = torch.from_numpy(mel).float().to(y.device)
        hann_window[str(y.device)] = torch.hann_window(win_size).to(y.device)
    y = torch.nn.functional.pad(y.unsqueeze(1), (int((n_fft-hop_size)/2), int((n_fft-hop_size)/2)), mode='reflect')
    y = y.squeeze(1)
    # complex tensor as default, then use view_as_real for future pytorch compatibility
    spec = torch.stft(y, n_fft, hop_length=hop_size, win_length=win_size, window=hann_window[str(y.device)],
                      center=center, pad_mode='reflect', normalized=False, onesided=True, return_complex=True)
    spec = torch.view_as_real(spec)
    spec = torch.sqrt(spec.pow(2).sum(-1)+(1e-9))
    spec = torch.matmul(mel_basis[key], spec)
    spec = spectral_normalize_torch(spec)
    return spec
def process_utterance_hfg(wav_path,
                          fft_size=1024,
                          hop_size=256,
                          win_length=1024,
                          window="hann",
                          num_mels=80,
                          fmin=80,
                          fmax=7600,
                          eps=1e-10,
                          sample_rate=22050,
                          loud_norm=False,
                          min_level_db=-100,
                          return_linear=False,
                          trim_long_sil=False,
                          vocoder='hfg'):
    """Load a wav and compute its HiFi-GAN-style mel spectrogram.

    Returns ``(wav, mel)`` with ``wav`` truncated to a whole number of hops.
    NOTE(review): several parameters (window, eps, loud_norm, min_level_db,
    trim_long_sil, vocoder) are unused here and kept for signature
    compatibility; ``return_linear=True`` is not implemented.
    """
    samples, sampling_rate = load_wav(wav_path)
    samples = samples / MAX_WAV_VALUE
    samples = normalize(samples) * 0.95
    wav = samples
    batch = torch.FloatTensor(samples).unsqueeze(0)
    mel = mel_spectrogram(batch, fft_size, num_mels,
                          sample_rate, hop_size, win_length, fmin, fmax,
                          center=False)
    mel = mel[0].numpy()
    assert wav.shape[0] >= mel.shape[1] * hop_size, "size mismatch"
    # drop trailing samples so the waveform matches the mel frame grid exactly
    wav = wav[:mel.shape[1] * hop_size]
    if not return_linear:
        return wav, mel
    raise NotImplementedError
# pitch calculation
def get_pitch(wav_data, mel, hparams):
    """
    Extract frame-level F0 aligned with the mel spectrogram via pyworld.
    :param wav_data: [T]
    :param mel: [T, 80]
    :param hparams: dict providing 'audio_sample_rate' and 'hop_size'
    :return: (f0, pitch_coarse): continuous F0 and its 1-based coarse bins
    """
    _f0, t = pw.dio(wav_data.astype(np.double), hparams['audio_sample_rate'],
                    frame_period=hparams['hop_size'] / hparams['audio_sample_rate'] * 1000)
    f0 = pw.stonemask(wav_data.astype(np.double), _f0, t, hparams['audio_sample_rate'])  # pitch refinement
    delta_l = len(mel) - len(f0)
    assert np.abs(delta_l) <= 2
    if delta_l > 0:
        # bug fix: np.concatenate rejects the 0-d scalars that `[f0[-1]] * delta_l`
        # produced; pad with a proper 1-d array of the last value instead.
        f0 = np.concatenate([f0, np.full(delta_l, f0[-1])])
    f0 = f0[:len(mel)]
    pitch_coarse = f0_to_coarse(f0) + 1
    return f0, pitch_coarse
# mel2ph calculation
def get_mel2ph_p(tg_fn, ph, phone_encoded, mel, hparams):
    """Build the mel-frame -> phoneme alignment from an MFA TextGrid.

    :param tg_fn: path to the TextGrid file for this utterance
    :param ph: space-separated phoneme string ('|' marks word separators)
    :param phone_encoded: encoded phone ids, parallel to ``ph``
    :param mel: mel spectrogram [frames, bins] (only the frame count is used)
    :return: (mel2ph, mel2ph_encoded, dur): per-frame 1-based phone index,
        per-frame encoded phone id, and per-phone duration in frames
    """
    ph_list = ph.split(" ")
    with open(tg_fn, "r") as f:
        tg = f.readlines()
    tg = remove_empty_lines(tg)
    tg = TextGrid(tg)
    tg = json.loads(tg.toJson())
    # split[i] = first mel frame of phone i (split[0]=0, split[-1]=len(mel));
    # fix: np.int was removed in NumPy 1.24, use the builtin int dtype alias
    split = np.zeros(len(ph_list) + 1, int)
    split[0] = 0
    split[-1] = len(mel)
    tg_idx = 0
    ph_idx = 1
    tg_align = [x for x in tg['tiers'][0]['items']]
    while tg_idx < len(tg_align):
        ph = ph_list[ph_idx]
        x = tg_align[tg_idx]
        if x['text'] == '':
            tg_idx += 1
            continue
        if x['text'] not in ['punc', 'sep']:
            assert x['text'] == ph_list[ph_idx].lower(), (x['text'], ph_list[ph_idx])
        if x['text'] == 'sep':
            assert ph == '|', (ph, '|')
        split[ph_idx] = int(float(x['xmin']) * hparams['audio_sample_rate'] / hparams['hop_size'])
        ph_idx += 1
        tg_idx += 1
    # bug fix: the original `assert a, b` used the second condition as the
    # assert *message*, so it was never checked; both are meant to hold.
    assert tg_idx == len(tg_align), (tg_idx, len(tg_align))
    assert ph_idx == len(ph_list), (ph_idx, len(ph_list))
    split[ph_idx] = int(float(x['xmax']) * hparams['audio_sample_rate'] / hparams['hop_size'])
    mel2ph = np.zeros([mel.shape[0]], int)
    mel2ph_encoded = np.zeros([mel.shape[0]], int) - 1
    assert len(ph_list) == len(phone_encoded)
    for ph_idx in range(len(ph_list)):
        mel2ph[split[ph_idx]:split[ph_idx + 1]] = ph_idx + 1
        mel2ph_encoded[split[ph_idx]:split[ph_idx + 1]] = phone_encoded[ph_idx]
    assert np.all(mel2ph_encoded != -1)
    mel2ph_torch = torch.from_numpy(mel2ph)
    T_t = len(ph_list)
    # per-phone duration = number of frames assigned to each 1-based index
    dur = mel2ph_torch.new_zeros([T_t + 1]).scatter_add(0, mel2ph_torch, torch.ones_like(mel2ph_torch))
    dur = dur[1:].numpy()
    return mel2ph, mel2ph_encoded, dur
NeuralSpeech | NeuralSpeech-master/PriorGrad-acoustic/tts_utils/tts_utils.py | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import torch
import torch.nn.functional as F
import numpy as np
from tts_utils.stft import STFT
def make_pad_mask(lengths, xs=None, length_dim=-1):
    """Make mask tensor containing indices of padded part.
    Args:
        lengths (LongTensor or List): Batch of lengths (B,).
        xs (Tensor, optional): The reference tensor.
            If set, masks will be the same shape as this tensor.
        length_dim (int, optional): Dimension indicator of the above tensor.
            See the example.
    Returns:
        Tensor: Mask tensor containing indices of padded part
            (True/1 marks padding, False/0 marks real content).
            dtype=torch.uint8 in PyTorch 1.2-
            dtype=torch.bool in PyTorch 1.2+ (including 1.2)
    Examples:
        With only lengths.
        >>> lengths = [5, 3, 2]
        >>> make_pad_mask(lengths)
        masks = [[0, 0, 0, 0, 0],
                 [0, 0, 0, 1, 1],
                 [0, 0, 1, 1, 1]]
        With the reference tensor.
        >>> xs = torch.zeros((3, 2, 4))
        >>> make_pad_mask(lengths, xs)
        tensor([[[0, 0, 0, 0],
                 [0, 0, 0, 0]],
                [[0, 0, 0, 1],
                 [0, 0, 0, 1]],
                [[0, 0, 1, 1],
                 [0, 0, 1, 1]]], dtype=torch.uint8)
        >>> xs = torch.zeros((3, 2, 6))
        >>> make_pad_mask(lengths, xs)
        tensor([[[0, 0, 0, 0, 0, 1],
                 [0, 0, 0, 0, 0, 1]],
                [[0, 0, 0, 1, 1, 1],
                 [0, 0, 0, 1, 1, 1]],
                [[0, 0, 1, 1, 1, 1],
                 [0, 0, 1, 1, 1, 1]]], dtype=torch.uint8)
        With the reference tensor and dimension indicator.
        >>> xs = torch.zeros((3, 6, 6))
        >>> make_pad_mask(lengths, xs, 1)
        tensor([[[0, 0, 0, 0, 0, 0],
                 [0, 0, 0, 0, 0, 0],
                 [0, 0, 0, 0, 0, 0],
                 [0, 0, 0, 0, 0, 0],
                 [0, 0, 0, 0, 0, 0],
                 [1, 1, 1, 1, 1, 1]],
                [[0, 0, 0, 0, 0, 0],
                 [0, 0, 0, 0, 0, 0],
                 [0, 0, 0, 0, 0, 0],
                 [1, 1, 1, 1, 1, 1],
                 [1, 1, 1, 1, 1, 1],
                 [1, 1, 1, 1, 1, 1]],
                [[0, 0, 0, 0, 0, 0],
                 [0, 0, 0, 0, 0, 0],
                 [1, 1, 1, 1, 1, 1],
                 [1, 1, 1, 1, 1, 1],
                 [1, 1, 1, 1, 1, 1],
                 [1, 1, 1, 1, 1, 1]]], dtype=torch.uint8)
        >>> make_pad_mask(lengths, xs, 2)
        tensor([[[0, 0, 0, 0, 0, 1],
                 [0, 0, 0, 0, 0, 1],
                 [0, 0, 0, 0, 0, 1],
                 [0, 0, 0, 0, 0, 1],
                 [0, 0, 0, 0, 0, 1],
                 [0, 0, 0, 0, 0, 1]],
                [[0, 0, 0, 1, 1, 1],
                 [0, 0, 0, 1, 1, 1],
                 [0, 0, 0, 1, 1, 1],
                 [0, 0, 0, 1, 1, 1],
                 [0, 0, 0, 1, 1, 1],
                 [0, 0, 0, 1, 1, 1]],
                [[0, 0, 1, 1, 1, 1],
                 [0, 0, 1, 1, 1, 1],
                 [0, 0, 1, 1, 1, 1],
                 [0, 0, 1, 1, 1, 1],
                 [0, 0, 1, 1, 1, 1],
                 [0, 0, 1, 1, 1, 1]]], dtype=torch.uint8)
    """
    # NOTE: the first example previously (and incorrectly) showed a call to
    # make_non_pad_mask; the output shown was make_pad_mask's.
    if length_dim == 0:
        raise ValueError("length_dim cannot be 0: {}".format(length_dim))
    if not isinstance(lengths, list):
        lengths = lengths.tolist()
    bs = int(len(lengths))
    if xs is None:
        maxlen = int(max(lengths))
    else:
        maxlen = xs.size(length_dim)
    # positions >= length are padding
    seq_range = torch.arange(0, maxlen, dtype=torch.int64)
    seq_range_expand = seq_range.unsqueeze(0).expand(bs, maxlen)
    seq_length_expand = seq_range_expand.new(lengths).unsqueeze(-1)
    mask = seq_range_expand >= seq_length_expand
    if xs is not None:
        assert xs.size(0) == bs, (xs.size(0), bs)
        if length_dim < 0:
            length_dim = xs.dim() + length_dim
        # ind = (:, None, ..., None, :, , None, ..., None)
        ind = tuple(
            slice(None) if i in (0, length_dim) else None for i in range(xs.dim())
        )
        mask = mask[ind].expand_as(xs).to(xs.device)
    return mask
def make_non_pad_mask(lengths, xs=None, length_dim=-1):
    """Make mask tensor containing indices of non-padded part.

    This is the logical complement of :func:`make_pad_mask`: entries are
    True (1) inside each sequence and False (0) on the padding.

    Args:
        lengths (LongTensor or List): Batch of lengths (B,).
        xs (Tensor, optional): The reference tensor.
            If set, masks will be the same shape as this tensor.
        length_dim (int, optional): Dimension indicator of the above tensor.

    Returns:
        ByteTensor/BoolTensor: mask tensor containing indices of non-padded part
            (dtype=torch.uint8 on PyTorch < 1.2, torch.bool on 1.2+).

    Examples:
        >>> lengths = [5, 3, 2]
        >>> make_non_pad_mask(lengths)
        masks = [[1, 1, 1, 1, 1],
                 [1, 1, 1, 0, 0],
                 [1, 1, 0, 0, 0]]
    """
    pad_mask = make_pad_mask(lengths, xs, length_dim)
    return ~pad_mask
def get_mask_from_lengths(lengths):
    """Boolean CUDA mask [B, max_len]: True where position < sequence length."""
    max_len = torch.max(lengths).item()
    positions = torch.arange(0, max_len, out=torch.cuda.LongTensor(max_len))
    return (positions < lengths.unsqueeze(1)).bool()
class GeneralDenoiser(torch.nn.Module):
    """Spectral-subtraction denoiser (CUDA-only): subtracts a scaled noise
    magnitude profile from the input's STFT and resynthesizes with the
    original phase."""
    def __init__(self, filter_length=1024, n_overlap=4, win_length=1024):
        super(GeneralDenoiser, self).__init__()
        self.stft = STFT(filter_length=filter_length,
                         hop_length=int(filter_length / n_overlap),
                         win_length=win_length).cuda()
    def forward(self, audio, noise_audio=None, strength=0.3):
        """Denoise ``audio``; ``noise_audio`` supplies the noise profile
        (a flat 0.1 magnitude is assumed when omitted)."""
        spec, phase = self.stft.transform(audio.cuda().float())
        if noise_audio is not None:
            noise_spec = self.stft.transform(noise_audio.cuda().float())[0]
        else:
            # NOTE(review): the 513-bin fallback assumes filter_length == 1024
            noise_spec = torch.ones([1, 513, 1]).cuda() * 0.1
        noise_profile = noise_spec.mean(-1)[:, :, None] * strength
        denoised_spec = torch.clamp(spec - noise_profile, 0.0)
        return self.stft.inverse(denoised_spec, phase)
| 8,414 | 37.424658 | 82 | py |
NeuralSpeech | NeuralSpeech-master/PriorGrad-acoustic/monotonic_align/__init__.py | import numpy as np
import torch
from .monotonic_align.core import maximum_path_c
def maximum_path(value, mask):
    """ Cython optimised version.
    value: [b, t_x, t_y]
    mask: [b, t_x, t_y]
    """
    device, dtype = value.device, value.dtype
    masked = (value * mask).data.cpu().numpy().astype(np.float32)
    # the C routine fills `path` in place with the monotonic alignment
    path = np.zeros_like(masked).astype(np.int32)
    mask_np = mask.data.cpu().numpy()
    # valid lengths per batch item, read off the mask's first row/column
    t_x_max = mask_np.sum(1)[:, 0].astype(np.int32)
    t_y_max = mask_np.sum(2)[:, 0].astype(np.int32)
    maximum_path_c(path, masked, t_x_max, t_y_max)
    return torch.from_numpy(path).to(device=device, dtype=dtype)
| 608 | 26.681818 | 62 | py |
NeuralSpeech | NeuralSpeech-master/PriorGrad-acoustic/tasks/base_task.py | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import matplotlib
matplotlib.use('Agg')
from tts_utils.hparams import hparams, set_hparams
import random
import sys
import numpy as np
import torch.distributed as dist
from pytorch_lightning.loggers import TensorBoardLogger
from tts_utils.pl_utils import LatestModelCheckpoint, BaseTrainer, data_loader, DDP
from torch import nn
import torch.utils.data
import tts_utils
import logging
import os
torch.multiprocessing.set_sharing_strategy(os.getenv('TORCH_SHARE_STRATEGY', 'file_system'))
log_format = '%(asctime)s %(message)s'
logging.basicConfig(stream=sys.stdout, level=logging.INFO,
format=log_format, datefmt='%m/%d %I:%M:%S %p')
class BaseDataset(torch.utils.data.Dataset):
    """Common base for TTS datasets: size bookkeeping, batching order, workers.

    Subclasses must implement ``__getitem__`` and ``collater`` and populate
    ``self.sizes`` with one length per example.
    """
    def __init__(self, data_dir, prefix, hparams, shuffle):
        super().__init__()
        self.hparams = hparams
        self.shuffle = shuffle
        self.data_dir = data_dir
        self.prefix = prefix
        self.sort_by_len = hparams['sort_by_len']
        self.sizes = None
    @property
    def _sizes(self):
        return self.sizes
    def __getitem__(self, index):
        raise NotImplementedError
    def collater(self, samples):
        raise NotImplementedError
    def __len__(self):
        return len(self._sizes)
    def num_tokens(self, index):
        return self.size(index)
    def size(self, index):
        """Return an example's size as a float or tuple. This value is used when
        filtering a dataset with ``--max-positions``."""
        # cap at max_frames so a single long utterance can't blow up a batch
        return min(self._sizes[index], hparams['max_frames'])
    def ordered_indices(self):
        """Return an ordered list of indices. Batches will be constructed based
        on this order."""
        if not self.shuffle:
            return np.arange(len(self))
        order = np.random.permutation(len(self))
        if self.sort_by_len:
            # stable sort keeps the shuffle as tie-breaker among equal sizes
            order = order[np.argsort(np.array(self._sizes)[order], kind='mergesort')]
        return order
    @property
    def num_workers(self):
        return int(os.getenv('NUM_WORKERS', 1))
class BaseTask(nn.Module):
def __init__(self, *args, **kwargs):
# dataset configs
super(BaseTask, self).__init__(*args, **kwargs)
self.current_epoch = 0
self.global_step = 0
self.loaded_optimizer_states_dict = {}
self.trainer = None
self.logger = None
self.on_gpu = False
self.use_dp = False
self.use_ddp = False
self.example_input_array = None
self.max_tokens = hparams['max_tokens']
self.max_sentences = hparams['max_sentences']
self.max_eval_tokens = hparams['max_eval_tokens']
if self.max_eval_tokens == -1:
hparams['max_eval_tokens'] = self.max_eval_tokens = self.max_tokens
self.max_eval_sentences = hparams['max_eval_sentences']
if self.max_eval_sentences == -1:
hparams['max_eval_sentences'] = self.max_eval_sentences = self.max_sentences
print('| set hparams: ')
for i, (k, v) in enumerate(sorted(hparams.items())):
print(f"\033[;33;m{k}\033[0m: {v}, ", end="\n" if i % 5 == 4 else "")
print("")
self.model = None
self.training_losses_meter = None
###########
# Training, validation and testing
###########
def build_model(self):
raise NotImplementedError
def on_epoch_start(self):
self.training_losses_meter = {'total_loss': tts_utils.AvgrageMeter()}
def _training_step(self, sample, batch_idx, optimizer_idx):
"""
:param sample:
:param batch_idx:
:return: total loss: torch.Tensor, loss_log: dict
"""
raise NotImplementedError
def training_step(self, sample, batch_idx, optimizer_idx=-1):
loss_ret = self._training_step(sample, batch_idx, optimizer_idx)
self.opt_idx = optimizer_idx
if loss_ret is None:
return {'loss': None}
total_loss, log_outputs = loss_ret
log_outputs = tts_utils.tensors_to_scalars(log_outputs)
for k, v in log_outputs.items():
if k not in self.training_losses_meter:
self.training_losses_meter[k] = tts_utils.AvgrageMeter()
if not np.isnan(v):
self.training_losses_meter[k].update(v)
self.training_losses_meter['total_loss'].update(total_loss.item())
try:
log_outputs['lr'] = self.scheduler.get_lr()
if isinstance(log_outputs['lr'], list):
log_outputs['lr'] = log_outputs['lr'][0]
except:
pass
log_outputs['all_loss'] = total_loss.item()
if optimizer_idx != -1:
log_outputs[f'loss_{optimizer_idx}'] = log_outputs.pop('all_loss')
progress_bar_log = log_outputs
tb_log = {f'tr/{k}': v for k, v in log_outputs.items()}
return {
'loss': total_loss,
'progress_bar': progress_bar_log,
'log': tb_log
}
def optimizer_step(self, epoch, batch_idx, optimizer, optimizer_idx):
optimizer.step()
optimizer.zero_grad()
self.scheduler.step(self.global_step // hparams['accumulate_grad_batches'])
def on_epoch_end(self):
loss_outputs = {k: round(v.avg, 4) for k, v in self.training_losses_meter.items()}
print(f"\n==============\n "
f"Epoch {self.current_epoch} ended. Steps: {self.global_step}. {loss_outputs}"
f"\n==============\n")
def validation_step(self, sample, batch_idx):
"""
:param sample:
:param batch_idx:
:return: output: dict
"""
raise NotImplementedError
def _validation_end(self, outputs):
"""
:param outputs:
:return: loss_output: dict
"""
raise NotImplementedError
def validation_end(self, outputs):
loss_output = self._validation_end(outputs)
print(f"\n==============\n "
f"valid results: {loss_output}"
f"\n==============\n")
return {
'log': {f'val/{k}': v for k, v in loss_output.items()},
'val_loss': loss_output['total_loss']
}
def build_scheduler(self, optimizer):
raise NotImplementedError
def build_optimizer(self, model):
raise NotImplementedError
def configure_optimizers(self):
set_hparams()
self.model = self.build_model()
print(self.model)
optm = self.build_optimizer(self.model)
self.scheduler = self.build_scheduler(optm)
return [optm]
def test_start(self):
pass
def test_step(self, sample, batch_idx):
return self.validation_step(sample, batch_idx)
def test_end(self, outputs):
return self.validation_end(outputs)
###########
# Running configuration
###########
@classmethod
def start(cls):
set_hparams()
os.environ['MASTER_PORT'] = str(random.randint(15000, 30000))
random.seed(hparams['seed'])
np.random.seed(hparams['seed'])
task = cls()
trainer = BaseTrainer(checkpoint_callback=LatestModelCheckpoint(
filepath=hparams['work_dir'],
verbose=True,
monitor='val_loss',
mode='min',
num_keep=999999,
period=1 if hparams['save_ckpt'] else 100000
),
logger=TensorBoardLogger(
save_dir=hparams['work_dir'],
name='lightning_logs',
version='lastest'
),
gradient_clip_val=hparams['clip_grad_norm'],
val_check_interval=hparams['val_check_interval'],
row_log_interval=hparams['log_interval'],
max_updates=hparams['max_updates'],
num_sanity_val_steps=hparams['num_sanity_val_steps'] if not hparams[
'validate'] else 10000,
accumulate_grad_batches=hparams['accumulate_grad_batches'])
if not hparams['infer']: # train
trainer.checkpoint_callback.task = task
trainer.fit(task)
else:
trainer.test(task)
def configure_ddp(self, model, device_ids):
model = DDP(
model,
device_ids=device_ids,
find_unused_parameters=True
)
if dist.get_rank() != 0 and not hparams['debug']:
sys.stdout = open(os.devnull, "w")
sys.stderr = open(os.devnull, "w")
random.seed(hparams['seed'])
np.random.seed(hparams['seed'])
return model
def training_end(self, *args, **kwargs):
return None
def init_ddp_connection(self, proc_rank, world_size):
# guarantees unique ports across jobs from same grid search
default_port = 12910
# if user gave a port number, use that one instead
try:
default_port = os.environ['MASTER_PORT']
except Exception:
os.environ['MASTER_PORT'] = str(default_port)
# figure out the root node addr
root_node = '127.0.0.2'
root_node = self.trainer.resolve_root_node_address(root_node)
os.environ['MASTER_ADDR'] = root_node
dist.init_process_group('nccl', rank=proc_rank, world_size=world_size)
@data_loader
def train_dataloader(self):
    # placeholder: concrete tasks override this with a real DataLoader
    return None

@data_loader
def test_dataloader(self):
    # placeholder: concrete tasks override this with a real DataLoader
    return None

@data_loader
def val_dataloader(self):
    # placeholder: concrete tasks override this with a real DataLoader
    return None
# --- Trainer lifecycle hooks: intentional no-op defaults for subclasses ---
def on_load_checkpoint(self, checkpoint):
    # called with the loaded checkpoint dict; default does nothing
    pass

def on_save_checkpoint(self, checkpoint):
    # called with the checkpoint dict about to be written; default does nothing
    pass

def on_sanity_check_start(self):
    pass

def on_train_start(self):
    pass

def on_train_end(self):
    pass

def on_batch_start(self, batch):
    pass

def on_batch_end(self):
    pass

def on_pre_performance_check(self):
    pass

def on_post_performance_check(self):
    pass

def on_before_zero_grad(self, optimizer):
    pass

def on_after_backward(self):
    pass
def backward(self, loss, optimizer):
    # plain backprop; clipping/accumulation is handled by the trainer config
    loss.backward()
def grad_norm(self, norm_type):
    """Return a dict of per-parameter gradient norms plus the total norm.

    Keys follow 'grad_{norm_type}_norm_{param_name}' and
    'grad_{norm_type}_norm_total'; values are rounded to 3 decimals.
    """
    results = {}
    total_norm = 0
    for name, p in self.named_parameters():
        # explicit guard instead of the previous broad `except Exception: pass`,
        # which silently swallowed real errors besides the missing-grad case
        if not p.requires_grad or p.grad is None:
            continue
        param_norm = p.grad.data.norm(norm_type)
        total_norm += param_norm ** norm_type
        norm = param_norm ** (1 / norm_type)
        grad = round(norm.data.cpu().numpy().flatten()[0], 3)
        results['grad_{}_norm_{}'.format(norm_type, name)] = grad
    if isinstance(total_norm, int):
        # no parameter had a gradient: previously `0 ** (1/p)` stayed an int and
        # `.data` raised AttributeError; report 0.0 instead
        results['grad_{}_norm_total'.format(norm_type)] = 0.0
        return results
    total_norm = total_norm ** (1. / norm_type)
    grad = round(total_norm.data.cpu().numpy().flatten()[0], 3)
    results['grad_{}_norm_total'.format(norm_type)] = grad
    return results
| 11,402 | 31.303116 | 98 | py |
NeuralSpeech | NeuralSpeech-master/PriorGrad-acoustic/tasks/priorgrad_inference.py | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import os, glob, re
from tts_utils.hparams import hparams, set_hparams
from tasks.priorgrad import PriorGradDataset
from tasks.priorgrad import PriorGradTask
import torch
import numpy as np
from tqdm import tqdm
set_hparams()
def get_latest_ckpt(dir):
    """Return the checkpoint path with the highest step count in *dir*.

    Raises:
        FileNotFoundError: when no ``model_ckpt_steps_*.ckpt`` file exists
        (previously this crashed with an uninformative IndexError).
    """
    # raw string avoids the invalid `\_`/`\d` escape warnings; `\_` == `_`
    ckpt_list = sorted(glob.glob(f'{dir}/model_ckpt_steps_*.ckpt'),
                       key=lambda x: -int(re.findall(r'.*steps_(\d+)\.ckpt', x)[0]))
    if not ckpt_list:
        raise FileNotFoundError(f'no model_ckpt_steps_*.ckpt checkpoint found in {dir}')
    print("INFO: located checkpoint {}. loading...".format(ckpt_list[0]))
    return ckpt_list[0]
# build PriorGradTask then the model itself
task = PriorGradTask()
task.model = task.build_model()
# load the latest checkpoint from work_dir defined in hparams
ckpt = torch.load(get_latest_ckpt(hparams['work_dir']))
task.global_step = ckpt['global_step']
task.load_state_dict(ckpt['state_dict'])
# load the fast noise schedule saved during test set inference
# (produced by the grid search in tasks/priorgrad.py; named by iter/work_dir/step)
if hparams['fast']:
    best_schedule_name = 'betas' + str(hparams['fast_iter']) + '_' + hparams['work_dir'].split('/')[-1] + '_' + str(task.global_step) + '.npy'
    best_schedule = np.load(os.path.join(hparams['work_dir'], best_schedule_name))
    task.model.decoder.params.inference_noise_schedule = best_schedule
    print("INFO: saved noise schedule found in {}".format(os.path.join(hparams['work_dir'], best_schedule_name)))
    print("diffusion decoder inference noise schedule is reset to {}".format(best_schedule))
# load the model to gpu
task.model.eval().cuda()
# prepare hifi-gan vocoder
task.prepare_vocoder_hfg()
# define PriorGradDataset. will only use the functions (text_to_phone and phone_to_prior) and not the actual test dataset
dataset = PriorGradDataset(hparams['data_dir'], task.phone_encoder, None, hparams, shuffle=False, infer_only=True)
# inference requires phoneme input and the corresponding target_mean and target_std
with open(hparams['inference_text'], 'r') as f:
    user_text = f.readlines()
# create sample dir inside work_dir in hparams
if hparams['fast']:
    gen_dir = os.path.join(hparams['work_dir'],
                           f'inference_fast{hparams["fast_iter"]}_{task.global_step}')
else:
    gen_dir = os.path.join(hparams['work_dir'],
                           f'inference_{task.global_step}')
os.makedirs(gen_dir, exist_ok=True)
os.makedirs(f'{gen_dir}/text', exist_ok=True)
os.makedirs(f'{gen_dir}/spec', exist_ok=True)
os.makedirs(f'{gen_dir}/spec_plot', exist_ok=True)
os.makedirs(f'{gen_dir}/wavs', exist_ok=True)
# perform text-to-speech then save mel and wav
with torch.no_grad():
    # one line of input text per utterance
    for i, text in enumerate(tqdm(user_text)):
        phone = torch.LongTensor(dataset.text_to_phone(text))
        target_mean, target_std = dataset.phone_to_prior(phone)
        # add batch dim and move to GPU
        phone = phone.unsqueeze(0).cuda()
        target_mean, target_std = target_mean.unsqueeze(0).cuda(), target_std.unsqueeze(0).cuda()
        outputs = task.model(phone, None, None, None,
                             target_mean, target_std, None, None, None, None,
                             is_training=False, fast_sampling=hparams['fast'])
        mel_out = outputs['mel_out'].permute(0, 2, 1)  # [1, num_mels, T]
        wav_out = task.vocoder(mel_out).squeeze().cpu().numpy()  # [1, T_wav]
        # save mel and wav
        task.save_result(wav_out, mel_out.cpu()[0].T, f'P', i, text, gen_dir)
| 3,439 | 41.469136 | 142 | py |
NeuralSpeech | NeuralSpeech-master/PriorGrad-acoustic/tasks/priorgrad.py | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import matplotlib
# matplotlib.use('Agg')
from matplotlib import pyplot as plt
from tts_utils.pl_utils import data_loader
import os, sys
import json
from multiprocessing.pool import Pool
from tqdm import tqdm
from modules.tts_modules import DurationPredictorLoss
from tts_utils.hparams import hparams, set_hparams
from tts_utils.plot import plot_to_figure, numpy_to_figure, spec_numpy_to_figure
from tts_utils.world_utils import restore_pitch, process_f0
from tts_utils.text_encoder import TokenTextEncoder
from tts_utils.indexed_datasets import IndexedDataset
from tts_utils import audio
import torch.distributed as dist
import numpy as np
from tasks.base_task import BaseTask, BaseDataset
from modules.priorgrad import PriorGrad
import time
import torch
import torch.optim
import torch.utils.data
import torch.nn.functional as F
import tts_utils
from g2p_en import G2p
sys.path.append("hifi-gan")
class RSQRTSchedule(object):
    """Transformer-style LR schedule: linear warmup, then reciprocal-sqrt decay,
    scaled by hidden_size ** -0.5 and floored at 1e-7."""

    def __init__(self, optimizer):
        super().__init__()
        self.optimizer = optimizer
        self.constant_lr = hparams['lr']
        self.warmup_updates = hparams['warmup_updates']
        self.hidden_size = hparams['hidden_size']
        self.lr = hparams['lr']
        for group in optimizer.param_groups:
            group['lr'] = self.lr
        self.step(0)

    def step(self, num_updates):
        """Recompute the LR for the given update count, push it into every
        param group, and return it."""
        warmup_frac = min(num_updates / self.warmup_updates, 1.0)
        decay = max(self.warmup_updates, num_updates) ** -0.5
        hidden_scale = self.hidden_size ** -0.5
        self.lr = max(self.constant_lr * warmup_frac * decay * hidden_scale, 1e-7)
        for group in self.optimizer.param_groups:
            group['lr'] = self.lr
        return self.lr

    def get_lr(self):
        """Current LR as stored in the first optimizer param group."""
        return self.optimizer.param_groups[0]['lr']
class PriorGradDataset(BaseDataset):
    """A dataset that provides helpers for batching.

    Serves preprocessed mel/f0/duration features and, when use_phone_stat is
    on, attaches the phoneme-level prior mean/std that parameterize
    PriorGrad's data-dependent Gaussian prior.
    """
    def __init__(self, data_dir, phone_encoder, prefix, hparams, shuffle=False, infer_only=False):
        # infer_only=True skips dataset indices and f0 stats so the object can
        # be used purely for text_to_phone / phone_to_prior at inference time
        super().__init__(data_dir, prefix, hparams, shuffle)
        self.phone_encoder = phone_encoder
        self.infer_only = infer_only
        if not self.infer_only:
            self.data = None
            self.idx2key = np.load(f'{self.data_dir}/{self.prefix}_all_keys.npy')
            self.sizes = np.load(f'{self.data_dir}/{self.prefix}_lengths.npy')
        self.num_spk = hparams['num_spk']
        self.use_indexed_ds = hparams['indexed_ds']
        self.indexed_bs = None
        self.g2p = G2p()
        if not self.infer_only:
            # filter out items with no pitch
            f0s = np.load(f'{self.data_dir}/{prefix}_f0s.npy', allow_pickle=True)
            self.avail_idxs = [i for i, f0 in enumerate(f0s) if sum(f0) > 0]
            self.sizes = [self.sizes[i] for i in self.avail_idxs]
            # pitch stats (always computed from the training split)
            f0s = np.load(f'{self.data_dir}/train_f0s.npy', allow_pickle=True)
            f0s = np.concatenate(f0s, 0)
            f0s = f0s[f0s != 0]
            hparams['f0_mean'] = self.f0_mean = np.mean(f0s).item()
            hparams['f0_std'] = self.f0_std = np.std(f0s).item()
        # phoneme stats loading
        self.use_phone_stat = hparams['use_phone_stat'] if 'use_phone_stat' in hparams else False
        if self.use_phone_stat:
            # loads phoneme statistics. all datasets use training stats
            print("INFO: using phoneme-level stats for PriorGrad modeling!")
            self.phone_to_mean = torch.from_numpy(np.load(f'{self.data_dir}/train_phone_to_mean.npy', allow_pickle=True))
            if hparams['use_std_norm']:
                print("INFO: using 0~1 normalized stds!")
                self.phone_to_std = torch.from_numpy(np.load(f'{self.data_dir}/train_phone_to_std_norm.npy', allow_pickle=True))
            else:
                print("INFO: using non-normalized stds!")
                self.phone_to_std = torch.from_numpy(np.load(f'{self.data_dir}/train_phone_to_std.npy', allow_pickle=True))
            print("INFO: phoneme mean stats: min {:.4f} max {:.4f} mean {:.4f} std {:.4f}".
                  format(self.phone_to_mean.min(), self.phone_to_mean.max(), self.phone_to_mean.mean(), self.phone_to_mean.std()))
            print("INFO: phoneme std stats: min {:.4f} max {:.4f} mean {:.4f} std {:.4f}".
                  format(self.phone_to_std.min(), self.phone_to_std.max(), self.phone_to_std.mean(), self.phone_to_std.std()))
            # clamp bounds for std, applied later in collater
            self.std_min = hparams['std_min']
            print("INFO: minimum of std is set to {}".format(self.std_min))
            self.std_max = hparams['std_max'] if 'std_max' in hparams else -1
            if self.std_max != -1:
                print("INFO: maximum of std is set to {}".format(self.std_max))
            # ablation switches: N(0, sigma) or N(mu, I) priors
            self.use_std_only = hparams['use_std_only'] if 'use_std_only' in hparams else False
            if self.use_std_only:
                print("WARNING: use_std_only is true. phone_to_mean is wiped to all zero, falling back to N(0, sigma)!")
                self.phone_to_mean = torch.zeros_like(self.phone_to_mean)
                print("INFO: phoneme mean stats: min {:.4f} max {:.4f} mean {:.4f} std {:.4f}".
                      format(self.phone_to_mean.min(), self.phone_to_mean.max(), self.phone_to_mean.mean(), self.phone_to_mean.std()))
            self.use_mean_only = hparams['use_mean_only'] if 'use_mean_only' in hparams else False
            if self.use_mean_only:
                print("WARNING: use_mean_only is true. phone_to_std is wiped to all one, falling back to N(mu, I)!")
                self.phone_to_std = torch.ones_like(self.phone_to_std)
                print("INFO: phoneme std stats: min {:.4f} max {:.4f} mean {:.4f} std {:.4f}".
                      format(self.phone_to_std.min(), self.phone_to_std.max(), self.phone_to_std.mean(), self.phone_to_std.std()))

    def text_to_phone(self, txt):
        # function that converts the user-given text to phoneme sequence used in PriorGrad-acoustic
        # the implementation mirrors datasets/tts/lj/prepare.py and datasets/tts/lj/gen_fs2_p.py
        # input: text string
        # output: encoded phoneme string
        phs = [p.replace(" ", "|") for p in self.g2p(txt)]
        ph = " ".join(phs)
        ph = "<UNK> " + ph + " <EOS>"
        phone_encoded = self.phone_encoder.encode(ph)
        return phone_encoded

    def phone_to_prior(self, phone):
        # TTS inference function that returns prior mean and std given the user-given phoneme sequence
        # input: phoneme sequence with shape [T]
        # output: phoneme-level prior mean and std with shape [T, num_mels]
        assert self.use_phone_stat is True, "phone_to_prior does not support the model with use_phone_stat=False."
        spec_mean = torch.index_select(self.phone_to_mean, 0, phone)
        spec_std = torch.index_select(self.phone_to_std, 0, phone)
        return spec_mean, spec_std

    def _get_item(self, index):
        # loads one preprocessed item, either as a per-key .npy file or
        # lazily from the binary IndexedDataset
        if not self.use_indexed_ds:
            key = self.idx2key[index]
            item = np.load(f'{self.data_dir}/{self.prefix}/{key}.npy', allow_pickle=True).item()
        else:
            if self.indexed_bs is None:
                self.indexed_bs = IndexedDataset(f'{self.data_dir}/{self.prefix}')
            item = self.indexed_bs[index]
        return item

    def __getitem__(self, index):
        # note: incoming index is remapped through avail_idxs (no-pitch items filtered)
        hparams = self.hparams
        index = self.avail_idxs[index]
        key = self.idx2key[index]
        item = self._get_item(index)
        spec = torch.Tensor(item['mel'])
        # frame energy derived from the mel spectrogram, truncated to max_frames
        energy = (spec.exp() ** 2).sum(-1).sqrt()[:hparams['max_frames']]
        mel2ph = torch.LongTensor(item['mel2ph'])[:hparams['max_frames']]
        f0, uv = process_f0(item["f0"], hparams)
        phone = torch.LongTensor(item['phone'][:hparams['max_input_tokens']])
        sample = {
            "id": index,
            "utt_id": key,
            "text": item['txt'],
            "source": phone,
            "target": spec[:hparams['max_frames']],
            "pitch": torch.LongTensor(item.get("pitch"))[:hparams['max_frames']],
            "energy": energy,
            "f0": f0[:hparams['max_frames']],
            "uv": uv[:hparams['max_frames']],
            "mel2ph": mel2ph,
        }
        if self.use_phone_stat:
            # per-phoneme prior statistics, aligned to the phoneme sequence
            spec_mean = torch.index_select(self.phone_to_mean, 0, phone)
            spec_std = torch.index_select(self.phone_to_std, 0, phone)
            sample["target_mean"] = spec_mean
            sample["target_std"] = spec_std
        if self.num_spk > 1:
            sample["spk_id"] = item['spk_id']
            sample["spk_embed"] = item['spk_embed']
        return sample

    def collater(self, samples):
        """Pad a list of samples into one batch dict (see batch keys below)."""
        if len(samples) == 0:
            return {}
        pad_idx = self.phone_encoder.pad()
        id = torch.LongTensor([s['id'] for s in samples])
        utt_ids = [s['utt_id'] for s in samples]
        text = [s['text'] for s in samples]
        src_tokens = tts_utils.collate_1d([s['source'] for s in samples], pad_idx)
        # f0 is padded with -200 (sentinel for "no pitch" frames)
        f0 = tts_utils.collate_1d([s['f0'] for s in samples], -200) if self.hparams['use_pitch_embed'] else None
        uv = tts_utils.collate_1d([s['uv'] for s in samples]) if self.hparams['use_pitch_embed'] else None
        energy = tts_utils.collate_1d([s['energy'] for s in samples], pad_idx) if self.hparams['use_energy_embed'] else None
        mel2ph = tts_utils.collate_1d([s['mel2ph'] for s in samples], pad_idx)
        target = tts_utils.collate_2d([s['target'] for s in samples], pad_idx)
        prev_output_mels = tts_utils.collate_2d([s['target'] for s in samples], pad_idx, shift_right=True)
        src_lengths = torch.LongTensor([s['source'].numel() for s in samples])
        target_lengths = torch.LongTensor([s['target'].shape[0] for s in samples])
        ntokens = sum(len(s['source']) for s in samples)
        nmels = sum(len(s['target']) for s in samples)
        batch = {
            'id': id,
            'utt_id': utt_ids,
            'nsamples': len(samples),
            'ntokens': ntokens,
            'nmels': nmels,
            'text': text,
            'src_tokens': src_tokens,
            'mel2ph': mel2ph,
            'src_lengths': src_lengths,
            'targets': target,
            'energy': energy,
            'target_lengths': target_lengths,
            'prev_output_mels': prev_output_mels,
            'pitch': f0,
            'uv': uv,
        }
        if self.use_phone_stat:
            target_mean = tts_utils.collate_2d([s['target_mean'] for s in samples], pad_idx)
            target_std = tts_utils.collate_2d([s['target_std'] for s in samples], pad_idx)
            batch['targets_mean'] = target_mean
            # fill one instead of zero for target_std: zero value will cause NaN for scaled_mse_loss
            target_std[target_std == 0] = 1
            # clamp std into [std_min, std_max] (std_max == -1 means no upper bound)
            target_std[target_std <= self.std_min] = self.std_min
            if self.std_max != -1:
                target_std[target_std >= self.std_max] = self.std_max
            batch['targets_std'] = target_std
        if self.num_spk > 1:
            spk_ids = torch.LongTensor([s['spk_id'] for s in samples])
            spk_embed = torch.FloatTensor([s['spk_embed'] for s in samples])
            batch['spk_ids'] = spk_ids
            batch['spk_embed'] = spk_embed
        return batch
class PriorGradTask(BaseTask):
def __init__(self, *args, **kwargs):
    """Parse the arch spec, build the phoneme encoder, then defer to BaseTask.

    NOTE: attributes are initialized before super().__init__() — presumably
    the base init relies on them (e.g. phone_encoder); keep this order.
    """
    self.arch = hparams['arch']
    # arch may be given as a whitespace-separated string of layer codes
    if isinstance(self.arch, str):
        self.arch = list(map(int, self.arch.strip().split()))
    if self.arch is not None:
        # decoder part of the arch list starts after enc_layers entries
        self.num_heads = tts_utils.get_num_heads(self.arch[hparams['enc_layers']:])
    self.vocoder = None
    self.phone_encoder = self.build_phone_encoder(hparams['data_dir'])
    self.padding_idx = self.phone_encoder.pad()
    self.eos_idx = self.phone_encoder.eos()
    self.seg_idx = self.phone_encoder.seg()
    self.saving_result_pool = None
    self.saving_results_futures = None
    self.stats = {}
    super().__init__(*args, **kwargs)
    # super(PriorGradTask, self).__init__()
    self.dur_loss_fn = DurationPredictorLoss()
    self.mse_loss_fn = torch.nn.MSELoss()
    self.use_phone_stat = hparams['use_phone_stat'] if 'use_phone_stat' in hparams else False
@data_loader
def train_dataloader(self):
    # training split, shuffled; optionally endless (repeated batches) per hparams
    train_dataset = PriorGradDataset(hparams['data_dir'], self.phone_encoder,
                                     hparams['train_set_name'], hparams, shuffle=True)
    return self.build_dataloader(train_dataset, True, self.max_tokens, self.max_sentences,
                                 endless=hparams['endless_ds'])

@data_loader
def val_dataloader(self):
    # validation split, deterministic order, eval-sized batch limits
    valid_dataset = PriorGradDataset(hparams['data_dir'], self.phone_encoder,
                                     hparams['valid_set_name'], hparams,
                                     shuffle=False)
    return self.build_dataloader(valid_dataset, False, self.max_eval_tokens, self.max_eval_sentences)

@data_loader
def test_dataloader(self):
    # test split, deterministic order, eval-sized batch limits
    test_dataset = PriorGradDataset(hparams['data_dir'], self.phone_encoder,
                                    hparams['test_set_name'], hparams, shuffle=False)
    return self.build_dataloader(test_dataset, False, self.max_eval_tokens, self.max_eval_sentences)
def build_dataloader(self, dataset, shuffle, max_tokens=None, max_sentences=None,
                     required_batch_size_multiple=-1, endless=False):
    """Build a DataLoader over token/sentence-capped dynamic batches.

    Batch caps are scaled by the GPU count; under DDP each rank takes an
    interleaved slice of every batch, and batches not divisible by the world
    size are dropped.
    """
    if required_batch_size_multiple == -1:
        required_batch_size_multiple = torch.cuda.device_count()

    def shuffle_batches(batches):
        # in-place shuffle of the batch list (order of batches, not items)
        np.random.shuffle(batches)
        return batches

    if max_tokens is not None:
        max_tokens *= torch.cuda.device_count()
    if max_sentences is not None:
        max_sentences *= torch.cuda.device_count()
    indices = dataset.ordered_indices()
    batch_sampler = tts_utils.batch_by_size(
        indices, dataset.num_tokens, max_tokens=max_tokens, max_sentences=max_sentences,
        required_batch_size_multiple=required_batch_size_multiple,
    )
    if shuffle:
        batches = shuffle_batches(list(batch_sampler))
        if endless:
            # repeat the (re-shuffled) epoch 1000x so the loader never runs dry
            batches = [b for _ in range(1000) for b in shuffle_batches(list(batch_sampler))]
    else:
        batches = batch_sampler
        if endless:
            batches = [b for _ in range(1000) for b in batches]
    num_workers = dataset.num_workers
    if self.trainer.use_ddp:
        num_replicas = dist.get_world_size()
        rank = dist.get_rank()
        # keep only batches evenly divisible across ranks; stride-slice per rank
        batches = [x[rank::num_replicas] for x in batches if len(x) % num_replicas == 0]
    return torch.utils.data.DataLoader(dataset,
                                       collate_fn=dataset.collater,
                                       batch_sampler=batches,
                                       num_workers=num_workers,
                                       pin_memory=False)
def build_phone_encoder(self, data_dir):
    """Load the phoneme vocabulary from ``phone_set.json`` and wrap it.

    Uses a context manager so the file handle is closed deterministically
    (the previous ``json.load(open(...))`` leaked the handle).
    """
    phone_list_file = os.path.join(data_dir, 'phone_set.json')
    with open(phone_list_file) as f:
        phone_list = json.load(f)
    return TokenTextEncoder(None, vocab_list=phone_list)
def build_model(self):
    """Instantiate PriorGrad from the parsed arch spec; log trainable param counts."""
    arch = self.arch
    model = PriorGrad(arch, self.phone_encoder)
    print("encoder params:{}".format(sum(p.numel() for p in model.encoder.parameters() if p.requires_grad)))
    print("decoder params:{}".format(sum(p.numel() for p in model.decoder.parameters() if p.requires_grad)))
    return model
def build_scheduler(self, optimizer):
    """Attach the rsqrt warmup/decay LR schedule to the optimizer."""
    schedule = RSQRTSchedule(optimizer)
    return schedule
def build_optimizer(self, model):
    """AdamW with hparams-driven betas/weight decay; kept on self for reuse."""
    self.optimizer = optimizer = torch.optim.AdamW(
        model.parameters(),
        lr=hparams['lr'],
        betas=(hparams['optimizer_adam_beta1'], hparams['optimizer_adam_beta2']),
        weight_decay=hparams['weight_decay'])
    return optimizer
def _training_step(self, sample, batch_idx, _):
    """One optimization step: returns (total_loss, loss_dict) for the trainer."""
    input = sample['src_tokens']  # [B, T_t]
    target = sample['targets']  # [B, T_s, 80]
    mel2ph = sample['mel2ph']  # [B, T_s]
    pitch = sample['pitch']
    energy = sample['energy']
    uv = sample['uv']
    # phoneme-level computed target statistics
    target_mean = sample['targets_mean'] if 'targets_mean' in sample else None
    target_std = sample['targets_std'] if 'targets_std' in sample else None
    # get mask for target beforehand for MAS
    # only get the first dim (previously repeated along mel bin)
    target_nonpadding = self.weights_nonzero_speech(target)[:, :, 0]  # [B, T_mel]
    spk_embed = sample.get('spk_embed') if not hparams['use_spk_id'] else sample.get('spk_ids')
    loss_output, output = self.run_model(self.model, input, mel2ph, spk_embed, target, target_mean, target_std, target_nonpadding,
                                         pitch=pitch, uv=uv, energy=energy, is_training=True,
                                         return_output=True)
    # only differentiable entries contribute to the optimized total
    total_loss = sum([v for v in loss_output.values() if v.requires_grad])
    loss_output['batch_size'] = target.size()[0]
    return total_loss, loss_output
def validation_step(self, sample, batch_idx):
    """Compute validation losses; for the first 10 batches also run full
    reverse diffusion and log spectrogram/pitch figures to TensorBoard."""
    input = sample['src_tokens']
    target = sample['targets']
    mel2ph = sample['mel2ph']
    pitch = sample['pitch']
    energy = sample['energy']
    uv = sample['uv']
    # phoneme-level computed target statistics
    target_mean = sample['targets_mean'] if 'targets_mean' in sample else None
    target_std = sample['targets_std'] if 'targets_std' in sample else None
    # get mask for target beforehand for MAS
    # only get the first dim (previously repeated along mel bin)
    target_nonpadding = self.weights_nonzero_speech(target)[:, :, 0]  # [B, T_mel]
    spk_embed = sample.get('spk_embed') if not hparams['use_spk_id'] else sample.get('spk_ids')
    outputs = {}
    outputs['losses'] = {}
    # is_training=True here: the cheap noise-prediction loss, no reverse process
    outputs['losses'], model_out = self.run_model(self.model, input, mel2ph, spk_embed, target, target_mean, target_std, target_nonpadding,
                                                  pitch=pitch, uv=uv,
                                                  energy=energy, is_training=True,
                                                  return_output=True)
    outputs['total_loss'] = outputs['losses']['diffusion']
    outputs['nmels'] = sample['nmels']
    outputs['nsamples'] = sample['nsamples']
    outputs = tts_utils.tensors_to_scalars(outputs)
    if batch_idx < 10:
        # run reverse diffusion for sampling
        # why only run reverse for subset?: DDPM takes long time for reverse (if we use 50 or so). too long to evaluate, not worth it
        # sample 10 points and monitor these spec losses as proxy
        outputs_reverse, model_out_reverse = self.run_model(self.model, input, mel2ph, spk_embed, target, target_mean, target_std, target_nonpadding,
                                                            pitch=pitch, uv=uv,
                                                            energy=energy, is_training=False,
                                                            return_output=True)
        if 'pitch_logits' in model_out_reverse:
            # -4 marks unvoiced frames in both GT and prediction for plotting
            pitch[uv > 0] = -4
            pitch_pred = model_out_reverse['pitch_logits'][:, :, 0]
            pitch_pred[model_out_reverse['pitch_logits'][:, :, 1] > 0] = -4
            self.logger.experiment.add_figure(f'pitch_{batch_idx}', plot_to_figure({
                'gt': pitch[0].detach().cpu().numpy(),
                'pred': pitch_pred[0]
                .detach().cpu().numpy()
            }), self.global_step)
        if 'mel_out' in model_out_reverse:
            mel_out = model_out_reverse['mel_out'][0].detach().cpu().numpy()
            self.logger.experiment.add_figure(f'mel_out_{batch_idx}', spec_numpy_to_figure(mel_out),
                                              self.global_step)
        if 'mel' in outputs_reverse:
            outputs['losses']['mel'] = outputs_reverse['mel'].item()
        if 'encoder_proj_aligned' in model_out:  # from MAS encoder_proj
            encoder_proj_aligned = model_out['encoder_proj_aligned'][0].detach().cpu().numpy()
            self.logger.experiment.add_figure(f'encoder_proj_aligned_{batch_idx}', spec_numpy_to_figure(encoder_proj_aligned),
                                              self.global_step)
        # try plotting learned target mean & std
        if 'target_mean_aligned' in model_out_reverse:
            target_mean_aligned = model_out_reverse['target_mean_aligned'][0].detach().cpu().numpy()
            self.logger.experiment.add_figure(f'target_mean_{batch_idx}', spec_numpy_to_figure(target_mean_aligned),
                                              self.global_step)
        if 'target_std_aligned' in model_out_reverse:
            target_std_aligned = model_out_reverse['target_std_aligned'][0].detach().cpu().numpy()
            self.logger.experiment.add_figure(f'target_std_{batch_idx}', spec_numpy_to_figure(target_std_aligned),
                                              self.global_step)
    return outputs
def _validation_end(self, outputs):
    """Aggregate per-batch validation losses into sample-weighted averages,
    rounded to 4 decimals."""
    meters = {'total_loss': tts_utils.AvgrageMeter()}
    for batch_output in outputs:
        weight = batch_output['nsamples']
        for name, value in batch_output['losses'].items():
            if name not in meters:
                meters[name] = tts_utils.AvgrageMeter()
            meters[name].update(value, weight)
        meters['total_loss'].update(batch_output['total_loss'], weight)
    return {name: round(meter.avg, 4) for name, meter in meters.items()}
def run_model(self, model, input, mel2ph, spk_embed, target, target_mean, target_std, target_nonpadding,
              return_output=False, ref_mel='tgt', pitch=None, uv=None, energy=None, is_training=True):
    """Forward the model and assemble the loss dict.

    is_training=True computes the diffusion (noise-prediction) loss only;
    otherwise the reverse process output is scored with a mel loss.
    Duration/pitch/energy losses are appended according to hparams.
    """
    hparams['global_steps'] = self.global_step
    losses = {}
    if ref_mel == 'tgt':
        ref_mel = target
    output = model(input, mel2ph, spk_embed, ref_mel, target_mean, target_std, target_nonpadding, pitch, uv, energy, is_training)
    if is_training:
        # compute diffusion loss
        if self.use_phone_stat:
            # sigma^-1-weighted MSE matching the data-dependent prior
            losses['diffusion'] = self.scaled_mse_loss(output['noise_pred'], output['noise_target'], output['target_mean_aligned'], output['target_std_aligned'])
        else:
            losses['diffusion'] = self.mse_loss(output['noise_pred'], output['noise_target'])
    else:
        if hparams['mel_loss'] == 'l1':
            losses['mel'] = self.l1_loss(output['mel_out'], target)
        if hparams['mel_loss'] == 'mse':
            losses['mel'] = self.mse_loss(output['mel_out'], target)
    if hparams['dur'] == 'mfa':
        losses['dur'] = self.dur_loss(output['dur'], mel2ph, input)
    elif hparams['dur'] == 'mas':
        assert 'mel2ph_mas' in output, "mel2ph_mas not found in model output!"
        assert 'encoder_proj_aligned' in output, "encoder_proj_aligned not found in model output!"
        losses['dur'] = self.dur_loss(output['dur'], output['mel2ph_mas'], input)
        # NOTE(review): this branch tests 'l1'/'l2' while the mel branch above
        # tests 'l1'/'mse' — confirm whether 'l2' here is intended
        if hparams['mel_loss'] == 'l1':
            losses['encoder'] = self.l1_loss(output['encoder_proj_aligned'], target)
        elif hparams['mel_loss'] == 'l2':
            losses['encoder'] = self.mse_loss(output['encoder_proj_aligned'], target)
    if hparams['use_pitch_embed']:
        p_pred = output['pitch_logits']
        losses['uv'], losses['f0'] = self.pitch_loss(p_pred, pitch, uv)
        # pitch_loss returns uv=None when use_uv is off; drop the empty entry
        if losses['uv'] is None:
            del losses['uv']
    if hparams['use_energy_embed']:
        losses['energy'] = self.energy_loss(output['energy_pred'], energy)
    if not return_output:
        return losses
    else:
        return losses, output
def l1_loss(self, decoder_output, target):
    """Masked mean absolute error over non-padded mel frames.

    decoder_output : B x T x n_mel
    target : B x T x n_mel
    """
    weights = self.weights_nonzero_speech(target)
    elementwise = F.l1_loss(decoder_output, target, reduction='none')
    return (elementwise * weights).sum() / weights.sum()
def mse_loss(self, decoder_output, target):
    """Masked mean squared error over non-padded mel frames.

    decoder_output : B x T x n_mel
    target : B x T x n_mel
    """
    weights = self.weights_nonzero_speech(target)
    elementwise = F.mse_loss(decoder_output, target, reduction='none')
    return (elementwise * weights).sum() / weights.sum()
def scaled_mse_loss(self, decoder_output, target, target_mean, target_std):
    """Sigma^-1-scaled masked MSE for the data-dependent prior.

    The covariance is diagonal, so its inverse is the elementwise
    reciprocal of target_std. target_mean is unused here but kept for
    signature compatibility with callers.
    """
    inv_sigma = torch.reciprocal(target_std)
    scaled_err = (decoder_output - target) * inv_sigma
    weights = self.weights_nonzero_speech(target)
    return (scaled_err ** 2 * weights).sum() / weights.sum()
def dur_loss(self, dur_pred, mel2ph, input, split_pause=False, sent_dur_loss=False):
    """Phoneme-duration loss against durations recovered from mel2ph.

    split_pause=True returns separate (phone_loss, seg_loss) for non-pause
    vs pause tokens; sent_dur_loss=True additionally returns a
    sentence-level relative duration L1.
    """
    B, T_t = input.shape
    # count mel frames assigned to each phoneme; index 0 collects padding
    dur_gt = mel2ph.new_zeros(B, T_t + 1).scatter_add(1, mel2ph, torch.ones_like(mel2ph))
    dur_gt = dur_gt[:, 1:]
    nonpadding = (input != 0).float()
    if split_pause:
        is_pause = (input == self.phone_encoder.seg()) | (input == self.phone_encoder.unk()) | (
                input == self.phone_encoder.eos())
        is_pause = is_pause.float()
        phone_loss = self.dur_loss_fn(dur_pred, dur_gt, (1 - is_pause) * nonpadding) \
                     * hparams['lambda_dur']
        seg_loss = self.dur_loss_fn(dur_pred, dur_gt, is_pause) \
                   * hparams['lambda_dur']
        return phone_loss, seg_loss
    ph_dur_loss = self.dur_loss_fn(dur_pred, dur_gt, nonpadding) * hparams['lambda_dur']
    if not sent_dur_loss:
        return ph_dur_loss
    else:
        # predictions are in log(dur+1) domain — invert before summing frames
        dur_pred = (dur_pred.exp() - 1).clamp(min=0) * nonpadding
        dur_gt = dur_gt.float() * nonpadding
        sent_dur_loss = F.l1_loss(dur_pred.sum(-1), dur_gt.sum(-1), reduction='none') / dur_gt.sum(-1)
        sent_dur_loss = sent_dur_loss.mean()
        return ph_dur_loss, sent_dur_loss
def pitch_loss(self, p_pred, pitch, uv):
    """Voiced/unvoiced BCE plus masked f0 regression.

    Returns (uv_loss, pitch_loss); uv_loss is None when use_uv is off.
    NOTE(review): in the use_uv=False branch the caller's `pitch` tensor is
    modified in place (unvoiced frames set to -4).
    """
    assert p_pred[..., 0].shape == pitch.shape
    assert p_pred[..., 0].shape == uv.shape
    # -200 is the padding sentinel used by the collater for f0
    nonpadding = (pitch != -200).float().reshape(-1)
    if hparams['use_uv']:
        uv_loss = (F.binary_cross_entropy_with_logits(
            p_pred[:, :, 1].reshape(-1), uv.reshape(-1), reduction='none') * nonpadding).sum() \
                  / nonpadding.sum() * hparams['lambda_uv']
        # restrict the f0 loss to voiced, non-padded frames
        nonpadding = (pitch != -200).float() * (uv == 0).float()
        nonpadding = nonpadding.reshape(-1)
    else:
        pitch[uv > 0] = -4
        uv_loss = None
    pitch_loss_fn = F.l1_loss if hparams['pitch_loss'] == 'l1' else F.mse_loss
    pitch_loss = (pitch_loss_fn(
        p_pred[:, :, 0].reshape(-1), pitch.reshape(-1), reduction='none') * nonpadding).sum() \
                 / nonpadding.sum() * hparams['lambda_pitch']
    return uv_loss, pitch_loss
def energy_loss(self, energy_pred, energy):
    """Masked MSE on frame energies, scaled by lambda_energy.

    Frames with exactly zero energy are treated as padding and excluded.
    """
    mask = (energy != 0).float()
    elementwise = F.mse_loss(energy_pred, energy, reduction='none')
    masked_mean = (elementwise * mask).sum() / mask.sum()
    return masked_mean * hparams['lambda_energy']
# inference beta grid search implementation, not optimal but serves the purpose
# grid search function modified from WaveGrad: https://github.com/ivanvovk/WaveGrad/blob/master/benchmark.py
# BSD 3-Clause License
# Copyright (c) 2020, Ivan Vovk, All rights reserved.
# inference beta grid search implementation, not optimal but serves the purpose
def get_best_noise_schedule(self, src_tokens, mel2ph, spk_embed=None, ref_mels=None, target_mean=None, target_std=None,
                            target_nonpadding=None, pitch=None, uv=None, energy=None, is_training=True, fast_sampling=False,
                            skip_decoder=False, n_iter=6, betas_range_start=1e-4, betas_range_end=1e-1):
    """Grid-search an n_iter-step fast inference noise schedule.

    Evaluates candidate beta schedules by mel L1 against ref_mels, saves the
    best one to work_dir, and installs it on the decoder. Only 2, 6, and 12
    steps are supported by the hard-wired search spaces.
    """
    assert ref_mels is not None, "we need target mel to search"

    # method to get the best inference noise schedule from the trained model.
    def generate_betas_grid(n_iter, betas_range):
        # beyond 12 steps, fall back to a single linear schedule (no search)
        if n_iter > 12:
            return np.array([np.linspace(betas_range_start, betas_range_end, n_iter)])
        betas_range = np.log10(betas_range)
        exp_step = (betas_range[1] - betas_range[0]) / (n_iter - 1)
        exponents = 10 ** np.arange(betas_range[0], betas_range[1] + exp_step, step=exp_step)
        max_grid_size = None
        # hard-wired grid search spaces
        # max_grid_size is defined per n_iter to make good compromise between search speed and the quality of the noise schedule
        # too fine-grained max_grid_size does not improve the final audio quality much, but the search speed is way slower
        if n_iter == 2:
            exponents = np.array([1e-1, 1e-1])
            max_grid_size = 9 ** 2
        elif n_iter == 6:
            exponents = np.array([1e-4, 1e-3, 1e-2, 1e-1, 1e-1, 1e-1])
            max_grid_size = 9 ** 6
        elif n_iter == 12:
            exponents = np.array([1e-4, 1e-4, 1e-3, 1e-3, 1e-2, 1e-2, 1e-2, 1e-2, 1e-1, 1e-1, 1e-1, 1e-1])
            max_grid_size = 9 ** 9  # reasonable trade-off. one can increase to 9 ** 10 for more fine-grained search
        else:
            raise NotImplementedError("Not a valid --fast_iter. Only 2, 6, and 12 steps are supported for the grid search!")
        grid = []
        state = int(''.join(['1'] * n_iter))  # initial state
        final_state = 9 ** n_iter
        step = int(np.ceil(final_state / (max_grid_size)))
        # BUGFIX: the placeholder was previously printed literally ("{}-step")
        # because .format(n_iter) was missing
        print("generating {}-step inference schedules for grid search...".format(n_iter))
        for _ in tqdm(range(max_grid_size)):
            # each decimal digit of `state` is a multiplier for one exponent
            multipliers = list(map(int, str(state)))
            # hard-wired rules
            if n_iter in [2, 3]:
                if 0 in multipliers:
                    state += step
                    continue
            elif n_iter == 6:
                if 0 in multipliers or multipliers[3] >= multipliers[4] or multipliers[4] >= multipliers[5]:
                    state += step
                    continue
            elif n_iter == 12:
                if 0 in multipliers or multipliers[0] >= multipliers[1] or multipliers[2] >= multipliers[3] or \
                        multipliers[4] >= multipliers[5] or multipliers[5] >= multipliers[6] or multipliers[6] >= multipliers[7] or \
                        multipliers[8] >= multipliers[9] or multipliers[9] >= multipliers[10] or multipliers[10] >= multipliers[11]:
                    state += step
                    continue
            betas = [mult * exp for mult, exp in zip(multipliers, exponents)]
            grid.append(betas)
            state += step
        return grid

    grid = generate_betas_grid(n_iter, (betas_range_start, betas_range_end))
    grid_low = grid
    best_loss = 999
    best_grid = None
    for i in tqdm(range(len(grid_low))):
        # swap inference noise schedule
        self.model.decoder.params.inference_noise_schedule = grid_low[i]
        # get test loss
        with torch.no_grad():
            outputs = self.model(src_tokens, mel2ph, spk_embed, ref_mels,
                                 target_mean, target_std, target_nonpadding, pitch, uv, None,
                                 is_training=False, fast_sampling=hparams['fast'])
            mel_out = outputs['mel_out']
            loss = self.l1_loss(mel_out, ref_mels).item()
        # update best_grid based on best_loss
        if loss < best_loss:
            print("")
            print("better grid found! loss {} grid {}".format(loss, grid_low[i]))
            best_loss = loss
            best_grid = grid_low[i]
    print("best grid: {}".format(best_grid))
    best_schedule_name = 'betas' + str(hparams['fast_iter']) + '_' + hparams['work_dir'].split('/')[-1] + '_' + str(self.global_step)
    print("saving the best grid to {}".format(best_schedule_name))
    np.save(os.path.join(hparams['work_dir'], best_schedule_name), best_grid)
    self.model.decoder.params.inference_noise_schedule = best_grid
    def test_step(self, sample, batch_idx):
        """Run inference on one test batch and hand the results to ``after_infer``.

        On the first batch (fast sampling only) it either grid-searches the
        best few-step noise schedule or loads a previously saved one, then
        synthesizes mel-spectrograms and prints the real-time factor (RTF).
        """
        # speaker conditioning: either a lookup id or a precomputed embedding
        spk_embed = sample.get('spk_embed') if not hparams['use_spk_id'] else sample.get('spk_ids')
        input = sample['src_tokens']
        # phoneme-level computed target statistics. stats are based on the training set
        target_mean = sample['targets_mean'] if 'targets_mean' in sample else None
        target_std = sample['targets_std'] if 'targets_std' in sample else None
        if hparams['profile_infer']:
            if batch_idx % 10 == 0:
                torch.cuda.empty_cache()
            # profiling mode feeds ground-truth alignment/pitch to isolate decoder cost
            mel2ph = sample['mel2ph']
            pitch = sample['pitch']
            uv = sample['uv']
        else:
            mel2ph = None
            pitch = None
            uv = None
        # try to search for best noise schedule. if the best schedule exists, load and update the noise schedule
        if batch_idx == 0 and hparams['fast']:
            best_schedule_name = 'betas'+str(hparams['fast_iter'])+'_'+hparams['work_dir'].split('/')[-1] + '_' + str(self.global_step) + '.npy'
            if best_schedule_name not in os.listdir(hparams['work_dir']):
                print("INFO: searching for best {}-step inference beta schedule!".format(hparams['fast_iter']))
                mel2ph_for_search = sample['mel2ph']
                ref_mel_for_search = sample['targets']
                if hparams['dur'] == 'mas':
                    # get mask for target beforehand for MAS
                    # only get the first dim (previously repeated along mel bin)
                    target_nonpadding_for_search = self.weights_nonzero_speech(ref_mel_for_search)[:, :, 0] # [B, T_mel]
                else:
                    target_nonpadding_for_search = None
                # grid search saves the winning schedule to work_dir as a side effect
                self.get_best_noise_schedule(input, mel2ph_for_search, spk_embed, ref_mel_for_search,
                    target_mean, target_std, target_nonpadding_for_search, pitch, uv, None,
                    is_training=False, fast_sampling=hparams['fast'],
                    n_iter=hparams['fast_iter'], betas_range_start=hparams['diff_beta_start'], betas_range_end=hparams['diff_beta_end'])
            else:
                best_schedule = np.load(os.path.join(hparams['work_dir'], best_schedule_name))
                self.model.decoder.params.inference_noise_schedule = best_schedule
                print("INFO: saved noise schedule found in {}".format(os.path.join(hparams['work_dir'], best_schedule_name)))
                print("diffusion decoder inference noise schedule is reset to {}".format(best_schedule))
            if hparams['fast_iter'] > 12:
                print("WARNING: --fast_iter higher than 12 is provided. Grid search is disabled and will use the linearly spaced noise schedule!")
                print("WARNING: the quality is expected to be WORSE than the grid-searched noise schedule!")
                print("WARNING: the officially supported --fast_iter is 2, 6, or 12 steps!")
        with tts_utils.Timer('fs', print_time=hparams['profile_infer']):
            # synchronize around the forward pass so `toc` measures GPU time too
            torch.cuda.synchronize()
            tic = time.time()
            outputs = self.model(input, mel2ph, spk_embed, None, target_mean, target_std, None, pitch, uv, None,
                    is_training=False, fast_sampling=hparams['fast'])
            torch.cuda.synchronize()
            toc = time.time() - tic
        # real-time factor = generation time / audio duration (hop 256 @ 22050 Hz)
        wav_dur = outputs['mel_out'].shape[1] * 256 / 22050.
        rtf = toc / wav_dur
        print("\nRTF: {:.4f}".format(rtf))
        # denoise
        if hparams['gen_wav_denoise']:
            # synthesize a short (~40 frame) "silence" clip used downstream for denoising
            mel2ph_pred = outputs['mel2ph']
            input_noise = torch.ones_like(input[:, :1]).long() * 3
            mel2ph_noise = torch.ones_like(mel2ph_pred)
            mel2ph_noise = mel2ph_noise * (mel2ph_pred > 0).long()
            mel2ph_noise = mel2ph_noise[:, :40]
            pitch_noise = torch.zeros_like(mel2ph_pred).float()[:, :40]
            uv_noise = torch.ones_like(mel2ph_pred)[:, :40]
            noise_outputs = self.model(input_noise, mel2ph_noise, spk_embed, None, pitch_noise, uv_noise)
            sample['noise_outputs'] = noise_outputs['mel_out']
        sample['outputs'] = outputs['mel_out']
        sample['pitch_pred'] = outputs.get('pitch')
        sample['phoneme_aligned'] = outputs['phoneme_aligned']
        sample['uv'] = outputs['uv']
        if sample['pitch'] is not None:
            # undo pitch normalization before saving/plotting
            sample['pitch'] = restore_pitch(sample['pitch'], uv if hparams['use_uv'] else None, hparams)
        if 'encoder_proj_aligned' in outputs: # MAS only
            sample['encoder_proj_aligned'] = outputs['encoder_proj_aligned']
        return self.after_infer(sample)
def prepare_vocoder_hfg(self):
import json
from env import AttrDict
from models import Generator
from inference import load_checkpoint
checkpoint_file = "hifigan_pretrained/generator_v1"
config_file = os.path.join(os.path.split(checkpoint_file)[0], 'config.json')
with open(config_file) as f:
data = f.read()
global h
json_config = json.loads(data)
h = AttrDict(json_config)
torch.manual_seed(h.seed)
device = torch.device('cuda')
self.vocoder = Generator(h).to(device)
state_dict_g = load_checkpoint(checkpoint_file, device)
self.vocoder.load_state_dict(state_dict_g['generator'])
self.vocoder.eval()
self.vocoder.remove_weight_norm()
def inv_spec_hfg(self, spec):
"""
:param spec: [T, 80]
:return:
"""
spec = torch.FloatTensor(spec).unsqueeze(0).permute(0, 2, 1).cuda() # [B, 80 ,T]
y_g_hat = self.vocoder(spec)
audio = y_g_hat.squeeze().cpu().numpy()
return audio
    def after_infer(self, predictions):
        """Vocode and persist one batch of predictions (and ground truth).

        Converts predicted/target mels to audio with HiFi-GAN and dispatches
        the actual disk writes to a single background worker process; in
        profiling mode it only accumulates generated audio duration.
        """
        if self.saving_result_pool is None and not hparams['profile_infer']:
            # lazily create one background worker so saving doesn't block inference
            self.saving_result_pool = Pool(1)
            self.saving_results_futures = []
        if hparams['vocoder'] == 'hfg':
            self.prepare_vocoder_hfg()
        else:
            raise NotImplementedError("unknown vocoder")
        predictions = tts_utils.unpack_dict_to_list(predictions)
        t = tqdm(predictions)
        for num_predictions, prediction in enumerate(t):
            # move all tensors to CPU numpy before handing to the worker process
            for k, v in prediction.items():
                if type(v) is torch.Tensor:
                    prediction[k] = v.cpu().numpy()
            utt_id = prediction.get('utt_id')
            text = prediction.get('text')
            phoneme = prediction.get('phoneme_aligned')
            uv = prediction.get('uv')
            targets = self.remove_padding(prediction.get("targets"))
            outputs = self.remove_padding(prediction["outputs"])
            noise_outputs = self.remove_padding(prediction.get("noise_outputs"))
            pitch_pred = self.remove_padding(prediction.get("pitch_pred"))
            # pitch uses -200 as its padding value, not 0
            pitch_gt = self.remove_padding(prediction.get("pitch"), -200)
            # output dir name encodes sampling mode, step and run name
            if hparams['fast']:
                gen_dir = os.path.join(hparams['work_dir'],
                                   f'generated_fast{hparams["fast_iter"]}_{self.trainer.global_step}_{hparams["gen_dir_name"]}')
            else:
                gen_dir = os.path.join(hparams['work_dir'],
                                   f'generated_{self.trainer.global_step}_{hparams["gen_dir_name"]}')
            if hparams['vocoder'] == 'hfg':
                wav_pred = self.inv_spec_hfg(outputs)
            else:
                raise NotImplementedError("unknown vocoder")
            if not hparams['profile_infer']:
                os.makedirs(gen_dir, exist_ok=True)
                os.makedirs(f'{gen_dir}/wavs', exist_ok=True)
                os.makedirs(f'{gen_dir}/spec_plot', exist_ok=True)
                os.makedirs(f'{gen_dir}/pitch_plot', exist_ok=True)
                os.makedirs(f'{gen_dir}/spec', exist_ok=True)
                os.makedirs(f'{gen_dir}/text', exist_ok=True)
                os.makedirs(f'{gen_dir}/phoneme', exist_ok=True)
                os.makedirs(f'{gen_dir}/uv', exist_ok=True)
                # 'P' prefix = prediction; saved asynchronously by the worker pool
                self.saving_results_futures.append(
                    self.saving_result_pool.apply_async(self.save_result, args=[
                        wav_pred, outputs, f'P', utt_id, text, gen_dir, [pitch_pred, pitch_gt], noise_outputs, None, None, phoneme, uv]))
                if hparams['vocoder'] == 'hfg':
                    wav_gt = self.inv_spec_hfg(targets)
                else:
                    raise NotImplementedError("unknown vocoder")
                if targets is not None:
                    # 'G' prefix = ground truth (vocoded reference mel)
                    self.saving_results_futures.append(
                        self.saving_result_pool.apply_async(self.save_result, args=[
                            wav_gt, targets, 'G', utt_id, text, gen_dir, pitch_gt, noise_outputs, None, None, phoneme, uv]))
                t.set_description(
                    f"Pred_shape: {outputs.shape}, gt_shape: {targets.shape}")
            else:
                # profiling: just accumulate the total generated audio duration
                if 'gen_wav_time' not in self.stats:
                    self.stats['gen_wav_time'] = 0
                self.stats['gen_wav_time'] += len(wav_pred) / hparams['audio_sample_rate']
                print('gen_wav_time: ', self.stats['gen_wav_time'])
        return {}
    @staticmethod
    def save_result(wav_out, mel, prefix, utt_id, text, gen_dir,
                    pitch=None, noise_spec=None, alignment=None, str_phs=None,
                    phoneme=None, uv=None):
        """Write one utterance's wav, spectrogram, plots and metadata under ``gen_dir``.

        ``prefix`` distinguishes predicted ('P') from ground-truth ('G') items.
        Runs inside a worker process (apply_async), so it stays self-contained.
        """
        # ':' is not filesystem-safe; also cap the text part of the filename at 80 chars
        base_fn = f'[{prefix}][{utt_id}]'
        base_fn += text.replace(":", "%3A")[:80]
        audio.save_wav(wav_out, f'{gen_dir}/wavs/{base_fn}.wav', hparams['audio_sample_rate'],
                       norm=hparams['out_wav_norm'])
        audio.plot_spec(mel.T, f'{gen_dir}/spec_plot/{base_fn}.png')
        with open(f'{gen_dir}/text/{base_fn}.txt', 'w') as f:
            f.write(text)
        torch.save(mel.T, f'{gen_dir}/spec/{base_fn}.pt')
        if pitch is not None:
            audio.plot_curve(pitch, f'{gen_dir}/pitch_plot/{base_fn}.png', 50, 500)
        if alignment is not None:
            # NOTE(review): current callers pass alignment=None; this branch writes
            # into '{gen_dir}/attn_plot', a directory after_infer never creates —
            # confirm before enabling alignment saving.
            fig, ax = plt.subplots(figsize=(12, 16))
            im = ax.imshow(alignment, aspect='auto', origin='lower',
                           interpolation='none')
            decoded_txt = str_phs.split(" ")
            ax.set_yticks(np.arange(len(decoded_txt)))
            ax.set_yticklabels(list(decoded_txt), fontsize=6)
            fig.colorbar(im, ax=ax)
            fig.savefig(f'{gen_dir}/attn_plot/{base_fn}_attn.png', format='png')
            plt.close()
        if phoneme is not None:
            torch.save(phoneme, f'{gen_dir}/phoneme/{base_fn}.pt')
        if uv is not None:
            torch.save(uv, f'{gen_dir}/uv/{base_fn}.pt')
def test_end(self, outputs):
self.saving_result_pool.close()
[f.get() for f in tqdm(self.saving_results_futures)]
self.saving_result_pool.join()
return {}
    ##########
    # utils
    ##########
    def remove_padding(self, x, padding_idx=0):
        """Strip entries equal to ``padding_idx`` from ``x`` (delegates to tts_utils)."""
        return tts_utils.remove_padding(x, padding_idx)
def weights_nonzero_speech(self, target):
# target : B x T x mel
# Assign weight 1.0 to all labels except for padding (id=0).
dim = target.size(-1)
return target.abs().sum(-1, keepdim=True).ne(0).float().repeat(1, 1, dim)
# Script entry point: delegate to the task framework's CLI runner.
if __name__ == '__main__':
    PriorGradTask.start()
| 44,586 | 48.762277 | 165 | py |
NeuralSpeech | NeuralSpeech-master/FastCorrect/eval_aishell.py | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import sys
import torch
import argparse
import re
#from fastcorrect_model import FastCorrectModel
import os
import os.path
import time
import json
import numpy as np
from fairseq import utils
utils.import_user_module(argparse.Namespace(user_dir='./FastCorrect'))
from FastCorrect.fastcorrect_model import FastCorrectModel
def remove_ch_spaces(input_str):
    """Strip the string, then delete whitespace runs sitting between two CJK characters."""
    cjk_gap = r"(?<=[\u4e00-\u9fff])(\s+)(?=[\u4e00-\u9fff])"
    return re.sub(cjk_gap, "", input_str.strip())
# --- positional CLI arguments (all but argv[1] optional) ---
# argv[3]: model dir; argv[4]: max decode iterations; argv[5]: edit threshold;
# argv[6]: checkpoint epoch. Bare except-with-default keeps the script usable
# with partial argument lists.
try:
    model_name_or_path = sys.argv[3]
except:
    model_name_or_path = "checkpoints/shared_baseline"
try:
    iter_decode_max_iter = int(sys.argv[4])
except:
    iter_decode_max_iter = -1
try:
    edit_thre = float(sys.argv[5])
except:
    edit_thre = 0
try:
    test_epoch = int(sys.argv[6])
    checkpoint_file = "checkpoint{}.pt".format(test_epoch)
except:
    test_epoch = 'best'
    checkpoint_file = "checkpoint_best.pt"
print("test {}/{}".format(model_name_or_path, checkpoint_file))
# NOTE: the next two assignments are intentionally incomplete placeholders;
# the file does not parse until they are filled in.
data_name_or_path = # <Path-to-AISHELL1-Training-Binary-Data>
bpe = "sentencepiece"
sentencepiece_model = # <path-to-sentencepiece_model>, you can use arbitrary sentencepiece for our pretrained model since it is a char-level model
commonset_dir = "./eval_data"
# result/tmp dirs are suffixed with the decoding config and epoch so runs don't collide
res_dir = os.path.join(model_name_or_path, ("results_aishell" if (iter_decode_max_iter == -1) else ("results_aishell_b" + str(iter_decode_max_iter) + '_t' + str(edit_thre))).replace('results', 'results_' + str(test_epoch)))
tmp_dir = os.path.join(model_name_or_path, ("tmp_aishell" if (iter_decode_max_iter == -1) else ("tmp_aishell_b" + str(iter_decode_max_iter) + '_t' + str(edit_thre))).replace('tmp', 'tmp_' + str(test_epoch)))
os.makedirs(res_dir, exist_ok=True)
os.makedirs(tmp_dir, exist_ok=True)
# argv[1]: comma-separated evaluation subset names (required)
try:
    short_set = sys.argv[1].split(',')
except:
    raise ValueError()
print("short_set:", short_set)
#transf_gec = FastCorrectModel.from_pretrained(model_name_or_path, checkpoint_file=checkpoint_file, data_name_or_path=data_name_or_path, bpe=bpe, sentencepiece_model=sentencepiece_model, user_dir='./FastCorrect')
transf_gec = FastCorrectModel.from_pretrained(model_name_or_path, checkpoint_file=checkpoint_file, data_name_or_path=data_name_or_path, bpe=bpe, sentencepiece_model=sentencepiece_model)
transf_gec.eval()
transf_gec.cuda()
# Correct each subset's ASR hypotheses and write espnet-style result JSONs.
# Fixes vs. original: the except branch appended to an undefined
# `translated_list` (so any model error surfaced as a NameError) and had an
# unreachable `continue` after `raise e`; also `text` was rebound to the
# binarized tensor, which broke the string concatenation in the except path.
for input_tsv in [os.path.join(commonset_dir, f, "data.json") for f in short_set]:
    all_time = []
    eval_origin_dict = json.load(open(input_tsv, 'r', encoding='utf-8'))
    # utt-id -> (ASR hypothesis to correct, ground-truth token string)
    translate_input_dict = {}
    for k, v in eval_origin_dict["utts"].items():
        translate_input_dict[k] = (v["output"][0]["rec_token"].replace('<eos>', '').strip(), v["output"][0]["token"])
    translated_output_dict = {}
    for k, v in translate_input_dict.items():
        text = v[0]
        gt = v[1]
        start_time = time.time()
        time_ok = False
        try:
            if iter_decode_max_iter != -1:
                # explicit binarize/generate path so the decode-iteration cap applies
                text_tokens = transf_gec.binarize(text)
                batched_hypos = transf_gec.generate(text_tokens, iter_decode_max_iter=iter_decode_max_iter)
                translated = [transf_gec.decode(hypos[0]['tokens']) for hypos in batched_hypos][0]
            else:
                translated = transf_gec.translate(text)
            if isinstance(translated, tuple):
                # model reported its own timing: (text, seconds)
                all_time.append(translated[1])
                time_ok = True
                translated = translated[0]
            translated_output_dict[k] = (text, gt, translated)
        except Exception as e:
            # log the offending utterance, then propagate the real error
            print(input_tsv + "\t" + text + "\n")
            raise e
        end_time = time.time()
        if not time_ok:
            all_time.append(end_time - start_time)
        # write the corrected text back into the espnet-style result dict
        eval_origin_dict["utts"][k]["output"][0]["rec_text"] = " ".join("".join(translated.split()).replace('▁', ' ').strip().split())
        translated_char = [i for i in eval_origin_dict["utts"][k]["output"][0]["rec_text"]]
        eval_origin_dict["utts"][k]["output"][0]["rec_token"] = " ".join(translated_char)
    os.makedirs(os.path.join(res_dir, input_tsv.split('/')[-2]), exist_ok=True)
    # per-subset timing summary: count, total seconds, mean seconds
    with open(os.path.join(res_dir, input_tsv.split('/')[-2], input_tsv.split('/')[-2] + "_time.txt"), 'w') as outfile:
        outfile.write("{}\t{}\t{}\n".format(len(all_time), sum(all_time), sum(all_time)/len(all_time)))
    json.dump(eval_origin_dict, open(os.path.join(res_dir, input_tsv.split('/')[-2], 'data.json'), 'w', encoding='utf-8'), indent=4, sort_keys=True, ensure_ascii=False)
| 4,731 | 41.630631 | 223 | py |
NeuralSpeech | NeuralSpeech-master/FastCorrect/FC_utils/language_pair_dataset.py | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import numpy as np
import torch
from fairseq.data import FairseqDataset, data_utils
import random
import math
logger = logging.getLogger(__name__)
def collate_2d_tokens(
    values,
    pad_idx,
    eos_idx=None,
    left_pad=False,
    move_eos_to_beginning=False,
    pad_to_length=None,
    pad_to_multiple=1,
):
    """Pad a list of 2-D tensors ([len_i, hidden]) into one [batch, max_len, hidden] tensor.

    Padding positions are filled with ``pad_idx``; with ``left_pad`` the
    content is right-aligned. ``move_eos_to_beginning`` rotates the last row
    (or writes ``eos_idx``) into position 0 and shifts the rest right — the
    usual trick for building ``prev_output_tokens``.
    """
    hidden = values[0].size(1)
    target_len = max(v.size(0) for v in values)
    if pad_to_length is not None:
        target_len = max(target_len, pad_to_length)
    if pad_to_multiple != 1 and target_len % pad_to_multiple != 0:
        # round up to the next multiple of pad_to_multiple
        target_len = int(((target_len - 0.1) // pad_to_multiple + 1) * pad_to_multiple)
    out = values[0].new(len(values), target_len, hidden).fill_(pad_idx)

    def fill(dst, src):
        assert dst.numel() == src.numel()
        if not move_eos_to_beginning:
            dst.copy_(src)
        else:
            dst[0] = src[-1] if eos_idx is None else eos_idx
            dst[1:] = src[:-1]

    for row, v in enumerate(values):
        slot = out[row][target_len - len(v):] if left_pad else out[row][: len(v)]
        fill(slot, v)
    return out
def collate(
    samples,
    pad_idx,
    eos_idx,
    left_pad_source=True,
    left_pad_target=False,
    input_feeding=True,
    pad_to_length=None,
    pad_to_multiple=1,
):
    """Collate a list of sample dicts into one padded batch dict.

    Samples are sorted by descending source length. Besides the standard
    fairseq keys, FastCorrect samples may carry 'wer_dur', 'to_be_edited'
    (source-aligned) and 'for_wer_gather' (target-aligned), which are padded
    and reordered alongside the tokens.
    """
    if len(samples) == 0:
        return {}
    # dispatch 1-D tensors to the stock fairseq collator and 2-D tensors
    # (e.g. per-token feature rows) to collate_2d_tokens
    def merge(key, left_pad, move_eos_to_beginning=False, pad_to_length=None):
        if len(samples[0][key].shape) == 1:
            return data_utils.collate_tokens(
                [s[key] for s in samples],
                pad_idx,
                eos_idx,
                left_pad,
                move_eos_to_beginning,
                pad_to_length=pad_to_length,
                pad_to_multiple=pad_to_multiple,
            )
        elif len(samples[0][key].shape) == 2:
            return collate_2d_tokens(
                [s[key] for s in samples],
                pad_idx,
                eos_idx,
                left_pad,
                move_eos_to_beginning,
                pad_to_length=pad_to_length,
                pad_to_multiple=pad_to_multiple,
            )
        else:
            raise ValueError("Unsupported condition!")
    def check_alignment(alignment, src_len, tgt_len):
        # reject alignments whose indices fall outside the (unpadded) lengths
        if alignment is None or len(alignment) == 0:
            return False
        if (
            alignment[:, 0].max().item() >= src_len - 1
            or alignment[:, 1].max().item() >= tgt_len - 1
        ):
            logger.warning("alignment size mismatch found, skipping alignment!")
            return False
        return True
    def compute_alignment_weights(alignments):
        """
        Given a tensor of shape [:, 2] containing the source-target indices
        corresponding to the alignments, a weight vector containing the
        inverse frequency of each target index is computed.
        For e.g. if alignments = [[5, 7], [2, 3], [1, 3], [4, 2]], then
        a tensor containing [1., 0.5, 0.5, 1] should be returned (since target
        index 3 is repeated twice)
        """
        align_tgt = alignments[:, 1]
        _, align_tgt_i, align_tgt_c = torch.unique(
            align_tgt, return_inverse=True, return_counts=True
        )
        align_weights = align_tgt_c[align_tgt_i[np.arange(len(align_tgt))]]
        return 1.0 / align_weights.float()
    id = torch.LongTensor([s["id"] for s in samples])
    src_tokens = merge(
        "source",
        left_pad=left_pad_source,
        pad_to_length=pad_to_length["source"] if pad_to_length is not None else None,
    )
    # sort by descending source length
    if len(samples[0]["source"].shape) == 1:
        src_lengths = torch.LongTensor(
            [s["source"].ne(pad_idx).long().sum() for s in samples]
        )
    elif len(samples[0]["source"].shape) == 2:
        # for 2-D sources, length is counted along the first feature column
        src_lengths = torch.LongTensor(
            [s["source"][:, 0].ne(pad_idx).long().sum() for s in samples]
        )
    else:
        raise ValueError("Unsupported condition!")
    src_lengths, sort_order = src_lengths.sort(descending=True)
    id = id.index_select(0, sort_order)
    src_tokens = src_tokens.index_select(0, sort_order)
    prev_output_tokens = None
    target = None
    if samples[0].get("wer_dur", None) is not None:
        # source-aligned FastCorrect features follow the source padding side
        wer_dur = merge(
            "wer_dur",
            left_pad=left_pad_source,
            pad_to_length=pad_to_length["source"] if pad_to_length is not None else None,
        )
        wer_dur = wer_dur.index_select(0, sort_order)
        to_be_edited = merge(
            "to_be_edited",
            left_pad=left_pad_source,
            pad_to_length=pad_to_length["source"] if pad_to_length is not None else None,
        )
        to_be_edited = to_be_edited.index_select(0, sort_order)
    if samples[0].get("target", None) is not None:
        target = merge(
            "target",
            left_pad=left_pad_target,
            pad_to_length=pad_to_length["target"]
            if pad_to_length is not None
            else None,
        )
        target = target.index_select(0, sort_order)
        if samples[0].get("wer_dur", None) is not None:
            # target-aligned gather indices follow the target padding side
            for_wer_gather = merge(
                "for_wer_gather",
                left_pad=left_pad_target,
                pad_to_length=pad_to_length["target"]
                if pad_to_length is not None
                else None,
            )
            for_wer_gather = for_wer_gather.index_select(0, sort_order)
        tgt_lengths = torch.LongTensor(
            [s["target"].ne(pad_idx).long().sum() for s in samples]
        ).index_select(0, sort_order)
        ntokens = tgt_lengths.sum().item()
        if samples[0].get("prev_output_tokens", None) is not None:
            prev_output_tokens = merge("prev_output_tokens", left_pad=left_pad_target)
        elif input_feeding:
            # we create a shifted version of targets for feeding the
            # previous output token(s) into the next decoder step
            prev_output_tokens = merge(
                "target",
                left_pad=left_pad_target,
                move_eos_to_beginning=True,
                pad_to_length=pad_to_length["target"]
                if pad_to_length is not None
                else None,
            )
    else:
        ntokens = src_lengths.sum().item()
    # NOTE(review): if 'wer_dur' is present but 'target' is None,
    # `for_wer_gather` below is never assigned and this raises NameError —
    # confirm that werdur samples always carry a target.
    if samples[0].get("wer_dur", None) is not None:
        batch = {
            "id": id,
            "nsentences": len(samples),
            "ntokens": ntokens,
            "net_input": {
                "src_tokens": src_tokens,
                "src_lengths": src_lengths,
                "wer_dur": wer_dur,
                "to_be_edited": to_be_edited,
                "for_wer_gather": for_wer_gather,
            },
            "target": target,
        }
    else:
        batch = {
            "id": id,
            "nsentences": len(samples),
            "ntokens": ntokens,
            "net_input": {
                "src_tokens": src_tokens,
                "src_lengths": src_lengths,
            },
            "target": target,
        }
    if prev_output_tokens is not None:
        batch["net_input"]["prev_output_tokens"] = prev_output_tokens.index_select(
            0, sort_order
        )
    if samples[0].get("alignment", None) is not None:
        bsz, tgt_sz = batch["target"].shape
        src_sz = batch["net_input"]["src_tokens"].shape[1]
        # shift each sample's alignment indices into batch-flattened coordinates
        offsets = torch.zeros((len(sort_order), 2), dtype=torch.long)
        offsets[:, 1] += torch.arange(len(sort_order), dtype=torch.long) * tgt_sz
        if left_pad_source:
            offsets[:, 0] += src_sz - src_lengths
        if left_pad_target:
            offsets[:, 1] += tgt_sz - tgt_lengths
        alignments = [
            alignment + offset
            for align_idx, offset, src_len, tgt_len in zip(
                sort_order, offsets, src_lengths, tgt_lengths
            )
            for alignment in [samples[align_idx]["alignment"].view(-1, 2)]
            if check_alignment(alignment, src_len, tgt_len)
        ]
        if len(alignments) > 0:
            alignments = torch.cat(alignments, dim=0)
            align_weights = compute_alignment_weights(alignments)
            batch["alignments"] = alignments
            batch["align_weights"] = align_weights
    if samples[0].get("constraints", None) is not None:
        # Collate the packed constraints across the samples, padding to
        # the length of the longest sample.
        lens = [sample.get("constraints").size(0) for sample in samples]
        max_len = max(lens)
        constraints = torch.zeros((len(samples), max(lens))).long()
        for i, sample in enumerate(samples):
            constraints[i, 0 : lens[i]] = samples[i].get("constraints")
        batch["constraints"] = constraints
    return batch
class LanguagePairDataset(FairseqDataset):
    """
    A pair of torch.utils.data.Datasets.
    Args:
        src (torch.utils.data.Dataset): source dataset to wrap
        src_sizes (List[int]): source sentence lengths
        src_dict (~fairseq.data.Dictionary): source vocabulary
        tgt (torch.utils.data.Dataset, optional): target dataset to wrap
        tgt_sizes (List[int], optional): target sentence lengths
        tgt_dict (~fairseq.data.Dictionary, optional): target vocabulary
        left_pad_source (bool, optional): pad source tensors on the left side
            (default: True).
        left_pad_target (bool, optional): pad target tensors on the left side
            (default: False).
        shuffle (bool, optional): shuffle dataset elements before batching
            (default: True).
        input_feeding (bool, optional): create a shifted version of the targets
            to be passed into the model for teacher forcing (default: True).
        remove_eos_from_source (bool, optional): if set, removes eos from end
            of source if it's present (default: False).
        append_eos_to_target (bool, optional): if set, appends eos to end of
            target if it's absent (default: False).
        align_dataset (torch.utils.data.Dataset, optional): dataset
            containing alignments.
        constraints (Tensor, optional): 2d tensor with a concatenated, zero-
            delimited list of constraints for each sentence.
        append_bos (bool, optional): if set, appends bos to the beginning of
            source/target sentence.
        num_buckets (int, optional): if set to a value greater than 0, then
            batches will be bucketed into the given number of batch shapes.
        src_lang_id (int, optional): source language ID, if set, the collated batch
            will contain a field 'src_lang_id' in 'net_input' which indicates the
            source language of the samples.
        tgt_lang_id (int, optional): target language ID, if set, the collated batch
            will contain a field 'tgt_lang_id' which indicates the target language
            of the samples.
        src_with_werdur (bool, optional): FastCorrect-specific; if set, each
            source item packs the token ids followed by offset-encoded WER
            duration codes (see ``__getitem__``) (default: False).
        bos_prepended_outside (bool, optional): if set, assume bos was already
            prepended to the source before the werdur codes were packed
            (default: False).
    """
    def __init__(
        self,
        src,
        src_sizes,
        src_dict,
        tgt=None,
        tgt_sizes=None,
        tgt_dict=None,
        left_pad_source=True,
        left_pad_target=False,
        shuffle=True,
        input_feeding=True,
        remove_eos_from_source=False,
        append_eos_to_target=False,
        align_dataset=None,
        constraints=None,
        append_bos=False,
        eos=None,
        num_buckets=0,
        src_lang_id=None,
        tgt_lang_id=None,
        pad_to_multiple=1,
        src_with_werdur=False,
        bos_prepended_outside=False,
    ):
        if tgt_dict is not None:
            # source and target dictionaries must share special symbol ids
            assert src_dict.pad() == tgt_dict.pad()
            assert src_dict.eos() == tgt_dict.eos()
            assert src_dict.unk() == tgt_dict.unk()
        if tgt is not None:
            assert len(src) == len(
                tgt
            ), "Source and target must contain the same number of examples"
        self.src = src
        self.tgt = tgt
        self.src_sizes = np.array(src_sizes)
        self.tgt_sizes = np.array(tgt_sizes) if tgt_sizes is not None else None
        self.sizes = (
            np.vstack((self.src_sizes, self.tgt_sizes)).T
            if self.tgt_sizes is not None
            else self.src_sizes
        )
        self.src_dict = src_dict
        self.tgt_dict = tgt_dict
        self.left_pad_source = left_pad_source
        self.left_pad_target = left_pad_target
        self.shuffle = shuffle
        self.input_feeding = input_feeding
        self.remove_eos_from_source = remove_eos_from_source
        self.append_eos_to_target = append_eos_to_target
        self.align_dataset = align_dataset
        if self.align_dataset is not None:
            assert (
                self.tgt_sizes is not None
            ), "Both source and target needed when alignments are provided"
        self.constraints = constraints
        self.append_bos = append_bos
        self.eos = eos if eos is not None else src_dict.eos()
        self.src_lang_id = src_lang_id
        self.tgt_lang_id = tgt_lang_id
        if num_buckets > 0:
            from fairseq.data import BucketPadLengthDataset
            self.src = BucketPadLengthDataset(
                self.src,
                sizes=self.src_sizes,
                num_buckets=num_buckets,
                pad_idx=self.src_dict.pad(),
                left_pad=self.left_pad_source,
            )
            self.src_sizes = self.src.sizes
            logger.info("bucketing source lengths: {}".format(list(self.src.buckets)))
            if self.tgt is not None:
                self.tgt = BucketPadLengthDataset(
                    self.tgt,
                    sizes=self.tgt_sizes,
                    num_buckets=num_buckets,
                    pad_idx=self.tgt_dict.pad(),
                    left_pad=self.left_pad_target,
                )
                self.tgt_sizes = self.tgt.sizes
                logger.info(
                    "bucketing target lengths: {}".format(list(self.tgt.buckets))
                )
            # determine bucket sizes using self.num_tokens, which will return
            # the padded lengths (thanks to BucketPadLengthDataset)
            # NOTE(review): np.long was removed in NumPy 1.24+; this requires
            # an older NumPy — confirm the pinned version.
            num_tokens = np.vectorize(self.num_tokens, otypes=[np.long])
            self.bucketed_num_tokens = num_tokens(np.arange(len(self.src)))
            self.buckets = [
                (None, num_tokens) for num_tokens in np.unique(self.bucketed_num_tokens)
            ]
        else:
            self.buckets = None
        self.pad_to_multiple = pad_to_multiple
        self.src_with_werdur = src_with_werdur
        self.bos_prepended_outside = bos_prepended_outside
    def get_batch_shapes(self):
        return self.buckets
    def __getitem__(self, index):
        tgt_item = self.tgt[index] if self.tgt is not None else None
        src_item = self.src[index]
        # Append EOS to end of tgt sentence if it does not have an EOS and remove
        # EOS from end of src sentence if it exists. This is useful when we use
        # use existing datasets for opposite directions i.e., when we want to
        # use tgt_dataset as src_dataset and vice versa
        if self.append_eos_to_target:
            eos = self.tgt_dict.eos() if self.tgt_dict else self.src_dict.eos()
            if self.tgt and self.tgt[index][-1] != eos:
                tgt_item = torch.cat([self.tgt[index], torch.LongTensor([eos])])
        if self.append_bos:
            bos = self.tgt_dict.bos() if self.tgt_dict else self.src_dict.bos()
            if self.tgt and self.tgt[index][0] != bos:
                tgt_item = torch.cat([torch.LongTensor([bos]), self.tgt[index]])
            bos = self.src_dict.bos()
            if self.src[index][0] != bos:
                src_item = torch.cat([torch.LongTensor([bos]), self.src[index]])
        if self.remove_eos_from_source:
            eos = self.src_dict.eos()
            if self.src[index][-1] == eos:
                src_item = self.src[index][:-1]
        if self.src_with_werdur:
            # Source packs [tokens ; werdur codes] in one tensor. The second
            # half stores per-token values offset by 32768 so they fit in a
            # non-negative id space; sign and magnitude are recovered below.
            # assert not
            src_item_length = int(len(src_item))
            #print(src_item_length, src_item)
            if self.append_bos or self.bos_prepended_outside: # origin 8, append_bos: 9
                # odd length: bos carries no packed code, so prepend duration 1
                assert src_item_length % 2 == 1
                werdur_info = src_item[(src_item_length+1)//2:].clone() - 32768
                werdur_info = torch.cat([torch.LongTensor([1]), werdur_info], dim=-1)
                src_item = src_item[:(src_item_length+1)//2]
            else:
                assert src_item_length % 2 == 0
                werdur_info = src_item[(src_item_length)//2:].clone() - 32768
                # werdur_info = torch.cat([torch.LongTensor([1]), werdur_info], dim=-1)
                src_item = src_item[:(src_item_length)//2]
            # sign encodes editability (positive -> 1 in the edit mask);
            # magnitude is the duration (# target tokens aligned to the source token)
            to_be_edited = werdur_info.clamp(0, 1)
            wer_dur = torch.abs(werdur_info)
            # expand durations into per-target-token gather indices,
            # e.g. wer_dur=[2,1] -> for_wer_gather=[0,0,1]
            for_wer_gather_list = []
            for i in range(len(wer_dur)):
                for j in range(abs(int(wer_dur[i]))):
                    for_wer_gather_list.append(i)
            for_wer_gather = torch.LongTensor(for_wer_gather_list)
            assert len(wer_dur) == len(src_item)
            assert len(tgt_item) == len(for_wer_gather)
            example = {
                "id": index,
                "source": src_item,
                "target": tgt_item,
                "wer_dur": wer_dur,
                "to_be_edited": to_be_edited,
                "for_wer_gather": for_wer_gather,
            }
        else:
            example = {
                "id": index,
                "source": src_item,
                "target": tgt_item,
            }
        if self.align_dataset is not None:
            example["alignment"] = self.align_dataset[index]
        if self.constraints is not None:
            example["constraints"] = self.constraints[index]
        return example
    def __len__(self):
        return len(self.src)
    def collater(self, samples, pad_to_length=None):
        """Merge a list of samples to form a mini-batch.
        Args:
            samples (List[dict]): samples to collate
            pad_to_length (dict, optional): a dictionary of
                {'source': source_pad_to_length, 'target': target_pad_to_length}
                to indicate the max length to pad to in source and target respectively.
        Returns:
            dict: a mini-batch with the following keys:
                - `id` (LongTensor): example IDs in the original input order
                - `ntokens` (int): total number of tokens in the batch
                - `net_input` (dict): the input to the Model, containing keys:
                  - `src_tokens` (LongTensor): a padded 2D Tensor of tokens in
                    the source sentence of shape `(bsz, src_len)`. Padding will
                    appear on the left if *left_pad_source* is ``True``.
                  - `src_lengths` (LongTensor): 1D Tensor of the unpadded
                    lengths of each source sentence of shape `(bsz)`
                  - `prev_output_tokens` (LongTensor): a padded 2D Tensor of
                    tokens in the target sentence, shifted right by one
                    position for teacher forcing, of shape `(bsz, tgt_len)`.
                    This key will not be present if *input_feeding* is
                    ``False``. Padding will appear on the left if
                    *left_pad_target* is ``True``.
                  - `src_lang_id` (LongTensor): a long Tensor which contains source
                    language IDs of each sample in the batch
                - `target` (LongTensor): a padded 2D Tensor of tokens in the
                  target sentence of shape `(bsz, tgt_len)`. Padding will appear
                  on the left if *left_pad_target* is ``True``.
                - `tgt_lang_id` (LongTensor): a long Tensor which contains target language
                   IDs of each sample in the batch
        """
        res = collate(
            samples,
            pad_idx=self.src_dict.pad(),
            eos_idx=self.eos,
            left_pad_source=self.left_pad_source,
            left_pad_target=self.left_pad_target,
            input_feeding=self.input_feeding,
            pad_to_length=pad_to_length,
            pad_to_multiple=self.pad_to_multiple,
        )
        if self.src_lang_id is not None or self.tgt_lang_id is not None:
            src_tokens = res["net_input"]["src_tokens"]
            bsz = src_tokens.size(0)
            if self.src_lang_id is not None:
                res["net_input"]["src_lang_id"] = (
                    torch.LongTensor([[self.src_lang_id]]).expand(bsz, 1).to(src_tokens)
                )
            if self.tgt_lang_id is not None:
                res["tgt_lang_id"] = (
                    torch.LongTensor([[self.tgt_lang_id]]).expand(bsz, 1).to(src_tokens)
                )
        return res
    def num_tokens(self, index):
        """Return the number of tokens in a sample. This value is used to
        enforce ``--max-tokens`` during batching."""
        if self.src_with_werdur:
            # packed sources hold [tokens ; werdur codes], so the true token
            # count is half the stored size
            return max(
                self.src_sizes[index] // 2,
                self.tgt_sizes[index] if self.tgt_sizes is not None else 0,
            )
        else:
            return max(
                self.src_sizes[index],
                self.tgt_sizes[index] if self.tgt_sizes is not None else 0,
            )
    def size(self, index):
        """Return an example's size as a float or tuple. This value is used when
        filtering a dataset with ``--max-positions``."""
        return (
            self.src_sizes[index],
            self.tgt_sizes[index] if self.tgt_sizes is not None else 0,
        )
    def ordered_indices(self):
        """Return an ordered list of indices. Batches will be constructed based
        on this order."""
        if self.shuffle:
            indices = np.random.permutation(len(self)).astype(np.int64)
        else:
            indices = np.arange(len(self), dtype=np.int64)
        if self.buckets is None:
            # sort by target length, then source length
            if self.tgt_sizes is not None:
                indices = indices[np.argsort(self.tgt_sizes[indices], kind="mergesort")]
            return indices[np.argsort(self.src_sizes[indices], kind="mergesort")]
        else:
            # sort by bucketed_num_tokens, which is:
            # max(padded_src_len, padded_tgt_len)
            return indices[
                np.argsort(self.bucketed_num_tokens[indices], kind="mergesort")
            ]
    @property
    def supports_prefetch(self):
        return getattr(self.src, "supports_prefetch", False) and (
            getattr(self.tgt, "supports_prefetch", False) or self.tgt is None
        )
    def prefetch(self, indices):
        self.src.prefetch(indices)
        if self.tgt is not None:
            self.tgt.prefetch(indices)
        if self.align_dataset is not None:
            self.align_dataset.prefetch(indices)
    def filter_indices_by_size(self, indices, max_sizes):
        """Filter a list of sample indices. Remove those that are longer
        than specified in max_sizes.
        Args:
            indices (np.array): original array of sample indices
            max_sizes (int or list[int] or tuple[int]): max sample size,
                can be defined separately for src and tgt (then list or tuple)
        Returns:
            np.array: filtered sample array
            list: list of removed indices
        """
        return data_utils.filter_paired_dataset_indices_by_size(
            self.src_sizes,
            self.tgt_sizes,
            indices,
            max_sizes,
        )
| 24,146 | 38.455882 | 90 | py |
NeuralSpeech | NeuralSpeech-master/FastCorrect/FC_utils/hub_utils_fc.py | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
#!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import copy
import logging
import os
from typing import Any, Dict, Iterator, List, Tuple
import torch
from fairseq import utils
from fairseq.data import encoders
from torch import nn
import time
logger = logging.getLogger(__name__)
def from_pretrained(
    model_name_or_path,
    checkpoint_file="model.pt",
    data_name_or_path=".",
    archive_map=None,
    **kwargs
):
    """Load a pre-trained model archive and return its args, task and models.

    Returns:
        dict with keys ``"args"``, ``"task"`` and ``"models"``.
    """
    from fairseq import checkpoint_utils, file_utils

    if archive_map is not None:
        model_name_or_path = archive_map.get(model_name_or_path, model_name_or_path)
        if data_name_or_path is not None:
            data_name_or_path = archive_map.get(data_name_or_path, data_name_or_path)

        # allow archive_map to set default arg_overrides (e.g., tokenizer, bpe)
        # for each model
        if isinstance(model_name_or_path, dict):
            for key, value in model_name_or_path.items():
                if key == "checkpoint_file":
                    checkpoint_file = value
                elif key != "path" and key not in kwargs:
                    # only set kwargs that don't already have overrides
                    kwargs[key] = value
            model_name_or_path = model_name_or_path["path"]

    model_path = file_utils.load_archive_file(model_name_or_path)

    # convenience hack for loading data and BPE codes from model archive
    if data_name_or_path.startswith("."):
        kwargs["data"] = os.path.abspath(os.path.join(model_path, data_name_or_path))
    else:
        kwargs["data"] = file_utils.load_archive_file(data_name_or_path)

    bpe_file_to_arg = {
        "code": "bpe_codes",
        "bpecodes": "bpe_codes",
        "sentencepiece.bpe.model": "sentencepiece_model",
    }
    for bpe_file, arg_name in bpe_file_to_arg.items():
        candidate = os.path.join(model_path, bpe_file)
        if os.path.exists(candidate):
            kwargs[arg_name] = candidate

    if "user_dir" in kwargs:
        utils.import_user_module(argparse.Namespace(user_dir=kwargs["user_dir"]))

    checkpoint_paths = [
        os.path.join(model_path, cpt) for cpt in checkpoint_file.split(os.pathsep)
    ]
    models, args, task = checkpoint_utils.load_model_ensemble_and_task(
        checkpoint_paths,
        arg_overrides=kwargs,
    )
    return {
        "args": args,
        "task": task,
        "models": models,
    }
class GeneratorHubInterface(nn.Module):
    """
    PyTorch Hub interface for generating sequences from a pre-trained
    translation or language model.

    Unlike the upstream fairseq interface, :meth:`generate` (and therefore
    :meth:`sample`/:meth:`translate`) returns a tuple ``(outputs, exc_time)``
    where ``exc_time`` is the wall-clock seconds spent inside
    ``task.inference_step``, so callers can measure decoding latency.
    """

    def __init__(self, args, task, models):
        super().__init__()
        self.args = args
        self.task = task
        self.models = nn.ModuleList(models)
        self.src_dict = task.source_dictionary
        self.tgt_dict = task.target_dictionary

        # optimize model for generation
        for model in self.models:
            model.prepare_for_inference_(args)

        # Load alignment dictionary for unknown word replacement
        # (None if no unknown word replacement, empty if no path to align dictionary)
        self.align_dict = utils.load_align_dict(getattr(args, "replace_unk", None))

        self.tokenizer = encoders.build_tokenizer(args)
        self.bpe = encoders.build_bpe(args)

        self.max_positions = utils.resolve_max_positions(
            self.task.max_positions(), *[model.max_positions() for model in models]
        )

        # this is useful for determining the device
        self.register_buffer("_float_tensor", torch.tensor([0], dtype=torch.float))

    @property
    def device(self):
        return self._float_tensor.device

    def translate(
        self, sentences: List[str], beam: int = 5, verbose: bool = False, **kwargs
    ) -> List[str]:
        """Alias for :meth:`sample`, kept for API compatibility."""
        return self.sample(sentences, beam, verbose, **kwargs)

    def sample(
        self, sentences: List[str], beam: int = 1, verbose: bool = False, **kwargs
    ) -> List[str]:
        """Decode *sentences* and return ``(texts, exc_time)``.

        ``texts`` is the decoded best hypothesis per input (a single string
        when a single string was passed); ``exc_time`` is the inference time
        in seconds.
        """
        if isinstance(sentences, str):
            exc_text, exc_time = self.sample([sentences], beam=beam, verbose=verbose, **kwargs)
            return exc_text[0], exc_time
        tokenized_sentences = [self.encode(sentence) for sentence in sentences]
        batched_hypos, exc_time = self.generate(tokenized_sentences, beam, verbose, **kwargs)
        return [self.decode(hypos[0]["tokens"]) for hypos in batched_hypos], exc_time

    def score(self, sentences: List[str], **kwargs):
        """Score the reference translations; returns the best hypothesis per input."""
        if isinstance(sentences, str):
            return self.score([sentences], **kwargs)[0]
        # NOTE: this doesn't support translation tasks currently
        tokenized_sentences = [self.encode(sentence) for sentence in sentences]
        # BUGFIX: generate() returns (outputs, exc_time); the previous code
        # iterated over the 2-tuple itself. Unpack and drop the timing here.
        batched_hypos, _ = self.generate(
            tokenized_sentences, score_reference=True, **kwargs
        )
        return [hypos[0] for hypos in batched_hypos]

    def generate(
        self,
        tokenized_sentences: List[torch.LongTensor],
        beam: int = 5,
        verbose: bool = False,
        skip_invalid_size_inputs=False,
        inference_step_args=None,
        **kwargs
    ) -> List[List[Dict[str, torch.Tensor]]]:
        """Generate hypotheses for pre-tokenized inputs.

        Returns a tuple ``(outputs, exc_time)``: ``outputs[i]`` is the list
        of hypothesis dicts for input ``i`` (input order preserved), and
        ``exc_time`` is the total seconds spent in ``task.inference_step``.
        """
        if torch.is_tensor(tokenized_sentences) and tokenized_sentences.dim() == 1:
            # Single-sentence tensor input: unwrap the batch dimension from
            # the recursive result but keep the (hypos, time) tuple shape.
            # BUGFIX: previously indexed [0] into the returned tuple itself.
            outputs, exc_time = self.generate(
                tokenized_sentences.unsqueeze(0), beam=beam, verbose=verbose, **kwargs
            )
            return outputs[0], exc_time

        # build generator using current args as well as any kwargs
        gen_args = copy.copy(self.args)
        gen_args.beam = beam
        for k, v in kwargs.items():
            setattr(gen_args, k, v)
        generator = self.task.build_generator(self.models, gen_args)

        inference_step_args = inference_step_args or {}
        results = []
        # Total inference time. Initialized up-front so it is defined even
        # when there are no batches (BUGFIX: was unbound in that case), and
        # accumulated so multi-batch inputs report the total instead of
        # failing an assertion (BUGFIX: old code asserted a single batch).
        exc_time = 0.0
        for batch in self._build_batches(tokenized_sentences, skip_invalid_size_inputs):
            batch = utils.apply_to_sample(lambda t: t.to(self.device), batch)
            begin_time_exc = time.time()
            translations = self.task.inference_step(
                generator, self.models, batch, **inference_step_args
            )
            exc_time += time.time() - begin_time_exc
            for id, hypos in zip(batch["id"].tolist(), translations):
                results.append((id, hypos))

        # sort output to match input order
        outputs = [hypos for _, hypos in sorted(results, key=lambda x: x[0])]

        if verbose:

            def getarg(name, default):
                return getattr(gen_args, name, getattr(self.args, name, default))

            for source_tokens, target_hypotheses in zip(tokenized_sentences, outputs):
                src_str_with_unk = self.string(source_tokens)
                logger.info("S\t{}".format(src_str_with_unk))
                for hypo in target_hypotheses:
                    hypo_str = self.decode(hypo["tokens"])
                    logger.info("H\t{}\t{}".format(hypo["score"], hypo_str))
                    logger.info(
                        "P\t{}".format(
                            " ".join(
                                map(
                                    lambda x: "{:.4f}".format(x),
                                    hypo["positional_scores"].tolist(),
                                )
                            )
                        )
                    )
                    if hypo["alignment"] is not None and getarg(
                        "print_alignment", False
                    ):
                        logger.info(
                            "A\t{}".format(
                                " ".join(
                                    [
                                        "{}-{}".format(src_idx, tgt_idx)
                                        for src_idx, tgt_idx in hypo["alignment"]
                                    ]
                                )
                            )
                        )
        return outputs, exc_time

    def encode(self, sentence: str) -> torch.LongTensor:
        """Tokenize, BPE-encode and binarize *sentence*."""
        sentence = self.tokenize(sentence)
        sentence = self.apply_bpe(sentence)
        return self.binarize(sentence)

    def decode(self, tokens: torch.LongTensor) -> str:
        """Inverse of :meth:`encode`: stringify, remove BPE, detokenize."""
        sentence = self.string(tokens)
        sentence = self.remove_bpe(sentence)
        return self.detokenize(sentence)

    def tokenize(self, sentence: str) -> str:
        if self.tokenizer is not None:
            sentence = self.tokenizer.encode(sentence)
        return sentence

    def detokenize(self, sentence: str) -> str:
        if self.tokenizer is not None:
            sentence = self.tokenizer.decode(sentence)
        return sentence

    def apply_bpe(self, sentence: str) -> str:
        if self.bpe is not None:
            sentence = self.bpe.encode(sentence)
        return sentence

    def remove_bpe(self, sentence: str) -> str:
        if self.bpe is not None:
            sentence = self.bpe.decode(sentence)
        return sentence

    def binarize(self, sentence: str) -> torch.LongTensor:
        return self.src_dict.encode_line(sentence, add_if_not_exist=False).long()

    def string(self, tokens: torch.LongTensor) -> str:
        return self.tgt_dict.string(tokens)

    def _build_batches(
        self, tokens: List[List[int]], skip_invalid_size_inputs: bool
    ) -> Iterator[Dict[str, Any]]:
        """Yield padded mini-batches over the given token sequences."""
        lengths = torch.LongTensor([t.numel() for t in tokens])
        batch_iterator = self.task.get_batch_iterator(
            dataset=self.task.build_dataset_for_inference(tokens, lengths),
            max_tokens=self.args.max_tokens,
            max_sentences=self.args.batch_size,
            max_positions=self.max_positions,
            ignore_invalid_inputs=skip_invalid_size_inputs,
            disable_iterator_cache=True,
        ).next_epoch_itr(shuffle=False)
        return batch_iterator
class BPEHubInterface(object):
    """PyTorch Hub interface for Byte-Pair Encoding (BPE)."""

    def __init__(self, bpe, **kwargs):
        super().__init__()
        bpe_args = argparse.Namespace(bpe=bpe, **kwargs)
        self.bpe = encoders.build_bpe(bpe_args)
        assert self.bpe is not None

    def encode(self, sentence: str) -> str:
        """Apply BPE to *sentence*."""
        return self.bpe.encode(sentence)

    def decode(self, sentence: str) -> str:
        """Remove BPE from *sentence*."""
        return self.bpe.decode(sentence)
class TokenizerHubInterface(object):
    """PyTorch Hub interface for tokenization."""

    def __init__(self, tokenizer, **kwargs):
        super().__init__()
        tokenizer_args = argparse.Namespace(tokenizer=tokenizer, **kwargs)
        self.tokenizer = encoders.build_tokenizer(tokenizer_args)
        assert self.tokenizer is not None

    def encode(self, sentence: str) -> str:
        """Tokenize *sentence*."""
        return self.tokenizer.encode(sentence)

    def decode(self, sentence: str) -> str:
        """Detokenize *sentence*."""
        return self.tokenizer.decode(sentence)
| 11,181 | 35.423453 | 95 | py |
NeuralSpeech | NeuralSpeech-master/FastCorrect/FC_utils/binarizer_fc.py | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
from collections import Counter
import torch
from fairseq.file_io import PathManager
from fairseq.tokenizer import tokenize_line
def safe_readline(f):
    """Read one line from *f*, stepping back if the current file position
    landed in the middle of a multi-byte character."""
    start = f.tell()
    while True:
        try:
            return f.readline()
        except UnicodeDecodeError:
            # Back up one byte at a time until we land on a character boundary.
            start -= 1
            f.seek(start)
class Binarizer:
    """Static helpers that turn text files into streams of ``torch.IntTensor``
    token-id lines, fed to a ``consumer`` callback (used during dataset
    preprocessing)."""

    @staticmethod
    def binarize(
        filename,
        dict,  # NOTE: shadows the builtin; this is a fairseq Dictionary
        consumer,
        tokenize=tokenize_line,
        append_eos=True,
        reverse_order=False,
        offset=0,
        end=-1,
        already_numberized=False,
        src_with_werdur=False,
    ):
        """Binarize the byte range [offset, end) of *filename*.

        Each line is encoded with ``dict`` (or parsed directly as ids when
        ``already_numberized``) and passed to ``consumer``. When
        ``src_with_werdur`` is set, a line may carry ' |||| '-separated
        per-token duration info which is appended to the id tensor.

        Returns a stats dict with keys ``nseq``, ``nunk``, ``ntok`` and
        ``replaced`` (a Counter of words mapped to <unk>).
        """
        nseq, ntok = 0, 0
        replaced = Counter()
        def replaced_consumer(word, idx):
            # Track real out-of-vocabulary words (not literal <unk> tokens).
            if idx == dict.unk_index and word != dict.unk_word:
                replaced.update([word])
        with open(PathManager.get_local_path(filename), "r", encoding="utf-8") as f:
            f.seek(offset)
            # next(f) breaks f.tell(), hence readline() must be used
            line = safe_readline(f)
            while line:
                # Stop once we have read past our assigned chunk boundary.
                if end > 0 and f.tell() > end:
                    break
                if already_numberized:
                    assert ' |||| ' not in line, "This constraint is add when doing asr correction exp"
                    id_strings = line.strip().split()
                    id_list = [int(id_string) for id_string in id_strings]
                    if reverse_order:
                        id_list.reverse()
                    if append_eos:
                        id_list.append(dict.eos())
                    ids = torch.IntTensor(id_list)
                else:
                    if ' |||| ' in line:
                        assert src_with_werdur
                        line, werdur_info = line.split(' |||| ')
                        werdur_list = []
                        for i in werdur_info.strip().split():
                            # Durations may be negative; shift by 32768 so they
                            # can be stored next to token ids in the IntTensor
                            # (presumably the consumer subtracts 32768 back —
                            # TODO confirm against the dataset loader).
                            assert abs(int(i)) < 30000
                            werdur_list.append(int(i) + 32768)
                        if append_eos:
                            # Duration of 1 for the appended EOS token.
                            werdur_list.append(1 + 32768)
                        werdur_list_length = len(werdur_list)
                    else:
                        werdur_list = None
                    ids = dict.encode_line(
                        line=line,
                        line_tokenizer=tokenize,
                        add_if_not_exist=False,
                        consumer=replaced_consumer,
                        append_eos=append_eos,
                        reverse_order=reverse_order,
                    )
                    # print(ids)
                    if werdur_list is not None:
                        # One duration per token; concatenated after the ids.
                        assert werdur_list_length == len(ids)
                        ids = torch.cat([ids, torch.IntTensor(werdur_list)], dim=-1)
                nseq += 1
                ntok += len(ids)
                consumer(ids)
                line = f.readline()
        return {
            "nseq": nseq,
            "nunk": sum(replaced.values()),
            "ntok": ntok,
            "replaced": replaced,
        }

    @staticmethod
    def binarize_alignments(filename, alignment_parser, consumer, offset=0, end=-1):
        """Parse each alignment line in [offset, end) with *alignment_parser*
        and feed the result to *consumer*; returns ``{"nseq": count}``."""
        nseq = 0
        with open(PathManager.get_local_path(filename), "r") as f:
            f.seek(offset)
            line = safe_readline(f)
            while line:
                if end > 0 and f.tell() > end:
                    break
                ids = alignment_parser(line)
                nseq += 1
                consumer(ids)
                line = f.readline()
        return {"nseq": nseq}

    @staticmethod
    def find_offsets(filename, num_chunks):
        """Split *filename* into *num_chunks* byte ranges aligned to line
        boundaries; returns ``num_chunks + 1`` offsets (first is 0)."""
        with open(PathManager.get_local_path(filename), "r", encoding="utf-8") as f:
            size = os.fstat(f.fileno()).st_size
            chunk_size = size // num_chunks
            offsets = [0 for _ in range(num_chunks + 1)]
            for i in range(1, num_chunks):
                f.seek(chunk_size * i)
                # Advance to the next full line so chunks never split a line.
                safe_readline(f)
                offsets[i] = f.tell()
            return offsets
| 4,394 | 33.606299 | 103 | py |
NeuralSpeech | NeuralSpeech-master/FastCorrect/FC_utils/options_fc.py | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
from typing import Callable, List, Optional
import torch
from fairseq import utils
from fairseq.data.indexed_dataset import get_available_dataset_impl
from fairseq.dataclass.data_class import (
CheckpointParams,
CommonEvalParams,
CommonParams,
DatasetParams,
DistributedTrainingParams,
EvalLMParams,
OptimizationParams,
)
from fairseq.dataclass.utils import gen_parser_from_dataclass
# this import is for backward compatibility
from fairseq.utils import csv_str_list, eval_bool, eval_str_dict, eval_str_list # noqa
def get_preprocessing_parser(default_task="translation"):
    """Build the argument parser used by the preprocessing entry point."""
    preprocess_parser = get_parser("Preprocessing", default_task)
    add_preprocess_args(preprocess_parser)
    return preprocess_parser
def get_training_parser(default_task="translation"):
    """Build the argument parser used by the training entry point."""
    train_parser = get_parser("Trainer", default_task)
    add_dataset_args(train_parser, train=True)
    add_distributed_training_args(train_parser)
    add_model_args(train_parser)
    add_optimization_args(train_parser)
    add_checkpoint_args(train_parser)
    return train_parser
def get_generation_parser(interactive=False, default_task="translation"):
    """Build the argument parser used for generation.

    When *interactive* is set, also attach the interactive-input options.
    """
    gen_parser = get_parser("Generation", default_task)
    add_dataset_args(gen_parser, gen=True)
    add_distributed_training_args(gen_parser, default_world_size=1)
    add_generation_args(gen_parser)
    if interactive:
        add_interactive_args(gen_parser)
    return gen_parser
def get_interactive_generation_parser(default_task="translation"):
    """Shorthand for :func:`get_generation_parser` with interactive input."""
    return get_generation_parser(interactive=True, default_task=default_task)
def get_eval_lm_parser(default_task="language_modeling"):
    """Build the argument parser used for language-model evaluation."""
    lm_parser = get_parser("Evaluate Language Model", default_task)
    add_dataset_args(lm_parser, gen=True)
    add_distributed_training_args(lm_parser, default_world_size=1)
    add_eval_lm_args(lm_parser)
    return lm_parser
def get_validation_parser(default_task=None):
    """Build the argument parser used by the validation entry point."""
    val_parser = get_parser("Validation", default_task)
    add_dataset_args(val_parser, train=True)
    add_distributed_training_args(val_parser, default_world_size=1)
    eval_group = val_parser.add_argument_group("Evaluation")
    gen_parser_from_dataclass(eval_group, CommonEvalParams())
    return val_parser
def parse_args_and_arch(
    parser: argparse.ArgumentParser,
    input_args: List[str] = None,
    parse_known: bool = False,
    suppress_defaults: bool = False,
    modify_parser: Optional[Callable[[argparse.ArgumentParser], None]] = None,
):
    """
    Parse command-line args, then augment the parser with the
    model/task/registry-specific options implied by those args and parse
    again.

    Args:
        parser (ArgumentParser): the parser
        input_args (List[str]): strings to parse, defaults to sys.argv
        parse_known (bool): only parse known arguments, similar to
            `ArgumentParser.parse_known_args`
        suppress_defaults (bool): parse while ignoring all default values
        modify_parser (Optional[Callable[[ArgumentParser], None]]):
            function to modify the parser, e.g., to set default values

    Returns:
        the parsed Namespace, or ``(namespace, extra_args)`` when
        ``parse_known`` is set.
    """
    if suppress_defaults:
        # Parse args without any default values. This requires us to parse
        # twice, once to identify all the necessary task/model args, and a second
        # time with all defaults set to None.
        args = parse_args_and_arch(
            parser,
            input_args=input_args,
            parse_known=parse_known,
            suppress_defaults=False,
        )
        suppressed_parser = argparse.ArgumentParser(add_help=False, parents=[parser])
        suppressed_parser.set_defaults(**{k: None for k, v in vars(args).items()})
        args = suppressed_parser.parse_args(input_args)
        # Drop every None entry so only explicitly-given values remain.
        return argparse.Namespace(
            **{k: v for k, v in vars(args).items() if v is not None}
        )
    from fairseq.models import ARCH_MODEL_REGISTRY, ARCH_CONFIG_REGISTRY, MODEL_REGISTRY
    # Before creating the true parser, we need to import optional user module
    # in order to eagerly import custom tasks, optimizers, architectures, etc.
    usr_parser = argparse.ArgumentParser(add_help=False, allow_abbrev=False)
    usr_parser.add_argument("--user-dir", default=None)
    usr_args, _ = usr_parser.parse_known_args(input_args)
    utils.import_user_module(usr_args)
    if modify_parser is not None:
        modify_parser(parser)
    # The parser doesn't know about model/criterion/optimizer-specific args, so
    # we parse twice. First we parse the model/criterion/optimizer, then we
    # parse a second time after adding the *-specific arguments.
    # If input_args is given, we will parse those args instead of sys.argv.
    args, _ = parser.parse_known_args(input_args)
    # Add model-specific args to parser.
    if hasattr(args, "arch"):
        model_specific_group = parser.add_argument_group(
            "Model-specific configuration",
            # Only include attributes which are explicitly given as command-line
            # arguments or which have default values.
            argument_default=argparse.SUPPRESS,
        )
        if args.arch in ARCH_MODEL_REGISTRY:
            ARCH_MODEL_REGISTRY[args.arch].add_args(model_specific_group)
        elif args.arch in MODEL_REGISTRY:
            MODEL_REGISTRY[args.arch].add_args(model_specific_group)
        else:
            # --arch value passed argparse `choices` validation but is in
            # neither registry; should be unreachable in practice.
            raise RuntimeError()
    # Add *-specific args to parser.
    from fairseq.registry import REGISTRIES
    for registry_name, REGISTRY in REGISTRIES.items():
        choice = getattr(args, registry_name, None)
        if choice is not None:
            cls = REGISTRY["registry"][choice]
            if hasattr(cls, "add_args"):
                cls.add_args(parser)
    if hasattr(args, "task"):
        from fairseq.tasks import TASK_REGISTRY
        TASK_REGISTRY[args.task].add_args(parser)
    if getattr(args, "use_bmuf", False):
        # hack to support extra args for block distributed data parallelism
        from fairseq.optim.bmuf import FairseqBMUF
        FairseqBMUF.add_args(parser)
    # Modify the parser a second time, since defaults may have been reset
    if modify_parser is not None:
        modify_parser(parser)
    # Parse a second time.
    if parse_known:
        args, extra = parser.parse_known_args(input_args)
    else:
        args = parser.parse_args(input_args)
        extra = None
    # Post-process args.
    # Validation batch/token limits fall back to the training values.
    if (
        hasattr(args, "batch_size_valid") and args.batch_size_valid is None
    ) or not hasattr(args, "batch_size_valid"):
        args.batch_size_valid = args.batch_size
    if hasattr(args, "max_tokens_valid") and args.max_tokens_valid is None:
        args.max_tokens_valid = args.max_tokens
    # The memory-efficient variants imply their base precision flags.
    if getattr(args, "memory_efficient_fp16", False):
        args.fp16 = True
    if getattr(args, "memory_efficient_bf16", False):
        args.bf16 = True
    args.tpu = getattr(args, "tpu", False)
    args.bf16 = getattr(args, "bf16", False)
    if args.bf16:
        args.tpu = True
    if args.tpu and args.fp16:
        raise ValueError("Cannot combine --fp16 and --tpu, use --bf16 on TPUs")
    if getattr(args, "seed", None) is None:
        args.seed = 1  # default seed for training
        args.no_seed_provided = True
    else:
        args.no_seed_provided = False
    # Apply architecture configuration.
    if hasattr(args, "arch") and args.arch in ARCH_CONFIG_REGISTRY:
        ARCH_CONFIG_REGISTRY[args.arch](args)
    if parse_known:
        return args, extra
    else:
        return args
def get_parser(desc, default_task="translation"):
    """Create the base parser with common params, registry options and
    ``--task`` (``desc`` is unused but kept for API compatibility)."""
    # Before creating the true parser, we need to import optional user module
    # in order to eagerly import custom tasks, optimizers, architectures, etc.
    usr_parser = argparse.ArgumentParser(add_help=False, allow_abbrev=False)
    usr_parser.add_argument("--user-dir", default=None)
    usr_args, _ = usr_parser.parse_known_args()
    utils.import_user_module(usr_args)

    parser = argparse.ArgumentParser(allow_abbrev=False)
    gen_parser_from_dataclass(parser, CommonParams())

    from fairseq.registry import REGISTRIES

    for registry_name, REGISTRY in REGISTRIES.items():
        flag = "--" + registry_name.replace("_", "-")
        parser.add_argument(
            flag,
            default=REGISTRY["default"],
            choices=REGISTRY["registry"].keys(),
        )

    # Task definitions can be found under fairseq/tasks/
    from fairseq.tasks import TASK_REGISTRY

    parser.add_argument(
        "--task",
        metavar="TASK",
        default=default_task,
        choices=TASK_REGISTRY.keys(),
        help="task",
    )
    return parser
def add_preprocess_args(parser):
    """Attach preprocessing options (languages, file prefixes, dictionaries,
    binarization settings) to *parser* and return the argument group."""
    group = parser.add_argument_group("Preprocessing")
    # fmt: off
    group.add_argument("-s", "--source-lang", default=None, metavar="SRC",
                       help="source language")
    group.add_argument("-t", "--target-lang", default=None, metavar="TARGET",
                       help="target language")
    group.add_argument("--trainpref", metavar="FP", default=None,
                       help="train file prefix")
    group.add_argument("--validpref", metavar="FP", default=None,
                       help="comma separated, valid file prefixes")
    group.add_argument("--testpref", metavar="FP", default=None,
                       help="comma separated, test file prefixes")
    group.add_argument("--align-suffix", metavar="FP", default=None,
                       help="alignment file suffix")
    group.add_argument("--destdir", metavar="DIR", default="data-bin",
                       help="destination dir")
    group.add_argument("--thresholdtgt", metavar="N", default=0, type=int,
                       help="map words appearing less than threshold times to unknown")
    group.add_argument("--thresholdsrc", metavar="N", default=0, type=int,
                       help="map words appearing less than threshold times to unknown")
    group.add_argument("--tgtdict", metavar="FP",
                       help="reuse given target dictionary")
    group.add_argument("--srcdict", metavar="FP",
                       help="reuse given source dictionary")
    group.add_argument("--nwordstgt", metavar="N", default=-1, type=int,
                       help="number of target words to retain")
    group.add_argument("--nwordssrc", metavar="N", default=-1, type=int,
                       help="number of source words to retain")
    group.add_argument("--alignfile", metavar="ALIGN", default=None,
                       help="an alignment file (optional)")
    # CONSISTENCY FIX: attach to the "Preprocessing" group like every other
    # option here (was parser.add_argument, which only affected help grouping).
    group.add_argument('--dataset-impl', metavar='FORMAT', default='mmap',
                       choices=get_available_dataset_impl(),
                       help='output dataset implementation')
    group.add_argument("--joined-dictionary", action="store_true",
                       help="Generate joined dictionary")
    group.add_argument("--only-source", action="store_true",
                       help="Only process the source language")
    group.add_argument("--padding-factor", metavar="N", default=8, type=int,
                       help="Pad dictionary size to be multiple of N")
    group.add_argument("--workers", metavar="N", default=1, type=int,
                       help="number of parallel workers")
    group.add_argument("--src-with-werdur", action="store_true", default=False,
                       help="whether the src file contains werdur-info")
    # fmt: on
    return parser
def add_dataset_args(parser, train=False, gen=False):
    """Attach dataset / data-loading options to *parser*.

    ``train`` and ``gen`` are unused here but kept for call-site
    compatibility.
    """
    dataset_group = parser.add_argument_group("dataset_data_loading")
    gen_parser_from_dataclass(dataset_group, DatasetParams())
    return dataset_group
def add_distributed_training_args(parser, default_world_size=None):
    """Attach distributed-training options; world size defaults to the
    number of visible GPUs (at least 1)."""
    dist_group = parser.add_argument_group("distributed_training")
    if default_world_size is None:
        default_world_size = max(1, torch.cuda.device_count())
    gen_parser_from_dataclass(
        dist_group,
        DistributedTrainingParams(distributed_world_size=default_world_size),
    )
    return dist_group
def add_optimization_args(parser):
    """Attach optimization options to *parser* and return the group."""
    opt_group = parser.add_argument_group("optimization")
    gen_parser_from_dataclass(opt_group, OptimizationParams())
    return opt_group
def add_checkpoint_args(parser):
    """Attach checkpointing options to *parser* and return the group."""
    ckpt_group = parser.add_argument_group("checkpoint")
    gen_parser_from_dataclass(ckpt_group, CheckpointParams())
    return ckpt_group
def add_common_eval_args(group):
    """Attach evaluation options shared by generation and LM evaluation."""
    gen_parser_from_dataclass(group, CommonEvalParams())
def add_eval_lm_args(parser):
    """Attach language-model evaluation options to *parser*."""
    lm_group = parser.add_argument_group("LM Evaluation")
    add_common_eval_args(lm_group)
    gen_parser_from_dataclass(lm_group, EvalLMParams())
def add_generation_args(parser):
    """Attach generation/decoding options (beam search, sampling, iterative
    refinement, etc.) to *parser* and return the argument group."""
    group = parser.add_argument_group("Generation")
    add_common_eval_args(group)
    # fmt: off
    group.add_argument('--beam', default=5, type=int, metavar='N',
                       help='beam size')
    group.add_argument('--nbest', default=1, type=int, metavar='N',
                       help='number of hypotheses to output')
    group.add_argument('--max-len-a', default=0, type=float, metavar='N',
                       help=('generate sequences of maximum length ax + b, '
                             'where x is the source length'))
    group.add_argument('--max-len-b', default=200, type=int, metavar='N',
                       help=('generate sequences of maximum length ax + b, '
                             'where x is the source length'))
    group.add_argument('--min-len', default=1, type=float, metavar='N',
                       help=('minimum generation length'))
    group.add_argument('--match-source-len', default=False, action='store_true',
                       help=('generations should match the source length'))
    group.add_argument('--no-early-stop', action='store_true',
                       help='deprecated')
    group.add_argument('--unnormalized', action='store_true',
                       help='compare unnormalized hypothesis scores')
    group.add_argument('--no-beamable-mm', action='store_true',
                       help='don\'t use BeamableMM in attention layers')
    group.add_argument('--lenpen', default=1, type=float,
                       help='length penalty: <1.0 favors shorter, >1.0 favors longer sentences')
    group.add_argument('--unkpen', default=0, type=float,
                       help='unknown word penalty: <0 produces more unks, >0 produces fewer')
    group.add_argument('--replace-unk', nargs='?', const=True, default=None,
                       help='perform unknown replacement (optionally with alignment dictionary)')
    group.add_argument('--sacrebleu', action='store_true',
                       help='score with sacrebleu')
    group.add_argument('--score-reference', action='store_true',
                       help='just score the reference translation')
    group.add_argument('--prefix-size', default=0, type=int, metavar='PS',
                       help='initialize generation by target prefix of given length')
    group.add_argument('--no-repeat-ngram-size', default=0, type=int, metavar='N',
                       help='ngram blocking such that this size ngram cannot be repeated in the generation')
    group.add_argument('--sampling', action='store_true',
                       help='sample hypotheses instead of using beam search')
    group.add_argument('--sampling-topk', default=-1, type=int, metavar='PS',
                       help='sample from top K likely next words instead of all words')
    group.add_argument('--sampling-topp', default=-1.0, type=float, metavar='PS',
                       help='sample from the smallest set whose cumulative probability mass exceeds p for next words')
    group.add_argument('--constraints', const="ordered", nargs="?", choices=["ordered", "unordered"],
                       help='enables lexically constrained decoding')
    group.add_argument('--temperature', default=1., type=float, metavar='N',
                       help='temperature for generation')
    group.add_argument('--diverse-beam-groups', default=-1, type=int, metavar='N',
                       help='number of groups for Diverse Beam Search')
    group.add_argument('--diverse-beam-strength', default=0.5, type=float, metavar='N',
                       help='strength of diversity penalty for Diverse Beam Search')
    group.add_argument('--diversity-rate', default=-1.0, type=float, metavar='N',
                       help='strength of diversity penalty for Diverse Siblings Search')
    group.add_argument('--print-alignment', action='store_true',
                       help='if set, uses attention feedback to compute and print alignment to source tokens')
    group.add_argument('--print-step', action='store_true')
    group.add_argument('--lm-path', default=None, type=str, metavar='PATH',
                       help='path to lm checkpoint for lm fusion')
    group.add_argument('--lm-weight', default=0.0, type=float, metavar='N',
                       help='weight for lm probs for lm fusion')
    # arguments for iterative refinement generator
    group.add_argument('--iter-decode-eos-penalty', default=0.0, type=float, metavar='N',
                       help='if > 0.0, it penalized early-stopping in decoding.')
    group.add_argument('--iter-decode-max-iter', default=10, type=int, metavar='N',
                       help='maximum iterations for iterative refinement.')
    group.add_argument('--iter-decode-force-max-iter', action='store_true',
                       help='if set, run exact the maximum number of iterations without early stop')
    group.add_argument('--iter-decode-with-beam', default=1, type=int, metavar='N',
                       help='if > 1, model will generate translations varying by the lengths.')
    # NOTE: the stray trailing comma that used to follow this call (creating a
    # no-op tuple expression) has been removed.
    group.add_argument('--iter-decode-with-external-reranker', action='store_true',
                       help='if set, the last checkpoint are assumed to be a reranker to rescore the translations')
    group.add_argument('--retain-iter-history', action='store_true',
                       help='if set, decoding returns the whole history of iterative refinement')
    group.add_argument('--retain-dropout', action='store_true',
                       help='Use dropout at inference time')
    group.add_argument('--retain-dropout-modules', default=None, nargs='+', type=str,
                       help='if set, only retain dropout for the specified modules; '
                            'if not set, then dropout will be retained for all modules')
    # special decoding format for advanced decoding.
    group.add_argument('--decoding-format', default=None, type=str, choices=['unigram', 'ensemble', 'vote', 'dp', 'bs'])
    # fmt: on
    return group
def add_interactive_args(parser):
    """Attach options for interactive generation (buffered stdin/file input)."""
    interactive_group = parser.add_argument_group("Interactive")
    # fmt: off
    interactive_group.add_argument(
        '--buffer-size', default=0, type=int, metavar='N',
        help='read this many sentences into a buffer before processing them')
    interactive_group.add_argument(
        '--input', default='-', type=str, metavar='FILE',
        help='file to read from; use - for stdin')
    # fmt: on
def add_model_args(parser):
    """Attach the model architecture option (``--arch``) to *parser*."""
    model_group = parser.add_argument_group("Model configuration")
    # fmt: off
    # Model definitions can be found under fairseq/models/
    #
    # The model architecture can be specified in several ways.
    # In increasing order of priority:
    #   1) model defaults (lowest priority)
    #   2) --arch argument
    #   3) --encoder/decoder-* arguments (highest priority)
    from fairseq.models import ARCH_MODEL_REGISTRY
    model_group.add_argument(
        '--arch', '-a', metavar='ARCH',
        choices=ARCH_MODEL_REGISTRY.keys(),
        help='model architecture')
    # fmt: on
    return model_group
| 19,822 | 43.346756 | 120 | py |
NeuralSpeech | NeuralSpeech-master/FastCorrect/FC_utils/fastcorrect_generator.py | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from collections import namedtuple
import numpy as np
import torch
from fairseq import utils
import time
# Immutable record describing the decoder state/result at one decoding step
# of the FastCorrect generator.
DecoderOut = namedtuple(
    "FastCorrectDecoderOut",
    [
        "output_tokens",
        "output_scores",
        "attn",
        "step",
        "max_step",
        "history",
        "to_be_edited_pred",
        "wer_dur_pred",
    ],
)
class FastCorrectGenerator(object):
    def __init__(
        self,
        tgt_dict,
        models=None,
        eos_penalty=0.0,
        max_iter=10,
        max_ratio=2,
        beam_size=1,
        decoding_format=None,
        retain_dropout=False,
        adaptive=True,
        retain_history=False,
        reranking=False,
        edit_thre=0.0,
        print_werdur=False
    ):
        """
        Generates translations based on iterative refinement.
        Args:
            tgt_dict: target dictionary
            models: list of models; the last one may act as a reranker when
                ``reranking`` is set
            eos_penalty: if > 0.0, it penalized early-stopping in decoding
            max_iter: maximum number of refinement iterations
            max_ratio: generate sequences of maximum length ax, where x is the source length
            beam_size: number of length candidates per sentence (length beam)
            decoding_format: decoding mode in {'unigram', 'ensemble', 'vote', 'dp', 'bs'}
            retain_dropout: retaining dropout in the inference
            adaptive: decoding with early stop
            retain_history: keep the token history of every refinement step
            reranking: rescore length-beam candidates with the last model
            edit_thre: threshold passed to initialize_output_tokens
                (presumably gates which positions count as "to be edited" —
                TODO confirm against the model implementation)
            print_werdur: forwarded to initialize_output_tokens to emit
                duration predictions
        """
        # Cache the special-symbol ids from the target dictionary.
        self.bos = tgt_dict.bos()
        self.pad = tgt_dict.pad()
        self.unk = tgt_dict.unk()
        self.eos = tgt_dict.eos()
        self.vocab_size = len(tgt_dict)
        self.eos_penalty = eos_penalty
        self.max_iter = max_iter
        self.max_ratio = max_ratio
        self.beam_size = beam_size
        self.reranking = reranking
        self.decoding_format = decoding_format
        self.retain_dropout = retain_dropout
        self.retain_history = retain_history
        self.adaptive = adaptive
        self.models = models
        self.edit_thre = edit_thre
        self.print_werdur = print_werdur
    def generate_batched_itr(
        self,
        data_itr,
        maxlen_a=None,
        maxlen_b=None,
        cuda=False,
        timer=None,
        prefix_size=0,
    ):
        """Iterate over a batched dataset and yield individual translations.

        Yields ``(id, src, ref, hypos)`` tuples with padding stripped from
        src/ref.

        Args:
            maxlen_a/b: generate sequences of maximum length ax + b,
                where x is the source sentence length.
                (NOTE: not referenced in this body — kept for API
                compatibility; presumably honored elsewhere.)
            cuda: use GPU for generation (also unused here)
            timer: StopwatchMeter for timing generations.
            prefix_size: if > 0, seed generation with this many target tokens
        """
        for sample in data_itr:
            # Skip samples that carry no model input (e.g. dummy batches).
            if "net_input" not in sample:
                continue
            if timer is not None:
                timer.start()
            with torch.no_grad():
                hypos = self.generate(
                    self.models,
                    sample,
                    prefix_tokens=sample["target"][:, :prefix_size]
                    if prefix_size > 0
                    else None,
                )
            if timer is not None:
                timer.stop(sample["ntokens"])
            for i, id in enumerate(sample["id"]):
                # remove padding
                src = utils.strip_pad(sample["net_input"]["src_tokens"][i, :], self.pad)
                ref = utils.strip_pad(sample["target"][i, :], self.pad)
                yield id, src, ref, hypos[i]
@torch.no_grad()
def generate(self, models, sample, prefix_tokens=None, constraints=None, werdur_gt_str=""):
    """Decode `sample` by iterative refinement and return one list of
    finalized hypothesis dicts per sentence.

    Args:
        models: list of models; models[0] decodes, and when ``self.reranking``
            is set the last model is treated as an external reranker.
        sample: fairseq batch dict with ``net_input.src_tokens`` /
            ``net_input.src_lengths``.
        prefix_tokens: unused here (kept for API compatibility).
        constraints: unsupported; raises if given.
        werdur_gt_str: optional ground-truth duration string forwarded to
            ``initialize_output_tokens`` when the decoder predicts durations.
    """
    if constraints is not None:
        raise NotImplementedError(
            "Constrained decoding with the IterativeRefinementGenerator is not supported"
        )

    # TODO: iterative refinement generator does not support ensemble for now.
    if not self.retain_dropout:
        for model in models:
            model.eval()

    model, reranker = models[0], None
    if self.reranking:
        # The last checkpoint is split off and used only for rescoring.
        assert len(models) > 1, "Assuming the last checkpoint is the reranker"
        assert (
            self.beam_size > 1
        ), "Reranking requires multiple translation for each example"
        reranker = models[-1]
        models = models[:-1]

    if len(models) > 1 and hasattr(model, "enable_ensemble"):
        assert model.allow_ensemble, "{} does not support ensembling".format(
            model.__class__.__name__
        )
        model.enable_ensemble(models)

    # TODO: better encoder inputs?
    src_tokens = sample["net_input"]["src_tokens"]
    src_lengths = sample["net_input"]["src_lengths"]
    bsz, src_len = src_tokens.size()[:2]

    # initialize: encode once, then seed the decoder state. When the decoder
    # carries a duration predictor, the FastCorrect-specific initializer that
    # takes the edit threshold / ground-truth durations is used instead.
    # print("before encoder:", time.time())
    encoder_out = model.forward_encoder([src_tokens, src_lengths])
    # print("before werdur:", time.time())
    if getattr(model.decoder, "wer_dur_weight", None) or getattr(model.decoder, "dur_predictor", None):
        prev_decoder_out, encoder_out = model.initialize_output_tokens(encoder_out, src_tokens, self.edit_thre, self.print_werdur, werdur_gt_str=werdur_gt_str)
    else:
        #raise ValueError("Remove this after debugging")
        prev_decoder_out, encoder_out = model.initialize_output_tokens(encoder_out, src_tokens)
    # print("before decoder:", time.time())

    if self.beam_size > 1:
        assert (
            model.allow_length_beam
        ), "{} does not support decoding with length beam.".format(
            model.__class__.__name__
        )

        # regenerate data based on length-beam: each sentence is replicated
        # beam_size times, each copy decoding at a different target length.
        length_beam_order = (
            utils.new_arange(src_tokens, self.beam_size, bsz).t().reshape(-1)
        )
        encoder_out = model.encoder.reorder_encoder_out(
            encoder_out, length_beam_order
        )
        prev_decoder_out = model.regenerate_length_beam(
            prev_decoder_out, self.beam_size
        )
        bsz = bsz * self.beam_size

    # sent_idxs maps rows of the (shrinking) active batch back to original
    # hypothesis slots in `finalized`.
    sent_idxs = torch.arange(bsz)
    prev_output_tokens = prev_decoder_out.output_tokens.clone()

    if self.retain_history:
        prev_decoder_out = prev_decoder_out._replace(history=[prev_output_tokens])

    finalized = [[] for _ in range(bsz)]

    def is_a_loop(x, y, s, a):
        # A sentence is "in a loop" when the refinement step left its tokens
        # unchanged (x == y row-wise). Pads the shorter of x/y (and the score
        # and attention tensors to match) so the comparison is well-defined.
        b, l_x, l_y = x.size(0), x.size(1), y.size(1)
        if l_x > l_y:
            y = torch.cat([y, x.new_zeros(b, l_x - l_y).fill_(self.pad)], 1)
            s = torch.cat([s, s.new_zeros(b, l_x - l_y)], 1)
            if a is not None:
                a = torch.cat([a, a.new_zeros(b, l_x - l_y, a.size(2))], 1)
        elif l_x < l_y:
            x = torch.cat([x, y.new_zeros(b, l_y - l_x).fill_(self.pad)], 1)
        return (x == y).all(1), y, s, a

    def finalized_hypos(step, prev_out_token, prev_out_score, prev_out_attn):
        # Strip padding and package one hypothesis into the standard
        # fairseq result dict.
        cutoff = prev_out_token.ne(self.pad)
        tokens = prev_out_token[cutoff]
        if prev_out_score is None:
            scores, score = None, None
        else:
            scores = prev_out_score[cutoff]
            score = scores.mean()

        if prev_out_attn is None:
            hypo_attn, alignment = None, None
        else:
            hypo_attn = prev_out_attn[cutoff]
            alignment = hypo_attn.max(dim=1)[1]
        return {
            "steps": step,
            "tokens": tokens,
            "positional_scores": scores,
            "score": score,
            "hypo_attn": hypo_attn,
            "alignment": alignment,
        }

    for step in range(self.max_iter + 1):
        decoder_options = {
            "eos_penalty": self.eos_penalty,
            "max_ratio": self.max_ratio,
            "decoding_format": self.decoding_format,
        }
        prev_decoder_out = prev_decoder_out._replace(
            step=step,
            max_step=self.max_iter + 1,
        )

        decoder_out = model.forward_decoder(
            prev_decoder_out, encoder_out, **decoder_options
        )

        if self.adaptive:
            # terminate if there is a loop (output unchanged since last step)
            terminated, out_tokens, out_scores, out_attn = is_a_loop(
                prev_output_tokens,
                decoder_out.output_tokens,
                decoder_out.output_scores,
                decoder_out.attn,
            )
            decoder_out = decoder_out._replace(
                output_tokens=out_tokens,
                output_scores=out_scores,
                attn=out_attn,
            )

        else:
            terminated = decoder_out.output_tokens.new_zeros(
                decoder_out.output_tokens.size(0)
            ).bool()

        if step == self.max_iter:  # reach last iteration, terminate
            terminated.fill_(1)

        # collect finalized sentences
        finalized_idxs = sent_idxs[terminated]
        finalized_tokens = decoder_out.output_tokens[terminated]
        finalized_scores = decoder_out.output_scores[terminated]
        finalized_attn = (
            None
            if (decoder_out.attn is None or decoder_out.attn.size(0) == 0)
            else decoder_out.attn[terminated]
        )

        if self.retain_history:
            finalized_history_tokens = [h[terminated] for h in decoder_out.history]

        for i in range(finalized_idxs.size(0)):
            finalized[finalized_idxs[i]] = [
                finalized_hypos(
                    step,
                    finalized_tokens[i],
                    finalized_scores[i],
                    None if finalized_attn is None else finalized_attn[i],
                )
            ]

            if self.retain_history:
                finalized[finalized_idxs[i]][0]["history"] = []
                for j in range(len(finalized_history_tokens)):
                    finalized[finalized_idxs[i]][0]["history"].append(
                        finalized_hypos(
                            step, finalized_history_tokens[j][i], None, None
                        )
                    )

        # check if all terminated
        if terminated.sum() == terminated.size(0):
            break

        # for next step: keep only the still-active rows of every per-sentence
        # tensor so later iterations run on a smaller batch.
        not_terminated = ~terminated
        prev_decoder_out = decoder_out._replace(
            output_tokens=decoder_out.output_tokens[not_terminated],
            output_scores=decoder_out.output_scores[not_terminated],
            attn=decoder_out.attn[not_terminated]
            if (decoder_out.attn is not None and decoder_out.attn.size(0) > 0)
            else None,
            history=[h[not_terminated] for h in decoder_out.history]
            if decoder_out.history is not None
            else None,
        )
        encoder_out = model.encoder.reorder_encoder_out(
            encoder_out, not_terminated.nonzero(as_tuple=False).squeeze()
        )
        sent_idxs = sent_idxs[not_terminated]
        prev_output_tokens = prev_decoder_out.output_tokens.clone()

    if self.beam_size > 1:
        if reranker is not None:
            finalized = self.rerank(
                reranker, finalized, [src_tokens, src_lengths], self.beam_size
            )

        # aggregate information from length beam: keep, for each original
        # sentence, the length-beam candidate with the highest score.
        finalized = [
            finalized[
                np.argmax(
                    [
                        finalized[self.beam_size * i + j][0]["score"]
                        for j in range(self.beam_size)
                    ]
                )
                + self.beam_size * i
            ]
            for i in range(len(finalized) // self.beam_size)
        ]
    return finalized
def rerank(self, reranker, finalized, encoder_input, beam_size):
    """Rescore finalized hypotheses with an autoregressive reranker model,
    overwriting each hypothesis' ``score`` in place.

    Args:
        reranker: autoregressive fairseq model used for scoring.
        finalized: list (len = bsz * beam_size) of single-hypothesis lists
            as produced by ``generate``.
        encoder_input: ``[src_tokens, src_lengths]`` for the reranker encoder.
        beam_size: number of length-beam candidates per source sentence.
    """
    def rebuild_batch(finalized):
        # Re-pad the variable-length finalized token sequences into one
        # (num_hyps x max_len) batch filled with self.pad.
        finalized_tokens = [f[0]["tokens"] for f in finalized]
        finalized_maxlen = max(f.size(0) for f in finalized_tokens)
        final_output_tokens = (
            finalized_tokens[0]
            .new_zeros(len(finalized_tokens), finalized_maxlen)
            .fill_(self.pad)
        )
        for i, f in enumerate(finalized_tokens):
            final_output_tokens[i, : f.size(0)] = f
        return final_output_tokens

    final_output_tokens = rebuild_batch(finalized)
    final_output_tokens[
        :, 0
    ] = self.eos  # autoregressive model assumes starting with EOS

    reranker_encoder_out = reranker.encoder(*encoder_input)
    # Replicate each source sentence's encoder state beam_size times so it
    # lines up with the flattened hypothesis batch.
    length_beam_order = (
        utils.new_arange(
            final_output_tokens, beam_size, reranker_encoder_out.encoder_out.size(1)
        )
        .t()
        .reshape(-1)
    )
    reranker_encoder_out = reranker.encoder.reorder_encoder_out(
        reranker_encoder_out, length_beam_order
    )
    # Teacher-forced log-probs of each hypothesis under the reranker.
    reranking_scores = reranker.get_normalized_probs(
        reranker.decoder(final_output_tokens[:, :-1], reranker_encoder_out),
        True,
        None,
    )
    # Pick the log-prob of the actual next token at every position, zero out
    # padding, then length-normalize to a per-token average score.
    reranking_scores = reranking_scores.gather(2, final_output_tokens[:, 1:, None])
    reranking_masks = final_output_tokens[:, 1:].ne(self.pad)
    reranking_scores = (
        reranking_scores[:, :, 0].masked_fill_(~reranking_masks, 0).sum(1)
    )
    reranking_scores = reranking_scores / reranking_masks.sum(1).type_as(
        reranking_scores
    )

    for i in range(len(finalized)):
        finalized[i][0]["score"] = reranking_scores[i]

    return finalized
| 13,998 | 36.530831 | 163 | py |
NeuralSpeech | NeuralSpeech-master/FastCorrect/FastCorrect/fastcorrect_task.py | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import itertools
import logging
logger = logging.getLogger(__name__)
import torch
from fairseq import utils
from fairseq.data import LanguagePairDataset
from fairseq.tasks import register_task
from fairseq.tasks.translation import TranslationTask
from fairseq.utils import new_arange
from fairseq.data import (
AppendTokenDataset,
ConcatDataset,
PrependTokenDataset,
StripTokenDataset,
TruncateDataset,
data_utils,
encoders,
indexed_dataset,
)
from language_pair_dataset import LanguagePairDataset
def load_langpair_dataset(
    data_path,
    split,
    src,
    src_dict,
    tgt,
    tgt_dict,
    combine,
    dataset_impl,
    upsample_primary,
    left_pad_source,
    left_pad_target,
    max_source_positions,
    max_target_positions,
    prepend_bos=False,
    load_alignments=False,
    truncate_source=False,
    append_source_id=False,
    num_buckets=0,
    shuffle=True,
    pad_to_multiple=1,
    src_with_werdur=False,
    append_eos_to_target=False,
):
    """Load a source/target language-pair dataset from `data_path`.

    Mirrors fairseq's translation-task loader, extended with the
    FastCorrect-specific flags ``src_with_werdur`` (source side carries WER
    duration annotations) and ``append_eos_to_target``, both forwarded to the
    local :class:`LanguagePairDataset`.

    Args:
        data_path: directory containing the binarized data shards.
        split: split name, e.g. "train"; shards "train", "train1", ... are
            picked up when ``combine`` is True.
        combine: if True, concatenate all numbered shards of the split.
        upsample_primary: sampling ratio for the first (primary) shard when
            multiple shards are concatenated.
        truncate_source: truncate source to ``max_source_positions`` tokens.
        prepend_bos: prepend BOS to both sides (also reported to the dataset
            via ``bos_prepended_outside``).

    Raises:
        FileNotFoundError: if no shard of ``split`` exists in ``data_path``.
    """
    def split_exists(split, src, tgt, lang, data_path):
        # True when "<split>.<src>-<tgt>.<lang>" exists as an indexed dataset.
        filename = os.path.join(data_path, "{}.{}-{}.{}".format(split, src, tgt, lang))
        return indexed_dataset.dataset_exists(filename, impl=dataset_impl)

    src_datasets = []
    tgt_datasets = []

    for k in itertools.count():
        split_k = split + (str(k) if k > 0 else "")

        # infer langcode: data may be stored under either "src-tgt" or
        # "tgt-src" direction.
        if split_exists(split_k, src, tgt, src, data_path):
            prefix = os.path.join(data_path, "{}.{}-{}.".format(split_k, src, tgt))
        elif split_exists(split_k, tgt, src, src, data_path):
            prefix = os.path.join(data_path, "{}.{}-{}.".format(split_k, tgt, src))
        else:
            if k > 0:
                # Ran out of numbered shards; stop collecting.
                break
            else:
                raise FileNotFoundError(
                    "Dataset not found: {} ({})".format(split, data_path)
                )

        src_dataset = data_utils.load_indexed_dataset(
            prefix + src, src_dict, dataset_impl
        )
        if truncate_source:
            # Strip EOS, truncate, then re-append EOS so truncated examples
            # still end in EOS.
            src_dataset = AppendTokenDataset(
                TruncateDataset(
                    StripTokenDataset(src_dataset, src_dict.eos()),
                    max_source_positions - 1,
                ),
                src_dict.eos(),
            )
        src_datasets.append(src_dataset)

        tgt_dataset = data_utils.load_indexed_dataset(
            prefix + tgt, tgt_dict, dataset_impl
        )
        if tgt_dataset is not None:
            tgt_datasets.append(tgt_dataset)

        logger.info(
            "{} {} {}-{} {} examples".format(
                data_path, split_k, src, tgt, len(src_datasets[-1])
            )
        )

        if not combine:
            break

    assert len(src_datasets) == len(tgt_datasets) or len(tgt_datasets) == 0

    if len(src_datasets) == 1:
        src_dataset = src_datasets[0]
        tgt_dataset = tgt_datasets[0] if len(tgt_datasets) > 0 else None
    else:
        # Concatenate shards, upsampling only the primary (first) shard.
        sample_ratios = [1] * len(src_datasets)
        sample_ratios[0] = upsample_primary
        src_dataset = ConcatDataset(src_datasets, sample_ratios)
        if len(tgt_datasets) > 0:
            tgt_dataset = ConcatDataset(tgt_datasets, sample_ratios)
        else:
            tgt_dataset = None

    if prepend_bos:
        assert hasattr(src_dict, "bos_index") and hasattr(tgt_dict, "bos_index")
        src_dataset = PrependTokenDataset(src_dataset, src_dict.bos())
        if tgt_dataset is not None:
            tgt_dataset = PrependTokenDataset(tgt_dataset, tgt_dict.bos())

    eos = None
    if append_source_id:
        # Append language-ID tokens (e.g. "[en]") and make the target-side
        # one the dataset's EOS.
        src_dataset = AppendTokenDataset(
            src_dataset, src_dict.index("[{}]".format(src))
        )
        if tgt_dataset is not None:
            tgt_dataset = AppendTokenDataset(
                tgt_dataset, tgt_dict.index("[{}]".format(tgt))
            )
        eos = tgt_dict.index("[{}]".format(tgt))

    align_dataset = None
    if load_alignments:
        align_path = os.path.join(data_path, "{}.align.{}-{}".format(split, src, tgt))
        if indexed_dataset.dataset_exists(align_path, impl=dataset_impl):
            align_dataset = data_utils.load_indexed_dataset(
                align_path, None, dataset_impl
            )

    tgt_dataset_sizes = tgt_dataset.sizes if tgt_dataset is not None else None
    return LanguagePairDataset(
        src_dataset,
        src_dataset.sizes,
        src_dict,
        tgt_dataset,
        tgt_dataset_sizes,
        tgt_dict,
        left_pad_source=left_pad_source,
        left_pad_target=left_pad_target,
        align_dataset=align_dataset,
        eos=eos,
        num_buckets=num_buckets,
        shuffle=shuffle,
        pad_to_multiple=pad_to_multiple,
        src_with_werdur=src_with_werdur,
        append_eos_to_target=append_eos_to_target,
        bos_prepended_outside=prepend_bos,
    )
@register_task("fastcorrect")
class FastCorrectTask(TranslationTask):
    """
    Translation (Sequence Generation) task for Levenshtein Transformer
    See `"Levenshtein Transformer" <https://arxiv.org/abs/1905.11006>`_.

    Adapted for FastCorrect: adds WER-duration options, loads the local
    ``LanguagePairDataset`` variant, and decodes with
    ``FastCorrectGenerator``.
    """

    @staticmethod
    def add_args(parser):
        """Add task-specific arguments to the parser."""
        # fmt: off
        TranslationTask.add_args(parser)
        parser.add_argument(
            '--noise',
            default='random_delete',
            choices=['random_delete', 'random_mask', 'no_noise', 'full_mask'])
        parser.add_argument(
            '--use-wer-dur', action="store_true", default=False,
            help='Whether to use wer dur in model')
        parser.add_argument(
            '--src-with-werdur', action="store_true", default=False,
            help='Whether the werdur is in dataset')
        parser.add_argument(
            '--use-soft-sigma', action="store_true", default=False,
            help='Whether to use soft sigma'
        )
        parser.add_argument(
            "--dur-predictor-type",
            type=str,
            default="",
            help="dur-predictor-type",
        )
        # fmt: on

    def load_dataset(self, split, epoch=1, combine=False, **kwargs):
        """Load a given dataset split.

        Args:
            split (str): name of the split (e.g., train, valid, test)
        """
        paths = utils.split_paths(self.args.data)
        assert len(paths) > 0
        # Round-robin over data directories across epochs.
        data_path = paths[(epoch - 1) % len(paths)]

        # infer langcode
        src, tgt = self.args.source_lang, self.args.target_lang

        self.datasets[split] = load_langpair_dataset(
            data_path,
            split,
            src,
            self.src_dict,
            tgt,
            self.tgt_dict,
            combine=combine,
            dataset_impl=self.args.dataset_impl,
            upsample_primary=self.args.upsample_primary,
            left_pad_source=self.args.left_pad_source,
            left_pad_target=self.args.left_pad_target,
            max_source_positions=self.args.max_source_positions,
            max_target_positions=self.args.max_target_positions,
            prepend_bos=True,
            src_with_werdur=self.args.src_with_werdur,
            append_eos_to_target=False,  # add this although eos already add in data preprocess
        )

    def inject_noise(self, target_tokens):
        """Corrupt `target_tokens` according to ``--noise`` to build the
        decoder input (``prev_target``) for NAT-style training."""

        def _random_delete(target_tokens):
            # Delete a random fraction of non-special tokens from each row,
            # then re-compact rows and trim trailing all-pad columns.
            pad = self.tgt_dict.pad()
            bos = self.tgt_dict.bos()
            eos = self.tgt_dict.eos()

            max_len = target_tokens.size(1)
            target_mask = target_tokens.eq(pad)
            target_score = target_tokens.clone().float().uniform_()
            # Give bos/eos score 0 and pad score 1 so, after sorting, the
            # kept prefix always includes bos/eos and never includes pad.
            target_score.masked_fill_(
                target_tokens.eq(bos) | target_tokens.eq(eos), 0.0
            )
            target_score.masked_fill_(target_mask, 1)
            target_score, target_rank = target_score.sort(1)
            target_length = target_mask.size(1) - target_mask.float().sum(
                1, keepdim=True
            )

            # do not delete <bos> and <eos> (we assign 0 score for them)
            target_cutoff = (
                2
                + (
                    (target_length - 2)
                    * target_score.new_zeros(target_score.size(0), 1).uniform_()
                ).long()
            )
            target_cutoff = target_score.sort(1)[1] >= target_cutoff

            prev_target_tokens = (
                target_tokens.gather(1, target_rank)
                .masked_fill_(target_cutoff, pad)
                .gather(1, target_rank.masked_fill_(target_cutoff, max_len).sort(1)[1])
            )
            prev_target_tokens = prev_target_tokens[
                :, : prev_target_tokens.ne(pad).sum(1).max()
            ]

            return prev_target_tokens

        def _random_mask(target_tokens):
            # Replace a random number (>= 1) of non-special tokens with <unk>.
            pad = self.tgt_dict.pad()
            bos = self.tgt_dict.bos()
            eos = self.tgt_dict.eos()
            unk = self.tgt_dict.unk()

            target_masks = (
                target_tokens.ne(pad) & target_tokens.ne(bos) & target_tokens.ne(eos)
            )
            target_score = target_tokens.clone().float().uniform_()
            # Special tokens get score 2.0 so they sort last and are never masked.
            target_score.masked_fill_(~target_masks, 2.0)
            target_length = target_masks.sum(1).float()
            target_length = target_length * target_length.clone().uniform_()
            target_length = target_length + 1  # make sure to mask at least one token.

            _, target_rank = target_score.sort(1)
            target_cutoff = new_arange(target_rank) < target_length[:, None].long()
            prev_target_tokens = target_tokens.masked_fill(
                target_cutoff.scatter(1, target_rank, target_cutoff), unk
            )
            return prev_target_tokens

        def _full_mask(target_tokens):
            # Replace every non-special token with <unk>.
            pad = self.tgt_dict.pad()
            bos = self.tgt_dict.bos()
            eos = self.tgt_dict.eos()
            unk = self.tgt_dict.unk()

            target_mask = (
                target_tokens.eq(bos) | target_tokens.eq(eos) | target_tokens.eq(pad)
            )
            return target_tokens.masked_fill(~target_mask, unk)

        if self.args.noise == "random_delete":
            return _random_delete(target_tokens)
        elif self.args.noise == "random_mask":
            return _random_mask(target_tokens)
        elif self.args.noise == "full_mask":
            return _full_mask(target_tokens)
        elif self.args.noise == "no_noise":
            return target_tokens
        else:
            raise NotImplementedError

    def build_generator(self, models, args, **unused):
        """Build the iterative-refinement generator used for inference."""
        # add models input to match the API for SequenceGenerator
        from fastcorrect_generator import FastCorrectGenerator
        # print("edit_thre:", getattr(args, "edit_thre", 0.0))
        return FastCorrectGenerator(
            self.target_dictionary,
            eos_penalty=getattr(args, "iter_decode_eos_penalty", 0.0),
            max_iter=getattr(args, "iter_decode_max_iter", 10),
            beam_size=getattr(args, "iter_decode_with_beam", 1),
            reranking=getattr(args, "iter_decode_with_external_reranker", False),
            decoding_format=getattr(args, "decoding_format", None),
            adaptive=not getattr(args, "iter_decode_force_max_iter", False),
            retain_history=getattr(args, "retain_iter_history", False),
            edit_thre=getattr(args, "edit_thre", 0.0),
            print_werdur=getattr(args, "print_werdur", False),
            retain_dropout=getattr(args, "retain_dropout", False)
        )

    def build_dataset_for_inference(self, src_tokens, src_lengths, constraints=None):
        """Wrap raw source tensors for inference; constraints unsupported."""
        if constraints is not None:
            # Though see Susanto et al. (ACL 2020): https://www.aclweb.org/anthology/2020.acl-main.325/
            raise NotImplementedError(
                "Constrained decoding with the translation_lev task is not supported"
            )

        return LanguagePairDataset(
            src_tokens, src_lengths, self.source_dictionary, append_bos=True,
        )

    def train_step(
        self, sample, model, criterion, optimizer, update_num, ignore_grad=False
    ):
        """One training step: inject noise into the target, run the
        criterion, and backpropagate (zeroed when ``ignore_grad``)."""
        model.train()
        sample["prev_target"] = self.inject_noise(sample["target"])
        loss, sample_size, logging_output = criterion(model, sample)
        if ignore_grad:
            loss *= 0
        optimizer.backward(loss)
        return loss, sample_size, logging_output

    def valid_step(self, sample, model, criterion):
        """One validation step: same as train_step but without a backward pass."""
        model.eval()
        with torch.no_grad():
            sample["prev_target"] = self.inject_noise(sample["target"])
            loss, sample_size, logging_output = criterion(model, sample)
        return loss, sample_size, logging_output

    def inference_step(
        self, generator, models, sample, prefix_tokens=None, constraints=None, werdur_gt_str="",
    ):
        """Generate hypotheses for `sample`, forwarding the optional
        ground-truth duration string to the generator."""
        with torch.no_grad():
            return generator.generate(
                models, sample, prefix_tokens=prefix_tokens, constraints=constraints, werdur_gt_str=werdur_gt_str)
| 13,281 | 34.513369 | 113 | py |
NeuralSpeech | NeuralSpeech-master/FastCorrect/FastCorrect/fc_loss.py | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
import torch
import torch.nn.functional as F
from fairseq import metrics, utils
from fairseq.criterions import FairseqCriterion, register_criterion
from torch import Tensor
@register_criterion("fc_loss")
class FastCorrectCriterion(FairseqCriterion):
    """Multi-objective criterion for FastCorrect.

    The model's ``forward`` returns a dict of named loss components (e.g.
    token loss, duration loss); each component is either a pre-computed loss
    (``{"loss": ..., "factor": ...}``) or raw logits/targets to be scored
    here with label-smoothed NLL. The components are summed into the final
    training loss.
    """

    def __init__(self, task, label_smoothing):
        super().__init__(task)
        # Epsilon for label smoothing on the token-level NLL loss.
        self.label_smoothing = label_smoothing

    @staticmethod
    def add_args(parser):
        """Add criterion-specific arguments to the parser."""
        parser.add_argument(
            "--label-smoothing",
            default=0.0,
            type=float,
            metavar="D",
            help="epsilon for label smoothing, 0 means no label smoothing",
        )

    def _compute_loss(
        self, outputs, targets, masks=None, label_smoothing=0.0, name="loss", factor=1.0
    ):
        """
        outputs: batch x len x d_model
        targets: batch x len
        masks: batch x len
        policy_logprob: if there is some policy
            depends on the likelihood score as rewards.
        """

        def mean_ds(x: Tensor, dim=None) -> Tensor:
            return (
                x.float().mean().type_as(x)
                if dim is None
                else x.float().mean(dim).type_as(x)
            )

        if masks is not None:
            outputs, targets = outputs[masks], targets[masks]

        if masks is not None and not masks.any():
            # Nothing to score (fully masked batch): zero loss.
            nll_loss = torch.tensor(0)
            loss = nll_loss
        else:
            logits = F.log_softmax(outputs, dim=-1)
            if targets.dim() == 1:
                losses = F.nll_loss(logits, targets.to(logits.device), reduction="none")
            else:  # soft-labels
                losses = F.kl_div(logits, targets.to(logits.device), reduction="none")
                losses = losses.sum(-1)

            nll_loss = mean_ds(losses)
            if label_smoothing > 0:
                # Uniform label smoothing: mix NLL with the mean log-prob.
                loss = (
                    nll_loss * (1 - label_smoothing) - mean_ds(logits) * label_smoothing
                )
            else:
                loss = nll_loss

        loss = loss * factor
        return {"name": name, "loss": loss, "nll_loss": nll_loss, "factor": factor}

    def _custom_loss(self, loss, nll_loss=None, name="loss", factor=1.0):
        # Wrap a loss already computed inside the model into the same dict
        # shape that _compute_loss produces.
        if nll_loss is not None:
            return {"name": name, "loss": loss, "nll_loss": nll_loss, "factor": factor}
        else:
            return {"name": name, "loss": loss, "factor": factor}

    def forward(self, model, sample, reduce=True):
        """Compute the loss for the given sample.
        Returns a tuple with three elements:
        1) the loss
        2) the sample size, which is used as the denominator for the gradient
        3) logging outputs to display while training
        """
        nsentences, ntokens = sample["nsentences"], sample["ntokens"]

        # B x T
        src_tokens, src_lengths = (
            sample["net_input"]["src_tokens"],
            sample["net_input"]["src_lengths"],
        )
        if "wer_dur" in sample["net_input"].keys():
            wer_dur = sample["net_input"]["wer_dur"]
            to_be_edited = sample["net_input"]["to_be_edited"]
            for_wer_gather = sample["net_input"]["for_wer_gather"]
        else:
            wer_dur = None
            to_be_edited = None
            for_wer_gather = None

        tgt_tokens, prev_output_tokens = sample["target"], sample["prev_target"]

        outputs = model(src_tokens, src_lengths, prev_output_tokens, tgt_tokens, wer_dur, to_be_edited, for_wer_gather)
        losses, nll_loss = [], []

        for obj in outputs:
            if outputs[obj].get("loss", None) is None:
                # Raw logits/targets: score them here.
                _losses = self._compute_loss(
                    outputs[obj].get("out"),
                    outputs[obj].get("tgt"),
                    outputs[obj].get("mask", None),
                    outputs[obj].get("ls", 0.0),
                    name=obj + "-loss",
                    factor=outputs[obj].get("factor", 1.0),
                )
            else:
                # Pre-computed loss from the model.
                _losses = self._custom_loss(
                    outputs[obj].get("loss"),
                    outputs[obj].get("nll_loss", None),
                    name=obj + "-loss",
                    factor=outputs[obj].get("factor", 1.0),
                )

            losses += [_losses]
            if outputs[obj].get("nll_loss", False):
                nll_loss += [_losses.get("nll_loss", 0.0)]

        loss = sum(l["loss"] for l in losses)
        nll_loss = sum(l for l in nll_loss) if len(nll_loss) > 0 else loss.new_tensor(0)
        #print("loss, nll_loss", loss, nll_loss)
        #for l in losses:
        #    print(l['name'], l['loss'], utils.item(l["loss"].data / l["factor"]))

        # NOTE:
        # we don't need to use sample_size as denominator for the gradient
        # here sample_size is just used for logging
        sample_size = 1
        logging_output = {
            "loss": loss.data,
            "nll_loss": nll_loss.data,
            "ntokens": ntokens,
            "nsentences": nsentences,
            "sample_size": sample_size,
        }

        for l in losses:
            logging_output[l["name"]] = (
                utils.item(l["loss"].data / l["factor"])
                if reduce
                # BUG FIX: was `l[["loss"]]` -- a list is not a valid dict
                # key, so the un-reduced path raised TypeError.
                else l["loss"].data / l["factor"]
            )

        return loss, sample_size, logging_output

    @staticmethod
    def reduce_metrics(logging_outputs) -> None:
        """Aggregate logging outputs from data parallel training."""
        sample_size = utils.item(
            sum(log.get("sample_size", 0) for log in logging_outputs)
        )
        loss = utils.item(sum(log.get("loss", 0) for log in logging_outputs))
        nll_loss = utils.item(sum(log.get("nll_loss", 0) for log in logging_outputs))

        metrics.log_scalar(
            "loss", loss / sample_size / math.log(2), sample_size, round=3
        )
        metrics.log_scalar(
            "nll_loss", nll_loss / sample_size / math.log(2), sample_size, round=3
        )
        metrics.log_derived(
            "ppl", lambda meters: utils.get_perplexity(meters["loss"].avg)
        )

        # Per-component losses keyed by the "<name>-loss" convention above.
        for key in logging_outputs[0]:
            if key[-5:] == "-loss":
                val = sum(log.get(key, 0) for log in logging_outputs)
                metrics.log_scalar(
                    key[:-5],
                    val / sample_size / math.log(2) if sample_size > 0 else 0.0,
                    sample_size,
                    round=3,
                )

    @staticmethod
    def logging_outputs_can_be_summed() -> bool:
        """
        Whether the logging outputs returned by `forward` can be summed
        across workers prior to calling `reduce_metrics`. Setting this
        to True will improves distributed training speed.
        """
        return True
| 7,118 | 35.137056 | 119 | py |
NeuralSpeech | NeuralSpeech-master/FastCorrect/FastCorrect/fastcorrect_model.py | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn.functional as F
from fairseq import utils
from fastcorrect_generator import DecoderOut
from fairseq.models import register_model, register_model_architecture
from fairseq.models.nat import FairseqNATDecoder, FairseqNATModel, ensemble_decoder, ensemble_encoder
from fairseq.models.transformer import Embedding
from fairseq.modules.transformer_sentence_encoder import init_bert_params
from fairseq.modules import FairseqDropout
from torch import Tensor
from fairseq.models.transformer import (
TransformerEncoder,
)
import logging
logger = logging.getLogger(__name__)
def Embeddingright(num_embeddings, embedding_dim, padding_idx):
    """Build an embedding table with fairseq-style initialization.

    Weights are drawn from N(0, embedding_dim ** -0.5) and the padding row
    (if any) is zeroed.

    Args:
        num_embeddings: vocabulary size.
        embedding_dim: embedding dimension.
        padding_idx: index whose embedding is fixed to zeros, or None.

    Returns:
        torch.nn.Embedding: the initialized embedding module.
    """
    # BUG FIX: the original referenced `nn.Embedding` / `nn.init`, but this
    # module never imports `torch.nn as nn` (only `torch` and
    # `torch.nn.functional as F`), so every call raised NameError. Use the
    # fully-qualified names through the existing `torch` import instead.
    m = torch.nn.Embedding(num_embeddings, embedding_dim, padding_idx=padding_idx)
    torch.nn.init.normal_(m.weight, mean=0, std=embedding_dim ** -0.5)
    if padding_idx is not None:
        torch.nn.init.constant_(m.weight[padding_idx], 0)
    return m
def _mean_pooling(enc_feats, src_masks):
    """Average encoder states over the time axis, ignoring padded steps.

    Args:
        enc_feats: T x B x C tensor of encoder features.
        src_masks: B x T boolean padding mask (True at padded positions),
            or None when the batch has no padding.

    Returns:
        B x C tensor of per-sentence mean features.
    """
    if src_masks is None:
        # No padding information: plain mean over the time dimension.
        return enc_feats.mean(0)
    # Turn the padding mask into a T x B keep-weight of 1.0 / 0.0.
    keep = (~src_masks).transpose(0, 1).type_as(enc_feats)
    # Number of valid (non-pad) steps per sentence, broadcast to T x B x C.
    valid_counts = keep.sum(0)[None, :, None]
    weighted = (enc_feats / valid_counts) * keep[:, :, None]
    return weighted.sum(0)
def _argmax(x, dim):
    """Return a 0/1 mask marking the maxima of ``x`` along ``dim``.

    Ties all receive 1; the result has the same dtype as ``x``.
    """
    peak = x.max(dim, keepdim=True)[0]
    return x.eq(peak).type_as(x)
@register_model("fastcorrect")
class FastCorrectModel(FairseqNATModel):
def __init__(self, args, encoder, decoder):
super().__init__(args, encoder, decoder)
self.to_be_edited_size = getattr(args, "to_be_edited_size", 1)
if getattr(args, 'assist_edit_loss', False):
print("add assist edit loss!")
self.assist_edit_loss = True
else:
self.assist_edit_loss = False
self.werdur_max_predict = getattr(args, 'werdur_max_predict', 5.0)
print("werdur_max_predict: ", self.werdur_max_predict)
self.werdur_loss_type = getattr(args, 'werdur_loss_type', 'l2')
print("werdur_loss_type: ", self.werdur_loss_type)
if self.werdur_loss_type == 'l2':
self.werdur_loss_func = F.mse_loss
elif self.werdur_loss_type == 'log_l2':
self.werdur_loss_func = self.log_mse_loss
elif self.werdur_loss_type == 'l1':
self.werdur_loss_func = F.l1_loss
elif self.werdur_loss_type == 'log_l1':
self.werdur_loss_func = self.log_l1_loss
else:
raise ValueError("Unsupported werdur_loss_type")
def log_mse_loss(self, hypo, ref, reduction='none'):
hypo = torch.exp(hypo) - 1.0
return F.mse_loss(hypo, ref, reduction=reduction)
def log_l1_loss(self, hypo, ref, reduction='none'):
hypo = torch.exp(hypo) - 1.0
return F.l1_loss(hypo, ref, reduction=reduction)
@property
def allow_length_beam(self):
return True
@staticmethod
def add_args(parser):
FairseqNATModel.add_args(parser)
# length prediction
parser.add_argument(
"--assist-edit-loss",
action="store_true",
default=False,
help="whether to use assist edit loss",
)
parser.add_argument(
"--sg-length-pred",
action="store_true",
help="stop the gradients back-propagated from the length predictor",
)
parser.add_argument(
"--length-loss-factor",
type=float,
help="weights on the length prediction loss",
)
parser.add_argument(
"--edit-emb-dim",
type=int,
help="dimension of edit emb",
)
parser.add_argument(
"--to-be-edited-size",
type=int,
help="size of to be edited (2 for edited or not, 4 or insert/delete/change/not do",
)
parser.add_argument(
"--werdur-max-predict",
type=float,
help="max value of werdur",
)
parser.add_argument(
"--werdur-loss-type",
type=str,
help="type of werdur loss",
)
@classmethod
def build_decoder(cls, args, tgt_dict, embed_tokens):
decoder = FastCorrectDecoder(args, tgt_dict, embed_tokens)
if getattr(args, "apply_bert_init", False):
decoder.apply(init_bert_params)
return decoder
@classmethod
def build_encoder(cls, args, src_dict, embed_tokens):
encoder = FastCorrectEncoder(args, src_dict, embed_tokens)
if getattr(args, "apply_bert_init", False):
encoder.apply(init_bert_params)
return encoder
@classmethod
def from_pretrained(
cls,
model_name_or_path,
checkpoint_file="model.pt",
data_name_or_path=".",
**kwargs,
):
"""
Load a :class:`~fairseq.models.FairseqModel` from a pre-trained model
file. Downloads and caches the pre-trained model file if needed.
The base implementation returns a
:class:`~fairseq.hub_utils.GeneratorHubInterface`, which can be used to
generate translations or sample from language models. The underlying
:class:`~fairseq.models.FairseqModel` can be accessed via the
*generator.models* attribute.
Other models may override this to implement custom hub interfaces.
Args:
model_name_or_path (str): either the name of a pre-trained model to
load or a path/URL to a pre-trained model state dict
checkpoint_file (str, optional): colon-separated list of checkpoint
files in the model archive to ensemble (default: 'model.pt')
data_name_or_path (str, optional): point args.data to the archive
at the given path/URL. Can start with '.' or './' to reuse the
model archive path.
"""
import hub_utils_fc
x = hub_utils_fc.from_pretrained(
model_name_or_path,
checkpoint_file,
data_name_or_path,
archive_map=cls.hub_models(),
**kwargs,
)
cls.upgrade_args(x["args"])
logger.info(x["args"])
return hub_utils_fc.GeneratorHubInterface(x["args"], x["task"], x["models"])
def _compute_nll_loss(
self, outputs, targets, masks=None, label_smoothing=0.0, name="loss", factor=1.0
):
"""
outputs: batch x len x d_model
targets: batch x len
masks: batch x len
policy_logprob: if there is some policy
depends on the likelihood score as rewards.
"""
def mean_ds(x: Tensor, dim=None) -> Tensor:
return (
x.float().mean().type_as(x)
if dim is None
else x.float().mean(dim).type_as(x)
)
if masks is not None:
outputs, targets = outputs[masks], targets[masks]
if masks is not None and not masks.any():
nll_loss = torch.tensor(0)
loss = nll_loss
else:
logits = F.log_softmax(outputs, dim=-1)
if targets.dim() == 1:
losses = F.nll_loss(logits, targets.to(logits.device), reduction="none")
else: # soft-labels
losses = F.kl_div(logits, targets.to(logits.device), reduction="none")
losses = losses.sum(-1)
nll_loss = mean_ds(losses)
if label_smoothing > 0:
loss = (
nll_loss * (1 - label_smoothing) - mean_ds(logits) * label_smoothing
)
else:
loss = nll_loss
loss = loss * factor
return {"name": name, "loss": loss, "nll_loss": nll_loss, "factor": factor}, None
def forward_encoder(self, encoder_inputs):
src_tokens, src_lengths = encoder_inputs
#attn_mask = None
return self.encoder(src_tokens, src_lengths=src_lengths)
def forward(
self, src_tokens, src_lengths, prev_output_tokens, tgt_tokens, wer_dur=None, to_be_edited=None, for_wer_gather=None, **kwargs
):
# encoding
# attn_mask = None
encoder_out = self.encoder(src_tokens, src_lengths=src_lengths, **kwargs)
wer_dur_pred, to_be_edited_pred, closest_pred = self.decoder.forward_wer_dur_and_tbe(
normalize=False, encoder_out=encoder_out
)
wer_dur = wer_dur.type_as(wer_dur_pred).clamp(0.0, self.werdur_max_predict) # modify wer_dur is ok because in decoder only use for gather
src_no_pad = (~(encoder_out.encoder_padding_mask))
wer_dur_pred = wer_dur_pred.squeeze(-1)
wer_dur_pred_loss_float = self.werdur_loss_func(wer_dur_pred, wer_dur, reduction='none').float()
wer_dur_pred_loss = wer_dur_pred_loss_float[src_no_pad.bool()].mean().type_as(wer_dur_pred)
if self.assist_edit_loss:
if self.to_be_edited_size == 1:
to_be_edited_pred_loss_float = F.binary_cross_entropy_with_logits(to_be_edited_pred.squeeze(-1), to_be_edited.type_as(to_be_edited_pred), reduction='none').float()
to_be_edited_pred_loss = to_be_edited_pred_loss_float[src_no_pad.bool()].mean().type_as(to_be_edited_pred)
else:
raise ValueError("Unsupported condition!")
else:
to_be_edited_pred_loss = torch.Tensor([0.0])[0]
word_ins_out = self.decoder(
normalize=False,
prev_output_tokens=prev_output_tokens,
encoder_out=encoder_out,
wer_dur=wer_dur,
to_be_edited=to_be_edited, for_wer_gather=for_wer_gather, debug_src_tokens=src_tokens, debug_tgt_tokens=tgt_tokens
)
return_dict = {
"wer_dur_loss": {
"loss": wer_dur_pred_loss,
"factor": self.decoder.length_loss_factor,
},
}
return_dict["word_ins"], _ = self._compute_nll_loss(
word_ins_out,
tgt_tokens,
tgt_tokens.ne(self.pad),
self.args.label_smoothing,
name="word_ins" + "-loss",
factor=1.0,
)
if self.assist_edit_loss:
return_dict['to_be_edited_loss'] = {
"loss": to_be_edited_pred_loss,
"factor": self.decoder.length_loss_factor,
}
return return_dict
def forward_decoder(self, decoder_out, encoder_out, decoding_format=None, **kwargs):
step = decoder_out.step
output_tokens = decoder_out.output_tokens
output_scores = decoder_out.output_scores
history = decoder_out.history
to_be_edited_pred = decoder_out.to_be_edited_pred
wer_dur_pred = decoder_out.wer_dur_pred
for_wer_gather = wer_dur_pred.cumsum(dim=-1)
for_wer_gather = torch.nn.functional.one_hot(for_wer_gather, num_classes=for_wer_gather.max() + 1)[:, :-1, :-1].sum(-2).cumsum(dim=-1)
# execute the decoder
output_masks = output_tokens.ne(self.pad)
_scores, _tokens = self.decoder(
normalize=True,
prev_output_tokens=output_tokens,
encoder_out=encoder_out,
step=step,
wer_dur=wer_dur_pred,
to_be_edited=to_be_edited_pred, for_wer_gather=for_wer_gather
).max(-1)
output_tokens.masked_scatter_(output_masks, _tokens[output_masks])
output_scores.masked_scatter_(output_masks, _scores[output_masks])
if history is not None:
history.append(output_tokens.clone())
return decoder_out._replace(
output_tokens=output_tokens,
output_scores=output_scores,
attn=None,
history=history,
)
    def initialize_output_tokens(self, encoder_out, src_tokens, edit_thre=0.0, print_werdur=False, werdur_gt_str=""):
        """Build the initial draft target sequence from predicted durations.

        Predicts per-source-token durations (and edit labels) from the encoder
        states, sums them into a target length per sentence, and constructs a
        ``bos <unk>... eos`` draft padded to the batch max length.
        ``edit_thre``/``print_werdur``/``werdur_gt_str`` are accepted for
        interface compatibility but unused here.
        """
        wer_dur_pred, to_be_edited_pred, closest_pred = self.decoder.forward_wer_dur_and_tbe(
            normalize=False, encoder_out=encoder_out
        )
        # Durations trained in log domain must be exponentiated back first.
        if 'log' in self.werdur_loss_type:
            wer_dur_pred = (torch.exp(wer_dur_pred) - 1.0).squeeze(-1).round().long().clamp_(min=0)
            length_tgt = wer_dur_pred.sum(-1)
        else:
            wer_dur_pred = wer_dur_pred.squeeze(-1).round().long().clamp_(min=0)
            length_tgt = wer_dur_pred.sum(-1)
        # NOTE: clamp_ is in place — length_tgt itself is floored at 2 (room
        # for bos and eos) before the subsequent mask/scatter steps use it.
        max_length = length_tgt.clamp_(min=2).max()
        #if len(src_tokens.shape) == 3:
        #    idx_length = utils.new_arange(src_tokens[:, :, 0], max_length)
        #else:
        idx_length = utils.new_arange(src_tokens, max_length)
        initial_output_tokens = src_tokens.new_zeros(
            src_tokens.size(0), max_length
        ).fill_(self.pad)
        # Positions before each sentence's target length become <unk> slots.
        initial_output_tokens.masked_fill_(
            idx_length[None, :] < length_tgt[:, None], self.unk
        )
        initial_output_tokens[:, 0] = self.bos
        initial_output_tokens.scatter_(1, length_tgt[:, None] - 1, self.eos)
        initial_output_scores = initial_output_tokens.new_zeros(
            *initial_output_tokens.size()
        ).type_as(encoder_out.encoder_out)
        return DecoderOut(
            output_tokens=initial_output_tokens,
            output_scores=initial_output_scores,
            attn=None,
            step=0,
            max_step=0,
            history=None,
            to_be_edited_pred=None,
            wer_dur_pred=wer_dur_pred
        ), encoder_out
    def regenerate_length_beam(self, decoder_out, beam_size):
        """Expand each hypothesis into ``beam_size`` candidate lengths.

        Offsets the current (non-pad) length by ``-beam_size//2 .. +...`` and
        rebuilds the ``bos <unk>... eos`` draft tokens for every candidate.
        """
        output_tokens = decoder_out.output_tokens
        length_tgt = output_tokens.ne(self.pad).sum(1)
        length_tgt = (
            length_tgt[:, None]
            + utils.new_arange(length_tgt, 1, beam_size)
            - beam_size // 2
        )
        # Flatten to (B * beam_size,) candidate lengths; clamp_ is in place.
        length_tgt = length_tgt.view(-1).clamp_(min=2)
        max_length = length_tgt.max()
        idx_length = utils.new_arange(length_tgt, max_length)
        initial_output_tokens = output_tokens.new_zeros(
            length_tgt.size(0), max_length
        ).fill_(self.pad)
        # Slots before each candidate length are unknowns to be predicted.
        initial_output_tokens.masked_fill_(
            idx_length[None, :] < length_tgt[:, None], self.unk
        )
        initial_output_tokens[:, 0] = self.bos
        initial_output_tokens.scatter_(1, length_tgt[:, None] - 1, self.eos)
        initial_output_scores = initial_output_tokens.new_zeros(
            *initial_output_tokens.size()
        ).type_as(decoder_out.output_scores)
        return decoder_out._replace(
            output_tokens=initial_output_tokens, output_scores=initial_output_scores
        )
class FastCorrectEncoder(TransformerEncoder):
    """FastCorrect encoder: a plain TransformerEncoder whose forward is
    wrapped by ``ensemble_decoder``'s encoder counterpart so several models
    can be ensembled at inference time."""
    def __init__(self, args, dictionary, embed_tokens):
        super().__init__(args, dictionary, embed_tokens)
        # Populated externally when decoding with an ensemble of models.
        self.ensemble_models = None
    @ensemble_encoder
    def forward(self, *args, **kwargs):
        """Delegate to TransformerEncoder.forward (ensemble-aware)."""
        return super().forward(*args, **kwargs)
class LayerNorm(torch.nn.LayerNorm):
    """Layer normalization over an arbitrary axis.

    ``torch.nn.LayerNorm`` always normalizes the last dimension; this wrapper
    lets the statistics be taken over ``dim`` instead by transposing that
    axis to the end, normalizing, and transposing back.

    :param int nout: size of the normalized dimension
    :param int dim: dimension to be normalized (default: -1, the last axis)
    :param float eps: numerical-stability epsilon
    """

    def __init__(self, nout, dim=-1, eps=1e-12):
        """Construct a LayerNorm object."""
        super().__init__(nout, eps=eps)
        self.dim = dim

    def forward(self, x):
        """Apply layer normalization along ``self.dim``.

        :param torch.Tensor x: input tensor
        :return: layer-normalized tensor of the same shape
        :rtype: torch.Tensor
        """
        if self.dim != -1:
            # Move the target axis last, normalize, then restore the layout.
            return super().forward(x.transpose(1, -1)).transpose(1, -1)
        return super().forward(x)
class DurationPredictor(torch.nn.Module):
    """Predict per-source-token duration ("wer-dur") and an edit label.

    A stack of Conv1d -> ReLU -> LayerNorm -> Dropout blocks followed by two
    heads: ``werdur_linear`` (one duration value per token) and
    ``edit_linear`` (``to_be_edited_size`` logits per token).
    """

    def __init__(self, idim, n_layers=2, n_chans=384, kernel_size=3, dropout_rate=0.1, ffn_layers=1, offset=1.0, ln_eps=1e-12, remove_edit_emb=False, to_be_edited_size=1, padding='SAME'):
        """Initilize duration predictor module.
        Args:
            idim (int): Input dimension.
            n_layers (int, optional): Number of convolutional layers.
            n_chans (int, optional): Number of channels of convolutional layers.
            kernel_size (int, optional): Kernel size of convolutional layers.
            dropout_rate (float, optional): Dropout rate.
            ffn_layers (int, optional): 1 = single linear head, 2 = 2-layer MLP head.
            offset (float, optional): Offset value to avoid nan in log domain.
            ln_eps (float, optional): LayerNorm epsilon.
            remove_edit_emb (bool, optional): stored for external readers; unused here.
            to_be_edited_size (int, optional): output width of the edit head.
            padding (str, optional): 'SAME' (symmetric) or 'LEFT' (causal) conv padding.
        """
        super(DurationPredictor, self).__init__()
        self.offset = offset
        self.conv = torch.nn.ModuleList()
        self.kernel_size = kernel_size
        self.padding = padding
        self.remove_edit_emb = remove_edit_emb
        for idx in range(n_layers):
            in_chans = idim if idx == 0 else n_chans
            self.conv += [torch.nn.Sequential(
                torch.nn.Conv1d(in_chans, n_chans, kernel_size, stride=1, padding=0),
                torch.nn.ReLU(),
                LayerNorm(n_chans, dim=1, eps=ln_eps),
                FairseqDropout(dropout_rate, module_name="DP_dropout")
            )]
        if ffn_layers == 1:
            self.werdur_linear = torch.nn.Linear(n_chans, 1)
            self.edit_linear = torch.nn.Linear(n_chans, to_be_edited_size)
        else:
            assert ffn_layers == 2
            self.werdur_linear = torch.nn.Sequential(
                torch.nn.Linear(n_chans, n_chans // 2),
                torch.nn.ReLU(),
                FairseqDropout(dropout_rate, module_name="DP_dropout"),
                torch.nn.Linear(n_chans // 2, 1),
            )
            self.edit_linear = torch.nn.Sequential(
                torch.nn.Linear(n_chans, n_chans // 2),
                torch.nn.ReLU(),
                FairseqDropout(dropout_rate, module_name="DP_dropout"),
                torch.nn.Linear(n_chans // 2, to_be_edited_size),
            )

    def forward(self, xs, x_nonpadding=None):
        """Run the conv stack and both heads.

        Args:
            xs (Tensor): encoder features (B, Tmax, idim).
            x_nonpadding (Tensor, optional): 1/0 non-padding mask (B, Tmax).

        Returns:
            tuple: duration predictions (B, Tmax, 1) and edit predictions
            (B, Tmax, to_be_edited_size), both zeroed on padded positions
            when a mask is given.
        """
        xs = xs.transpose(1, -1)  # (B, idim, Tmax)
        for f in self.conv:
            if self.padding == 'SAME':
                xs = F.pad(xs, [self.kernel_size // 2, self.kernel_size // 2])
            elif self.padding == 'LEFT':
                xs = F.pad(xs, [self.kernel_size - 1, 0])
            xs = f(xs)  # (B, C, Tmax)
            if x_nonpadding is not None:
                xs = xs * x_nonpadding[:, None, :]
        xs = xs.transpose(1, -1)
        # Fix: guard the mask at the output heads as well — the parameter
        # defaults to None, but the original multiplied unconditionally and
        # crashed with a TypeError when no mask was supplied.
        mask = x_nonpadding[:, :, None] if x_nonpadding is not None else 1.0
        werdur = self.werdur_linear(xs) * mask  # (B, Tmax, 1)
        to_be_edited = self.edit_linear(xs) * mask  # (B, Tmax, to_be_edited_size)
        return werdur, to_be_edited
class FastCorrectDecoder(FairseqNATDecoder):
    """Non-autoregressive decoder for FastCorrect error correction.

    Extends the NAT decoder with a DurationPredictor over encoder states that
    jointly predicts per-source-token duration ("wer-dur") and edit labels;
    decoder inputs are built by gathering source embeddings according to the
    predicted durations instead of using target token embeddings.
    """
    def __init__(self, args, dictionary, embed_tokens, no_encoder_attn=False):
        super().__init__(
            args, dictionary, embed_tokens, no_encoder_attn=no_encoder_attn
        )
        self.dictionary = dictionary
        self.bos = dictionary.bos()
        self.unk = dictionary.unk()
        self.eos = dictionary.eos()
        # try:
        #     self.mask = dictionary.mask()
        # except:
        #     print("<mask> not found in dictionary!")
        #     self.mask = None
        self.encoder_embed_dim = args.encoder_embed_dim
        self.sg_length_pred = getattr(args, "sg_length_pred", False)
        self.length_loss_factor = getattr(args, "length_loss_factor", 0.1)
        self.to_be_edited_size = getattr(args, "to_be_edited_size", 1)
        self.edit_emb_dim = getattr(args, "edit_emb_dim", self.encoder_embed_dim // 2)
        # Only the 'v2' duration-predictor variant is implemented.
        if getattr(args, "dur_predictor_type", "") == 'v2':
            self.dur_predictor = DurationPredictor(idim=self.encoder_embed_dim, n_layers=5, n_chans=self.encoder_embed_dim, ffn_layers=2, ln_eps=1e-5, remove_edit_emb=False, to_be_edited_size=self.to_be_edited_size)
        else:
            raise ValueError("Other type is undefined")
    @ensemble_decoder
    def forward(self, normalize, encoder_out, prev_output_tokens, step=0, wer_dur=None, to_be_edited=None, for_wer_gather=None, debug_src_tokens=None, debug_tgt_tokens=None, **unused):
        """Decode one pass; returns (log-)probabilities over the vocabulary."""
        features, _ = self.extract_features(
            prev_output_tokens,
            encoder_out=encoder_out,
            wer_dur=wer_dur,
            to_be_edited=to_be_edited, for_wer_gather=for_wer_gather, debug_src_tokens=debug_src_tokens, debug_tgt_tokens=debug_tgt_tokens
        )
        decoder_out = self.output_layer(features)
        return F.log_softmax(decoder_out, -1) if normalize else decoder_out
    @ensemble_decoder
    def forward_length(self, normalize, encoder_out):
        """Predict target length logits from mean-pooled encoder states."""
        enc_feats = encoder_out.encoder_out  # T x B x C
        src_masks = encoder_out.encoder_padding_mask  # B x T or None
        enc_feats = _mean_pooling(enc_feats, src_masks)
        if self.sg_length_pred:
            enc_feats = enc_feats.detach()
        length_out = F.linear(enc_feats, self.embed_length.weight)
        return F.log_softmax(length_out, -1) if normalize else length_out
    @ensemble_decoder
    def forward_wer_dur_and_tbe(self, normalize, encoder_out):
        """Predict duration and to-be-edited labels for each source token.

        Returns (wer_dur_out, to_be_edited_out, closest) where closest is
        always None (kept for interface compatibility).
        """
        enc_feats = encoder_out.encoder_out  # T x B x C
        src_masks = encoder_out.encoder_padding_mask  # B x T or None
        encoder_embedding = encoder_out.encoder_embedding  # B x T x C
        enc_feats = enc_feats.transpose(0, 1)
        # enc_feats = _mean_pooling(enc_feats, src_masks)
        if self.sg_length_pred:
            enc_feats = enc_feats.detach()
        # Invert padding mask into a non-padding mask for the predictor.
        src_masks = (~src_masks)
        wer_dur_out, to_be_edited_out = self.dur_predictor(enc_feats, src_masks)
        closest = None
        return wer_dur_out, to_be_edited_out, closest
    def extract_features(
        self,
        prev_output_tokens,
        encoder_out=None,
        early_exit=None,
        wer_dur=None,
        to_be_edited=None, for_wer_gather=None, debug_src_tokens=None, debug_tgt_tokens=None,
        **unused
    ):
        """
        Similar to *forward* but only return features.
        Inputs:
            prev_output_tokens: Tensor(B, T)
            encoder_out: a dictionary of hidden states and masks
        Returns:
            tuple:
                - the decoder's features of shape `(batch, tgt_len, embed_dim)`
                - a dictionary with any model-specific outputs
            the LevenshteinTransformer decoder has full-attention to all generated tokens
        """
        # embedding
        src_embd = encoder_out.encoder_embedding
        src_mask = encoder_out.encoder_padding_mask
        src_mask = (
            ~src_mask
            if src_mask is not None
            else prev_output_tokens.new_ones(*src_embd.size()[:2]).bool()
        )
        # Decoder inputs are source embeddings duplicated per predicted
        # duration (see forward_wer_dur_embedding), not target embeddings.
        x, decoder_padding_mask = self.forward_embedding(
            prev_output_tokens,
            self.forward_wer_dur_embedding(
                src_embd, src_mask, prev_output_tokens.ne(self.padding_idx), wer_dur, to_be_edited, for_wer_gather, debug_src_tokens=debug_src_tokens, debug_tgt_tokens=debug_tgt_tokens
            ),
        )
        # B x T x C -> T x B x C
        x = x.transpose(0, 1)
        attn = None
        inner_states = [x]
        # decoder layers
        for i, layer in enumerate(self.layers):
            # early exit from the decoder.
            if (early_exit is not None) and (i >= early_exit):
                break
            x, attn, _ = layer(
                x,
                encoder_out.encoder_out if encoder_out is not None else None,
                encoder_out.encoder_padding_mask if encoder_out is not None else None,
                self_attn_mask=None,
                self_attn_padding_mask=decoder_padding_mask,
            )
            inner_states.append(x)
        if self.layer_norm:
            x = self.layer_norm(x)
        # T x B x C -> B x T x C
        x = x.transpose(0, 1)
        if self.project_out_dim is not None:
            x = self.project_out_dim(x)
        return x, {"attn": attn, "inner_states": inner_states}
    def forward_embedding(self, prev_output_tokens, states=None):
        """Combine token/state embeddings with positions and dropout.

        When ``states`` is given it is used directly instead of embedding
        ``prev_output_tokens``; returns (x, decoder_padding_mask).
        """
        # embed positions
        positions = (
            self.embed_positions(prev_output_tokens)
            if self.embed_positions is not None
            else None
        )
        # embed tokens and positions
        if states is None:
            x = self.embed_scale * self.embed_tokens(prev_output_tokens)
            if self.project_in_dim is not None:
                x = self.project_in_dim(x)
        else:
            x = states
        if positions is not None:
            x += positions
        x = self.dropout_module(x)
        decoder_padding_mask = prev_output_tokens.eq(self.padding_idx)
        return x, decoder_padding_mask
    def forward_wer_dur_embedding(self, src_embeds, src_masks, tgt_masks, wer_dur, to_be_edited, for_wer_gather=None, debug_src_tokens=None, debug_tgt_tokens=None):
        """Gather source embeddings into target positions via for_wer_gather.

        ``for_wer_gather`` maps each target slot to the index of the source
        token it expands from; padded target slots are zeroed with tgt_masks.
        ``src_masks``/``wer_dur``/``to_be_edited``/debug args are unused here
        but kept for interface compatibility.
        """
        batch_size, _, hidden_size = src_embeds.shape
        for_wer_gather = for_wer_gather[:, :, None].long()
        to_reshape = torch.gather(src_embeds, 1, for_wer_gather.repeat(1, 1, src_embeds.shape[2]))
        to_reshape = to_reshape * tgt_masks[:, :, None]
        return to_reshape
    def forward_length_prediction(self, length_out, encoder_out, tgt_tokens=None):
        """Compute the length target (training) or predicted length (inference).

        With ``pred_length_offset`` the length is encoded as an offset from the
        source length, shifted by 128 and clipped to [0, 255].
        """
        enc_feats = encoder_out.encoder_out  # T x B x C
        src_masks = encoder_out.encoder_padding_mask  # B x T or None
        if self.pred_length_offset:
            if src_masks is None:
                src_lengs = enc_feats.new_ones(enc_feats.size(1)).fill_(
                    enc_feats.size(0)
                )
            else:
                src_lengs = (~src_masks).transpose(0, 1).type_as(enc_feats).sum(0)
            src_lengs = src_lengs.long()
        if tgt_tokens is not None:
            # obtain the length target
            tgt_lengs = tgt_tokens.ne(self.padding_idx).sum(1).long()
            if self.pred_length_offset:
                length_tgt = tgt_lengs - src_lengs + 128
            else:
                length_tgt = tgt_lengs
            length_tgt = length_tgt.clamp(min=0, max=255)
        else:
            # predict the length target (greedy for now)
            # TODO: implementing length-beam
            pred_lengs = length_out.max(-1)[1]
            if self.pred_length_offset:
                length_tgt = pred_lengs - 128 + src_lengs
            else:
                length_tgt = pred_lengs
        return length_tgt
@register_model_architecture("fastcorrect", "fastcorrect")
def base_architecture(args):
    """Fill in unset hyper-parameters on ``args`` with FastCorrect defaults.

    Follows the usual fairseq base-architecture pattern: user-supplied values
    always win, missing attributes receive the defaults below. Decoder
    dimensions default to the (possibly user-set) encoder dimensions, so the
    order of assignments matters.
    """
    def _default(name, value):
        # Only installs the default when the attribute is missing.
        setattr(args, name, getattr(args, name, value))

    _default("encoder_embed_path", None)
    _default("encoder_embed_dim", 512)
    _default("encoder_ffn_embed_dim", 2048)
    _default("encoder_layers", 6)
    _default("encoder_attention_heads", 8)
    _default("encoder_normalize_before", False)
    _default("encoder_learned_pos", False)
    _default("decoder_embed_path", None)
    # Decoder dims fall back to the encoder dims resolved above.
    _default("decoder_embed_dim", args.encoder_embed_dim)
    _default("decoder_ffn_embed_dim", args.encoder_ffn_embed_dim)
    _default("decoder_layers", 6)
    _default("decoder_attention_heads", 8)
    _default("decoder_normalize_before", False)
    _default("decoder_learned_pos", False)
    _default("attention_dropout", 0.0)
    _default("activation_dropout", 0.0)
    _default("activation_fn", "relu")
    _default("dropout", 0.1)
    _default("adaptive_softmax_cutoff", None)
    _default("adaptive_softmax_dropout", 0)
    _default("share_decoder_input_output_embed", False)
    _default("share_all_embeddings", False)
    _default("no_token_positional_embeddings", False)
    _default("adaptive_input", False)
    _default("apply_bert_init", False)
    _default("decoder_output_dim", args.decoder_embed_dim)
    _default("decoder_input_dim", args.decoder_embed_dim)
    # --- special arguments ---
    _default("sg_length_pred", False)
    _default("pred_length_offset", False)
    _default("length_loss_factor", 0.1)
| 29,151 | 36.470437 | 215 | py |
NeuralSpeech | NeuralSpeech-master/LightSpeech/modules/stft_loss.py | # -*- coding: utf-8 -*-
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# Copyright 2019 Tomoki Hayashi
# MIT License (https://opensource.org/licenses/MIT)
"""STFT-based Loss modules."""
import librosa
import torch
from parallel_wavegan.losses import LogSTFTMagnitudeLoss, SpectralConvergengeLoss, stft
class STFTLoss(torch.nn.Module):
    """Single-resolution STFT loss (spectral convergence + log magnitude)."""

    def __init__(self, fft_size=1024, shift_size=120, win_length=600, window="hann_window",
                 use_mel_loss=False):
        """Initialize STFT loss module."""
        super(STFTLoss, self).__init__()
        self.fft_size = fft_size
        self.shift_size = shift_size
        self.win_length = win_length
        # Resolve e.g. "hann_window" to torch.hann_window and materialize it.
        self.window = getattr(torch, window)(win_length)
        self.spectral_convergenge_loss = SpectralConvergengeLoss()
        self.log_stft_magnitude_loss = LogSTFTMagnitudeLoss()
        self.use_mel_loss = use_mel_loss
        # Mel filterbank is built lazily on the first forward pass.
        self.mel_basis = None

    def forward(self, x, y):
        """Calculate forward propagation.
        Args:
            x (Tensor): Predicted signal (B, T).
            y (Tensor): Groundtruth signal (B, T).
        Returns:
            Tensor: Spectral convergence loss value.
            Tensor: Log STFT magnitude loss value.
        """
        pred_mag = stft(x, self.fft_size, self.shift_size, self.win_length, self.window)
        true_mag = stft(y, self.fft_size, self.shift_size, self.win_length, self.window)
        if self.use_mel_loss:
            if self.mel_basis is None:
                # NOTE(review): hard-coded 22050 Hz / 80 mels; .cuda() assumes
                # GPU execution — confirm with the training setup.
                self.mel_basis = torch.from_numpy(librosa.filters.mel(22050, self.fft_size, 80)).cuda().T
            pred_mag = pred_mag @ self.mel_basis
            true_mag = true_mag @ self.mel_basis
        sc = self.spectral_convergenge_loss(pred_mag, true_mag)
        mag = self.log_stft_magnitude_loss(pred_mag, true_mag)
        return sc, mag
class MultiResolutionSTFTLoss(torch.nn.Module):
    """Multi resolution STFT loss module.

    Averages the spectral-convergence and log-magnitude STFT losses over
    several (fft_size, hop_size, win_length) resolutions.
    """

    def __init__(self,
                 fft_sizes=(1024, 2048, 512),
                 hop_sizes=(120, 240, 50),
                 win_lengths=(600, 1200, 240),
                 window="hann_window",
                 use_mel_loss=False):
        """Initialize Multi resolution STFT loss module.
        Args:
            fft_sizes (sequence): FFT sizes, one per resolution.
            hop_sizes (sequence): Hop sizes, one per resolution.
            win_lengths (sequence): Window lengths, one per resolution.
            window (str): Window function type (a torch window factory name).
            use_mel_loss (bool): Compute the losses on mel spectrograms.
        """
        super(MultiResolutionSTFTLoss, self).__init__()
        # Fix: defaults are tuples instead of lists to avoid the shared
        # mutable-default-argument pitfall; any equal-length sequences work.
        assert len(fft_sizes) == len(hop_sizes) == len(win_lengths)
        self.stft_losses = torch.nn.ModuleList()
        for fs, ss, wl in zip(fft_sizes, hop_sizes, win_lengths):
            self.stft_losses += [STFTLoss(fs, ss, wl, window, use_mel_loss)]

    def forward(self, x, y):
        """Calculate forward propagation.
        Args:
            x (Tensor): Predicted signal (B, T).
            y (Tensor): Groundtruth signal (B, T).
        Returns:
            Tensor: Multi resolution spectral convergence loss value.
            Tensor: Multi resolution log STFT magnitude loss value.
        """
        sc_loss = 0.0
        mag_loss = 0.0
        for f in self.stft_losses:
            sc_l, mag_l = f(x, y)
            sc_loss += sc_l
            mag_loss += mag_l
        # Average across resolutions.
        sc_loss /= len(self.stft_losses)
        mag_loss /= len(self.stft_losses)
        return sc_loss, mag_loss
| 3,470 | 32.375 | 105 | py |
NeuralSpeech | NeuralSpeech-master/LightSpeech/modules/tts_modules.py | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import math
import logging
import torch
import torch.nn as nn
from torch.nn import functional as F
from modules.operations import SinusoidalPositionalEmbedding, OPERATIONS_ENCODER, ConvSeparable
from utils.world_utils import build_activation
from utils.hparams import hparams
DEFAULT_MAX_SOURCE_POSITIONS = 2000
DEFAULT_MAX_TARGET_POSITIONS = 2000
class TransformerEncoderLayer(nn.Module):
    """One encoder layer built from an architecture op code.

    ``layer`` is an integer code looked up in OPERATIONS_ENCODER to construct
    the actual operation; ``hidden_size`` and ``dropout`` are stored for
    introspection but the op itself is constructed without arguments.
    """
    def __init__(self, layer, hidden_size, dropout):
        super().__init__()
        self.layer = layer
        self.hidden_size = hidden_size
        self.dropout = dropout
        self.op = OPERATIONS_ENCODER[layer]()
    def forward(self, x, **kwargs):
        """Apply the wrapped operation; kwargs (e.g. padding masks) are forwarded."""
        return self.op(x, **kwargs)
class LayerNorm(torch.nn.LayerNorm):
    """Layer normalization over an arbitrary axis (eps fixed at 1e-12).

    ``torch.nn.LayerNorm`` always normalizes the last dimension; this wrapper
    lets the statistics be taken over ``dim`` instead by transposing that
    axis to the end, normalizing, and transposing back.

    :param int nout: size of the normalized dimension
    :param int dim: dimension to be normalized (default: -1, the last axis)
    """

    def __init__(self, nout, dim=-1):
        """Construct a LayerNorm object."""
        super().__init__(nout, eps=1e-12)
        self.dim = dim

    def forward(self, x):
        """Apply layer normalization along ``self.dim``.

        :param torch.Tensor x: input tensor
        :return: layer-normalized tensor of the same shape
        :rtype: torch.Tensor
        """
        if self.dim != -1:
            # Move the target axis last, normalize, then restore the layout.
            return super().forward(x.transpose(1, -1)).transpose(1, -1)
        return super().forward(x)
class DurationPredictor(torch.nn.Module):
    """Duration predictor module.
    This is a module of duration predictor described in `FastSpeech: Fast, Robust and Controllable Text to Speech`_.
    The duration predictor predicts a duration of each frame in log domain from the hidden embeddings of encoder.
    .. _`FastSpeech: Fast, Robust and Controllable Text to Speech`:
        https://arxiv.org/pdf/1905.09263.pdf
    Note:
        The calculation domain of outputs is different between in `forward` and in `inference`. In `forward`,
        the outputs are calculated in log domain but in `inference`, those are calculated in linear domain.
    """
    def __init__(self, idim, n_layers=2, n_chans=384, kernel_size=3, dropout_rate=0.1, offset=1.0, padding='SAME'):
        """Initilize duration predictor module.
        Args:
            idim (int): Input dimension.
            n_layers (int, optional): Number of convolutional layers.
            n_chans (int, optional): Number of channels of convolutional layers.
            kernel_size (int, optional): Kernel size of convolutional layers.
            dropout_rate (float, optional): Dropout rate.
            offset (float, optional): Offset value to avoid nan in log domain.
            padding (str, optional): 'SAME' (symmetric) or 'LEFT' (causal) conv padding.
        """
        super(DurationPredictor, self).__init__()
        self.offset = offset
        self.conv = torch.nn.ModuleList()
        self.kernel_size = kernel_size
        self.padding = padding
        for idx in range(n_layers):
            in_chans = idim if idx == 0 else n_chans
            # Layer type is picked from the global hparams at construction time.
            if hparams['predictor_layer_type'] == 'conv':
                self.conv += [torch.nn.Sequential(
                    torch.nn.Conv1d(in_chans, n_chans, kernel_size, stride=1, padding=0),
                    build_activation(hparams['activation']),
                    LayerNorm(n_chans, dim=1),
                    torch.nn.Dropout(dropout_rate)
                )]
            else:
                assert hparams['predictor_layer_type'] == 'sepconv'
                self.conv += [torch.nn.Sequential(
                    ConvSeparable(in_chans, n_chans, kernel_size),
                    build_activation(hparams['activation']),
                    LayerNorm(n_chans, dim=1),
                    torch.nn.Dropout(dropout_rate)
                )]
        self.linear = torch.nn.Linear(n_chans, 1)
    def _forward(self, xs, x_masks=None, is_inference=False):
        """Shared body of forward/inference; x_masks marks PADDED positions."""
        xs = xs.transpose(1, -1)  # (B, idim, Tmax)
        for f in self.conv:
            if self.padding == 'SAME':
                xs = F.pad(xs, [self.kernel_size // 2, self.kernel_size // 2])
            elif self.padding == 'LEFT':
                xs = F.pad(xs, [self.kernel_size - 1, 0])
            xs = f(xs)  # (B, C, Tmax)
            if x_masks is not None:
                # Zero out padded positions after every conv block.
                xs = xs * (1 - x_masks.float())[:, None, :]
        # NOTE: calculate in log domain
        xs = self.linear(xs.transpose(1, -1)).squeeze(-1)  # (B, Tmax)
        if is_inference:
            # NOTE: calculate in linear domain
            xs = torch.clamp(torch.round(xs.exp() - self.offset), min=0).long()  # avoid negative value
        if x_masks is not None:
            xs = xs.masked_fill(x_masks, 0.0)
        return xs
    def forward(self, xs, x_masks=None):
        """Calculate forward propagation.
        Args:
            xs (Tensor): Batch of input sequences (B, Tmax, idim).
            x_masks (ByteTensor, optional): Batch of masks indicating padded part (B, Tmax).
        Returns:
            Tensor: Batch of predicted durations in log domain (B, Tmax).
        """
        return self._forward(xs, x_masks, False)
    def inference(self, xs, x_masks=None):
        """Inference duration.
        Args:
            xs (Tensor): Batch of input sequences (B, Tmax, idim).
            x_masks (ByteTensor, optional): Batch of masks indicating padded part (B, Tmax).
        Returns:
            LongTensor: Batch of predicted durations in linear domain (B, Tmax).
        """
        return self._forward(xs, x_masks, True)
class DurationPredictorLoss(torch.nn.Module):
    """Masked MSE loss for the duration predictor, computed in log domain.

    Predictions arrive in log domain; ground-truth durations arrive in the
    linear domain and are shifted by ``offset`` before the log so the target
    stays finite at zero duration. The per-element loss is masked by
    ``nonpadding`` and averaged over the non-padded positions.
    """

    def __init__(self, offset=1.0, reduction="none"):
        """Initilize duration predictor loss module.

        Args:
            offset (float, optional): Offset value to avoid nan in log domain.
            reduction (str): Reduction passed to ``torch.nn.MSELoss``
                ("none" is required for the masking in :meth:`forward`).
        """
        super(DurationPredictorLoss, self).__init__()
        self.criterion = torch.nn.MSELoss(reduction=reduction)
        self.offset = offset

    def forward(self, outputs, targets, nonpadding):
        """Compute the masked log-domain MSE.

        Args:
            outputs (Tensor): Predicted durations in log domain (B, T).
            targets (LongTensor): Ground-truth durations in linear domain (B, T).
            nonpadding (Tensor): 1/0 mask of valid positions (B, T).

        Returns:
            Tensor: scalar mean-squared-error over the valid positions.
        """
        # Bring targets into the same (log) domain as the predictions.
        log_targets = torch.log(targets.float() + self.offset)
        per_elem = self.criterion(outputs, log_targets.float())
        return (per_elem * nonpadding).sum() / nonpadding.sum()
def pad_list(xs, pad_value, max_len=None):
    """Stack a list of variable-length tensors into one padded batch tensor.

    Args:
        xs (List): List of Tensors [(T_1, `*`), (T_2, `*`), ..., (T_B, `*`)].
        pad_value (float): Value for padding.
        max_len (int, optional): Output length; defaults to the longest entry.
            Entries longer than ``max_len`` are truncated.

    Returns:
        Tensor: Padded tensor (B, Tmax, `*`), same dtype/device as ``xs[0]``.

    Examples:
        >>> x = [torch.ones(4), torch.ones(2), torch.ones(1)]
        >>> x
        [tensor([1., 1., 1., 1.]), tensor([1., 1.]), tensor([1.])]
        >>> pad_list(x, 0)
        tensor([[1., 1., 1., 1.],
                [1., 1., 0., 0.],
                [1., 0., 0., 0.]])
    """
    if max_len is None:
        max_len = max(x.size(0) for x in xs)
    # new_full keeps dtype and device of the first entry.
    out = xs[0].new_full((len(xs), max_len, *xs[0].size()[1:]), pad_value)
    for i, x in enumerate(xs):
        length = min(x.size(0), max_len)
        out[i, :length] = x[:max_len]
    return out
class LengthRegulator(torch.nn.Module):
    """Length regulator module for feed-forward Transformer.
    This is a module of length regulator described in `FastSpeech: Fast, Robust and Controllable Text to Speech`_.
    The length regulator expands char or phoneme-level embedding features to frame-level by repeating each
    feature based on the corresponding predicted durations.
    .. _`FastSpeech: Fast, Robust and Controllable Text to Speech`:
        https://arxiv.org/pdf/1905.09263.pdf
    """

    def __init__(self, pad_value=0.0):
        """Initilize length regulator module.
        Args:
            pad_value (float, optional): Value used for padding.
        """
        super(LengthRegulator, self).__init__()
        self.pad_value = pad_value

    def forward(self, ds, ilens, alpha=1.0, max_len=None):
        """Build a mel-to-phoneme index map from per-token durations.
        Args:
            ds (LongTensor): Batch of durations of each token (B, T).
            ilens (LongTensor): Batch of valid input lengths (B,).
            alpha (float, optional): Alpha value to control speed of speech
                (> 0; durations are rescaled by alpha before expansion).
            max_len (int, optional): Pad/clip the output to this length.
        Returns:
            LongTensor: mel2ph map (B, T*, 1) where each frame holds the
            1-based index of the token it was expanded from (0 = padding).
        """
        assert alpha > 0
        if alpha != 1.0:
            ds = torch.round(ds.float() * alpha).long()
        ds = [d[:ilen] for d, ilen in zip(ds, ilens)]
        # Expand token indices (1-based; 0 is reserved for padding).
        mel2ph = [self._repeat_one_sequence(torch.arange(len(d)).to(d.device), d) + 1 for d in ds]
        return pad_list(mel2ph, 0, max_len).long()

    def _repeat_one_sequence(self, x, d):
        """Repeat each frame according to duration.
        Examples:
            >>> x = torch.tensor([[1], [2], [3]])
            tensor([[1],
                    [2],
                    [3]])
            >>> d = torch.tensor([1, 2, 3])
            tensor([1, 2, 3])
            >>> self._repeat_one_sequence(x, d)
            tensor([[1],
                    [2],
                    [2],
                    [3],
                    [3],
                    [3]])
        """
        if d.sum() == 0:
            # Fixed: logging.warn is deprecated in favor of logging.warning,
            # and d.fill_(1) mutated the caller's tensor in place.
            logging.warning("all of the predicted durations are 0. fill 0 with 1.")
            d = torch.ones_like(d)
        return torch.cat([x_.repeat(int(d_), 1) for x_, d_ in zip(x, d) if d_ != 0], dim=0)
class PitchPredictor(torch.nn.Module):
    """Pitch predictor: a conv stack over encoder features with added
    sinusoidal positions, producing ``odim`` values per frame.
    NOTE(review): odim defaults to 2 — presumably pitch value plus a
    voiced/unvoiced output; confirm against the training loss."""
    def __init__(self, idim, n_layers=5, n_chans=384, odim=2, kernel_size=5,
                 dropout_rate=0.1, padding='SAME'):
        """Initilize pitch predictor module.
        Args:
            idim (int): Input dimension.
            n_layers (int, optional): Number of convolutional layers.
            n_chans (int, optional): Number of channels of convolutional layers.
            kernel_size (int, optional): Kernel size of convolutional layers.
            dropout_rate (float, optional): Dropout rate.
            odim (int, optional): Number of outputs per frame.
            padding (str, optional): 'SAME' (symmetric) or 'LEFT' (causal).
        """
        super(PitchPredictor, self).__init__()
        self.conv = torch.nn.ModuleList()
        self.kernel_size = kernel_size
        self.padding = padding
        for idx in range(n_layers):
            in_chans = idim if idx == 0 else n_chans
            # Layer type is picked from the global hparams at construction time.
            if hparams['predictor_layer_type'] == 'conv':
                self.conv += [torch.nn.Sequential(
                    torch.nn.Conv1d(in_chans, n_chans, kernel_size, stride=1, padding=0),
                    build_activation(hparams['activation']),
                    LayerNorm(n_chans, dim=1),
                    torch.nn.Dropout(dropout_rate)
                )]
            else:
                assert hparams['predictor_layer_type'] == 'sepconv'
                self.conv += [torch.nn.Sequential(
                    ConvSeparable(in_chans, n_chans, kernel_size),
                    build_activation(hparams['activation']),
                    LayerNorm(n_chans, dim=1),
                    torch.nn.Dropout(dropout_rate)
                )]
        self.linear = torch.nn.Linear(n_chans, odim)
        self.embed_positions = SinusoidalPositionalEmbedding(idim, 0, init_size=4096)
        # Learnable scale for the positional embedding contribution.
        self.pos_embed_alpha = nn.Parameter(torch.Tensor([1]))
    def forward(self, xs):
        """
        :param xs: [B, T, H]
        :return: [B, T, H]
        """
        positions = self.pos_embed_alpha * self.embed_positions(xs[..., 0])
        xs = xs + positions
        xs = xs.transpose(1, -1)  # (B, idim, Tmax)
        for f in self.conv:
            if self.padding == 'SAME':
                xs = F.pad(xs, [self.kernel_size // 2, self.kernel_size // 2])
            elif self.padding == 'LEFT':
                xs = F.pad(xs, [self.kernel_size - 1, 0])
            xs = f(xs)  # (B, C, Tmax)
        # NOTE: calculate in log domain
        xs = self.linear(xs.transpose(1, -1))  # (B, Tmax, H)
        return xs
class EnergyPredictor(PitchPredictor):
    """Energy predictor; identical architecture to PitchPredictor (callers
    configure it with odim=1 for a single energy value per frame)."""
    pass
class TransformerEncoder(nn.Module):
    """Phoneme/text encoder: token + sinusoidal position embeddings followed
    by a stack of architecture-coded encoder layers, with optional final
    layer norm; padded positions are zeroed in the output."""
    def __init__(self, arch, embed_tokens, last_ln=True):
        super().__init__()
        self.arch = arch
        self.num_layers = hparams['enc_layers']
        self.hidden_size = hparams['hidden_size']
        self.embed_tokens = embed_tokens
        self.padding_idx = embed_tokens.padding_idx
        embed_dim = embed_tokens.embedding_dim
        self.dropout = hparams['dropout']
        self.embed_scale = math.sqrt(embed_dim)
        self.max_source_positions = DEFAULT_MAX_SOURCE_POSITIONS
        self.embed_positions = SinusoidalPositionalEmbedding(
            embed_dim, self.padding_idx,
            init_size=self.max_source_positions + self.padding_idx + 1,
        )
        self.layers = nn.ModuleList([])
        self.layers.extend([
            TransformerEncoderLayer(self.arch[i], self.hidden_size, self.dropout)
            for i in range(self.num_layers)
        ])
        self.last_ln = last_ln
        if last_ln:
            self.layer_norm = LayerNorm(embed_dim)
    def forward_embedding(self, src_tokens):
        """Return (scaled token embedding + positions with dropout, raw embedding)."""
        # embed tokens and positions
        embed = self.embed_scale * self.embed_tokens(src_tokens)
        positions = self.embed_positions(src_tokens)
        # x = self.prenet(x)
        x = embed + positions
        x = F.dropout(x, p=self.dropout, training=self.training)
        return x, embed
    def forward(self, src_tokens):
        """
        :param src_tokens: [B, T]
        :return: {
            'encoder_out': [T x B x C]
            'encoder_padding_mask': [B x T]
            'encoder_embedding': [B x T x C]
            'attn_w': []
        }
        """
        x, encoder_embedding = self.forward_embedding(src_tokens)
        # B x T x C -> T x B x C
        x = x.transpose(0, 1)
        # compute padding mask
        encoder_padding_mask = src_tokens.eq(self.padding_idx).data
        # encoder layers
        for layer in self.layers:
            x = layer(x, encoder_padding_mask=encoder_padding_mask)
        if self.last_ln:
            x = self.layer_norm(x)
            # Zero out padded time steps after the final norm.
            x = x * (1 - encoder_padding_mask.float()).transpose(0, 1)[..., None]
        return {
            'encoder_out': x,  # T x B x C
            'encoder_padding_mask': encoder_padding_mask,  # B x T
            'encoder_embedding': encoder_embedding,  # B x T x C
            'attn_w': []
        }
class LightSpeechDecoder(nn.Module):
    """Mel decoder: adds scaled sinusoidal positions to frame-level features
    and runs them through a stack of architecture-coded layers.

    Padding is inferred from all-zero feature vectors rather than a token
    mask, since the decoder receives continuous inputs."""
    def __init__(self, arch, hidden_size=None, dropout=None):
        super().__init__()
        self.arch = arch  # arch  = encoder op code
        self.num_layers = len(arch)
        if hidden_size is not None:
            embed_dim = self.hidden_size = hidden_size
        else:
            embed_dim = self.hidden_size = hparams['hidden_size']
        if dropout is not None:
            self.dropout = dropout
        else:
            self.dropout = hparams['dropout']
        self.max_source_positions = DEFAULT_MAX_TARGET_POSITIONS
        self.padding_idx = 0
        # Learnable scale for the positional embedding contribution.
        self.pos_embed_alpha = nn.Parameter(torch.Tensor([1]))
        self.embed_positions = SinusoidalPositionalEmbedding(
            embed_dim, self.padding_idx,
            init_size=self.max_source_positions + self.padding_idx + 1,
        )
        self.layers = nn.ModuleList([])
        self.layers.extend([
            TransformerEncoderLayer(self.arch[i], self.hidden_size, self.dropout)
            for i in range(self.num_layers)
        ])
        self.layer_norm = nn.LayerNorm(embed_dim)
    def forward(self, x, require_w=False):
        """
        :param x: [B, T, C]
        :param require_w: True if this module needs to return weight matrix
        :return: [B, T, C] (or a tuple (out, attn_weights) when require_w)
        """
        # Frames whose feature vector is all zeros are treated as padding.
        padding_mask = x.abs().sum(-1).eq(0).data
        positions = self.pos_embed_alpha * self.embed_positions(x[..., 0])
        x = x + positions
        x = F.dropout(x, p=self.dropout, training=self.training)
        # B x T x C -> T x B x C
        x = x.transpose(0, 1)
        # encoder layers
        attn_w = []
        if require_w:
            for layer in self.layers:
                x, attn_w_i = layer(x, encoder_padding_mask=padding_mask, require_w=require_w)
                attn_w.append(attn_w_i)
        else:
            for layer in self.layers:
                x = layer(x, encoder_padding_mask=padding_mask)  # remember to assign back to x
        x = self.layer_norm(x)
        x = x.transpose(0, 1)
        return (x, attn_w) if require_w else x
| 17,051 | 37.579186 | 116 | py |
NeuralSpeech | NeuralSpeech-master/LightSpeech/modules/lightspeech.py | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
from modules.operations import *
from modules.tts_modules import TransformerEncoder, LightSpeechDecoder, DurationPredictor, LengthRegulator, PitchPredictor, EnergyPredictor
import utils
from utils.world_utils import f0_to_coarse_torch, restore_pitch
class LightSpeech(nn.Module):
    def __init__(self, arch, dictionary, out_dims=None):
        """Assemble the LightSpeech acoustic model.

        Args:
            arch: architecture op codes — either a whitespace-separated string
                of ints or a list/tuple; split into encoder and decoder parts
                according to hparams['enc_layers'] / hparams['dec_layers'].
            dictionary: phoneme dictionary providing pad()/len().
            out_dims (int, optional): output mel bins; defaults to
                hparams['audio_num_mel_bins'].
        """
        super().__init__()
        self.dictionary = dictionary
        self.padding_idx = dictionary.pad()
        if isinstance(arch, str):
            self.arch = list(map(int, arch.strip().split()))
        else:
            assert isinstance(arch, (list, tuple))
            self.arch = arch
        self.enc_layers = hparams['enc_layers']
        self.dec_layers = hparams['dec_layers']
        self.enc_arch = self.arch[:self.enc_layers]
        self.dec_arch = self.arch[self.enc_layers:self.enc_layers + self.dec_layers]
        self.hidden_size = hparams['hidden_size']
        self.encoder_embed_tokens = nn.Embedding(len(self.dictionary), self.hidden_size, self.padding_idx)
        self.encoder = TransformerEncoder(self.enc_arch, self.encoder_embed_tokens)
        self.decoder = LightSpeechDecoder(self.dec_arch) if hparams['dec_layers'] > 0 else None
        self.mel_out = Linear(self.hidden_size,
                              hparams['audio_num_mel_bins'] if out_dims is None else out_dims,
                              bias=True)
        # Speaker conditioning: lookup table for speaker ids, otherwise a
        # projection of an externally provided 256-dim speaker embedding.
        if hparams['use_spk_id']:
            self.spk_embed_proj = nn.Embedding(hparams['num_spk'], self.hidden_size)
        else:
            self.spk_embed_proj = Linear(256, self.hidden_size, bias=True)
        self.dur_predictor = DurationPredictor(
            self.hidden_size,
            hparams['dur_predictor_layer'],
            n_chans=hparams['predictor_hidden'],
            dropout_rate=0.5, padding=hparams['ffn_padding'],
            kernel_size=hparams['dur_predictor_kernel'])
        self.length_regulator = LengthRegulator()
        if hparams['use_pitch_embed']:
            # 300 pitch bins; odim=2 for the pitch predictor outputs.
            self.pitch_embed = nn.Embedding(300, self.hidden_size, self.padding_idx)
            self.pitch_predictor = PitchPredictor(
                self.hidden_size, hparams['pitch_predictor_layer'], n_chans=hparams['predictor_hidden'], dropout_rate=0.5,
                padding=hparams['ffn_padding'], odim=2)
            self.pitch_do = nn.Dropout(0.5)
        if hparams['use_energy_embed']:
            # 256 quantized energy bins; single scalar energy per frame.
            self.energy_predictor = EnergyPredictor(
                self.hidden_size, hparams['energy_predictor_layer'], n_chans=hparams['predictor_hidden'], dropout_rate=0.5, odim=1,
                padding=hparams['ffn_padding'])
            self.energy_embed = nn.Embedding(256, self.hidden_size, self.padding_idx)
            self.energy_do = nn.Dropout(0.5)
def forward(self, src_tokens, mel2ph, spk_embed=None,
ref_mels=None, pitch=None, uv=None, energy=None, skip_decoder=False):
"""
:param src_tokens: [B, T]
:param mel2ph:
:param spk_embed:
:param ref_mels:
:return: {
'mel_out': [B, T_s, 80], 'dur': [B, T_t],
'w_st_pred': [heads, B, tokens], 'w_st': [heads, B, tokens],
'encoder_out_noref': [B, T_t, H]
}
"""
ret = {}
#with utils.Timer('encoder', print_time=hparams['profile_infer']):
encoder_outputs = self.encoder(src_tokens)
encoder_out = encoder_outputs['encoder_out'] # [T, B, C]
src_nonpadding = (src_tokens > 0).float().permute(1, 0)[:, :, None]
if hparams['use_spk_embed'] and spk_embed is not None:
spk_embed = self.spk_embed_proj(spk_embed)[None, :, :]
encoder_out += spk_embed
encoder_out = encoder_out * src_nonpadding # [T, B, C]
dur_input = encoder_out.transpose(0, 1)
if hparams['predictor_sg']:
dur_input = dur_input.detach()
#with utils.Timer('dur', print_time=hparams['profile_infer']):
if mel2ph is None:
dur = self.dur_predictor.inference(dur_input, src_tokens == 0)
if not hparams['sep_dur_loss']:
dur[src_tokens == self.dictionary.seg()] = 0
ret['mel2ph'] = mel2ph = self.length_regulator(dur, (src_tokens != 0).sum(-1))[..., 0]
else:
ret['dur'] = self.dur_predictor(dur_input, src_tokens == 0)
# expand encoder out to make decoder inputs
decoder_inp = F.pad(encoder_out, [0, 0, 0, 0, 1, 0])
mel2ph_ = mel2ph.permute([1, 0])[..., None].repeat([1, 1, encoder_out.shape[-1]]).contiguous()
decoder_inp = torch.gather(decoder_inp, 0, mel2ph_).transpose(0, 1) # [B, T, H]
ret['decoder_inp_origin'] = decoder_inp_origin = decoder_inp
# add pitch embed
if hparams['use_pitch_embed']:
#with utils.Timer('pitch', print_time=hparams['profile_infer']):
decoder_inp = decoder_inp + self.add_pitch(decoder_inp_origin, pitch, uv, mel2ph, ret)
# add energy embed
if hparams['use_energy_embed']:
with utils.Timer('energy', print_time=hparams['profile_infer']):
decoder_inp = decoder_inp + self.add_energy(decoder_inp_origin, energy, ret)
decoder_inp = decoder_inp * (mel2ph != 0).float()[:, :, None]
ret['decoder_inp'] = decoder_inp
if skip_decoder:
return ret
x = decoder_inp
if hparams['dec_layers'] > 0:
#with utils.Timer('decoder', print_time=hparams['profile_infer']):
x = self.decoder(x)
x = self.mel_out(x)
x = x * (mel2ph != 0).float()[:, :, None]
ret['mel_out'] = x
return ret
def decode_with_pred_pitch(self, decoder_inp, mel2ph):
if hparams['use_ref_enc']:
assert False
pitch_embed = self.add_pitch(decoder_inp, None, None, mel2ph, {})
decoder_inp = decoder_inp + self.pitch_do(pitch_embed)
decoder_inp = decoder_inp * (mel2ph != 0).float()[:, :, None]
x = decoder_inp
x = self.decoder(x)
x = self.mel_out(x)
x = x * (mel2ph != 0).float()[:, :, None]
return x
# run other modules
def add_energy(self, decoder_inp, energy, ret):
if hparams['predictor_sg']:
decoder_inp = decoder_inp.detach()
ret['energy_pred'] = energy_pred = self.energy_predictor(decoder_inp)[:, :, 0]
if energy is None:
energy = energy_pred
energy = torch.clamp(energy * 256 // 4, max=255).long()
energy_embed = self.energy_embed(energy)
return energy_embed
def add_pitch(self, decoder_inp_origin, pitch, uv, mel2ph, ret):
pp_inp = decoder_inp_origin
if hparams['predictor_sg']:
pp_inp = pp_inp.detach()
#with utils.Timer('pitch_predictor', print_time=hparams['profile_infer']):
ret['pitch_logits'] = pitch_logits = self.pitch_predictor(pp_inp)
if pitch is not None: # train
pitch_padding = pitch == -200
#with utils.Timer('restore_pitch', print_time=hparams['profile_infer']):
pitch_restore = restore_pitch(pitch, uv if hparams['use_uv'] else None, hparams,
pitch_padding=pitch_padding)
ret['pitch'] = pitch_restore
#with utils.Timer('f0_to_coarse', print_time=hparams['profile_infer']):
pitch_restore = f0_to_coarse_torch(pitch_restore)
#with utils.Timer('pitch_embed', print_time=hparams['profile_infer']):
pitch_embed = self.pitch_embed(pitch_restore)
else: # test
pitch_padding = (mel2ph == 0)
pitch = pitch_logits[:, :, 0]
uv = pitch_logits[:, :, 1] > 0
if not hparams['use_uv']:
uv = pitch < -3.5
#with utils.Timer('restore_pitch', print_time=hparams['profile_infer']):
pitch_restore = restore_pitch(pitch, uv, hparams, pitch_padding=pitch_padding)
ret['pitch'] = pitch_restore
#with utils.Timer('f0_to_coarse', print_time=hparams['profile_infer']):
pitch_restore = f0_to_coarse_torch(pitch_restore)
#with utils.Timer('pitch_embed', print_time=hparams['profile_infer']):
pitch_embed = self.pitch_embed(pitch_restore)
return self.pitch_do(pitch_embed)
| 8,395 | 47.531792 | 139 | py |
NeuralSpeech | NeuralSpeech-master/LightSpeech/modules/operations.py | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import math
import torch
from torch import nn
from torch.nn import Parameter
import torch.onnx.operators
import torch.nn.functional as F
import utils
from utils.hparams import hparams
from utils.world_utils import build_activation
def LayerNorm(normalized_shape, eps=1e-5, elementwise_affine=True, export=False):
    """Build a LayerNorm, preferring apex's fused CUDA kernel when available.

    Falls back to ``torch.nn.LayerNorm`` when exporting, when CUDA is
    unavailable, or when apex is not installed.
    """
    if not export and torch.cuda.is_available():
        try:
            from apex.normalization import FusedLayerNorm
        except ImportError:
            pass
        else:
            return FusedLayerNorm(normalized_shape, eps, elementwise_affine)
    return torch.nn.LayerNorm(normalized_shape, eps, elementwise_affine)
def Linear(in_features, out_features, bias=True):
    """Create an ``nn.Linear`` with Xavier-uniform weights and zero bias."""
    layer = nn.Linear(in_features, out_features, bias)
    nn.init.xavier_uniform_(layer.weight)
    if bias:
        nn.init.constant_(layer.bias, 0.)
    return layer
class SinusoidalPositionalEmbedding(nn.Module):
    """Sinusoidal positional embeddings of any length.

    Padding positions are zeroed out. Follows the tensor2tensor formulation,
    which differs slightly from Section 3.5 of "Attention Is All You Need".
    """

    def __init__(self, embedding_dim, padding_idx, init_size=1024):
        super().__init__()
        self.embedding_dim = embedding_dim
        self.padding_idx = padding_idx
        self.weights = SinusoidalPositionalEmbedding.get_embedding(
            init_size,
            embedding_dim,
            padding_idx,
        )
        # dummy buffer that tracks the module's device/dtype so lazily grown
        # tables can be moved to match
        self.register_buffer('_float_tensor', torch.FloatTensor(1))

    @staticmethod
    def get_embedding(num_embeddings, embedding_dim, padding_idx=None):
        """Build a [num_embeddings, embedding_dim] sinusoidal table."""
        half_dim = embedding_dim // 2
        log_timescale = math.log(10000) / (half_dim - 1)
        inv_freq = torch.exp(torch.arange(half_dim, dtype=torch.float) * -log_timescale)
        angles = torch.arange(num_embeddings, dtype=torch.float).unsqueeze(1) * inv_freq.unsqueeze(0)
        table = torch.cat([torch.sin(angles), torch.cos(angles)], dim=1).view(num_embeddings, -1)
        if embedding_dim % 2 == 1:
            # odd dimensions get one zero-padded column
            table = torch.cat([table, torch.zeros(num_embeddings, 1)], dim=1)
        if padding_idx is not None:
            table[padding_idx, :] = 0
        return table

    def forward(self, input, incremental_state=None, timestep=None, **kwargs):
        """Input is expected to be of size [bsz x seqlen]."""
        bsz, seq_len = input.shape[:2]
        max_pos = self.padding_idx + 1 + seq_len
        if self.weights is None or max_pos > self.weights.size(0):
            # grow the table on demand for longer sequences
            self.weights = SinusoidalPositionalEmbedding.get_embedding(
                max_pos,
                self.embedding_dim,
                self.padding_idx,
            )
        self.weights = self.weights.to(self._float_tensor)

        if incremental_state is not None:
            # single decoding step: every token in the batch shares one position
            pos = timestep.view(-1)[0] + 1 if timestep is not None else seq_len
            return self.weights[self.padding_idx + pos, :].expand(bsz, 1, -1)

        positions = utils.make_positions(input, self.padding_idx)
        return self.weights.index_select(0, positions.view(-1)).view(bsz, seq_len, -1).detach()

    def max_positions(self):
        """Maximum number of supported positions."""
        return int(1e5)  # an arbitrary large number
class MultiheadAttention(nn.Module):
    """Multi-headed scaled dot-product attention (fairseq-style).

    Supports self-attention and encoder-decoder attention, optional extra
    bias key/value rows, an optional all-zero attention slot, incremental
    (cached) decoding via ``incremental_state``, and delegates to torch's
    fused ``F.multi_head_attention_forward`` when it is available and no
    incremental state is involved.
    """

    def __init__(self, embed_dim, num_heads, kdim=None, vdim=None, dropout=0., bias=True,
                 add_bias_kv=False, add_zero_attn=False, self_attention=False,
                 encoder_decoder_attention=False):
        super().__init__()
        self.embed_dim = embed_dim
        self.kdim = kdim if kdim is not None else embed_dim
        self.vdim = vdim if vdim is not None else embed_dim
        self.qkv_same_dim = self.kdim == embed_dim and self.vdim == embed_dim
        self.num_heads = num_heads
        self.dropout = dropout
        self.head_dim = embed_dim // num_heads
        assert self.head_dim * num_heads == self.embed_dim, "embed_dim must be divisible by num_heads"
        self.scaling = self.head_dim ** -0.5
        self.self_attention = self_attention
        self.encoder_decoder_attention = encoder_decoder_attention
        assert not self.self_attention or self.qkv_same_dim, 'Self-attention requires query, key and ' \
                                                             'value to be of the same size'
        # one fused q/k/v projection when dims match, else three separate ones
        if self.qkv_same_dim:
            self.in_proj_weight = Parameter(torch.Tensor(3 * embed_dim, embed_dim))
        else:
            self.k_proj_weight = Parameter(torch.Tensor(embed_dim, self.kdim))
            self.v_proj_weight = Parameter(torch.Tensor(embed_dim, self.vdim))
            self.q_proj_weight = Parameter(torch.Tensor(embed_dim, embed_dim))
        if bias:
            self.in_proj_bias = Parameter(torch.Tensor(3 * embed_dim))
        else:
            self.register_parameter('in_proj_bias', None)
        self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
        if add_bias_kv:
            self.bias_k = Parameter(torch.Tensor(1, 1, embed_dim))
            self.bias_v = Parameter(torch.Tensor(1, 1, embed_dim))
        else:
            self.bias_k = self.bias_v = None
        self.add_zero_attn = add_zero_attn
        self.reset_parameters()
        # prefer torch's fused implementation when this build of torch has it
        self.enable_torch_version = False
        if hasattr(F, "multi_head_attention_forward"):
            self.enable_torch_version = True
        else:
            self.enable_torch_version = False

    def reset_parameters(self):
        """Xavier-initialize projections; zero the biases."""
        if self.qkv_same_dim:
            nn.init.xavier_uniform_(self.in_proj_weight)
        else:
            nn.init.xavier_uniform_(self.k_proj_weight)
            nn.init.xavier_uniform_(self.v_proj_weight)
            nn.init.xavier_uniform_(self.q_proj_weight)

        nn.init.xavier_uniform_(self.out_proj.weight)
        if self.in_proj_bias is not None:
            nn.init.constant_(self.in_proj_bias, 0.)
            nn.init.constant_(self.out_proj.bias, 0.)
        if self.bias_k is not None:
            nn.init.xavier_normal_(self.bias_k)
        if self.bias_v is not None:
            nn.init.xavier_normal_(self.bias_v)

    def forward(
            self,
            query, key, value,
            key_padding_mask=None,
            incremental_state=None,
            need_weights=True,
            static_kv=False,
            attn_mask=None,
            before_softmax=False,
            need_head_weights=False,
            enc_dec_attn_constraint_mask=None
    ):
        """Input shape: Time x Batch x Channel

        Returns ``(attn, (attn_weights, attn_logits))`` where attn_logits are
        the pre-softmax scores reshaped to [bsz, heads, tgt_len, src_len].

        Args:
            key_padding_mask (ByteTensor, optional): mask to exclude
                keys that are pads, of shape `(batch, src_len)`, where
                padding elements are indicated by 1s.
            need_weights (bool, optional): return the attention weights,
                averaged over heads (default: False).
            static_kv (bool, optional): reuse the cached key/value from
                ``incremental_state`` without recomputing (encoder-decoder
                attention during incremental decoding).
            attn_mask (ByteTensor, optional): typically used to
                implement causal attention, where the mask prevents the
                attention from looking forward in time (default: None).
            before_softmax (bool, optional): return the raw attention
                weights and values before the attention softmax.
            need_head_weights (bool, optional): return the attention
                weights for each head. Implies *need_weights*. Default:
                return the average attention weights over all heads.
            enc_dec_attn_constraint_mask (optional): [bsz, heads, src_len]
                mask that additionally forbids attending to selected keys.
        """
        if need_head_weights:
            need_weights = True

        tgt_len, bsz, embed_dim = query.size()
        assert embed_dim == self.embed_dim
        assert list(query.size()) == [tgt_len, bsz, embed_dim]

        # fast path: fused torch kernel (no incremental decoding involved)
        if self.enable_torch_version and incremental_state is None and not static_kv:
            if self.qkv_same_dim:
                return F.multi_head_attention_forward(query, key, value,
                                                      self.embed_dim, self.num_heads,
                                                      self.in_proj_weight,
                                                      self.in_proj_bias, self.bias_k, self.bias_v,
                                                      self.add_zero_attn, self.dropout,
                                                      self.out_proj.weight, self.out_proj.bias,
                                                      self.training, key_padding_mask, need_weights,
                                                      attn_mask)
            else:
                return F.multi_head_attention_forward(query, key, value,
                                                      self.embed_dim, self.num_heads,
                                                      torch.empty([0]),
                                                      self.in_proj_bias, self.bias_k, self.bias_v,
                                                      self.add_zero_attn, self.dropout,
                                                      self.out_proj.weight, self.out_proj.bias,
                                                      self.training, key_padding_mask, need_weights,
                                                      attn_mask, use_separate_proj_weight=True,
                                                      q_proj_weight=self.q_proj_weight,
                                                      k_proj_weight=self.k_proj_weight,
                                                      v_proj_weight=self.v_proj_weight)

        if incremental_state is not None:
            saved_state = self._get_input_buffer(incremental_state)
            if 'prev_key' in saved_state:
                # previous time steps are cached - no need to recompute
                # key and value if they are static
                if static_kv:
                    assert self.encoder_decoder_attention and not self.self_attention
                    key = value = None
        else:
            saved_state = None

        if self.self_attention:
            # self-attention
            q, k, v = self.in_proj_qkv(query)
        elif self.encoder_decoder_attention:
            # encoder-decoder attention
            q = self.in_proj_q(query)
            if key is None:
                assert value is None
                k = v = None
            else:
                k = self.in_proj_k(key)
                v = self.in_proj_v(key)
        else:
            q = self.in_proj_q(query)
            k = self.in_proj_k(key)
            v = self.in_proj_v(value)
        q *= self.scaling

        if self.bias_k is not None:
            assert self.bias_v is not None
            # append learned bias rows to keys/values and extend masks to match
            k = torch.cat([k, self.bias_k.repeat(1, bsz, 1)])
            v = torch.cat([v, self.bias_v.repeat(1, bsz, 1)])
            if attn_mask is not None:
                attn_mask = torch.cat([attn_mask, attn_mask.new_zeros(attn_mask.size(0), 1)], dim=1)
            if key_padding_mask is not None:
                key_padding_mask = torch.cat(
                    [key_padding_mask, key_padding_mask.new_zeros(key_padding_mask.size(0), 1)], dim=1)

        # fold heads into the batch dimension: [bsz*heads, len, head_dim]
        q = q.contiguous().view(tgt_len, bsz * self.num_heads, self.head_dim).transpose(0, 1)
        if k is not None:
            k = k.contiguous().view(-1, bsz * self.num_heads, self.head_dim).transpose(0, 1)
        if v is not None:
            v = v.contiguous().view(-1, bsz * self.num_heads, self.head_dim).transpose(0, 1)

        if saved_state is not None:
            # saved states are stored with shape (bsz, num_heads, seq_len, head_dim)
            if 'prev_key' in saved_state:
                prev_key = saved_state['prev_key'].view(bsz * self.num_heads, -1, self.head_dim)
                if static_kv:
                    k = prev_key
                else:
                    k = torch.cat((prev_key, k), dim=1)
            if 'prev_value' in saved_state:
                prev_value = saved_state['prev_value'].view(bsz * self.num_heads, -1, self.head_dim)
                if static_kv:
                    v = prev_value
                else:
                    v = torch.cat((prev_value, v), dim=1)
            if 'prev_key_padding_mask' in saved_state and saved_state['prev_key_padding_mask'] is not None:
                prev_key_padding_mask = saved_state['prev_key_padding_mask']
                if static_kv:
                    key_padding_mask = prev_key_padding_mask
                else:
                    key_padding_mask = torch.cat((prev_key_padding_mask, key_padding_mask), dim=1)

            saved_state['prev_key'] = k.view(bsz, self.num_heads, -1, self.head_dim)
            saved_state['prev_value'] = v.view(bsz, self.num_heads, -1, self.head_dim)
            saved_state['prev_key_padding_mask'] = key_padding_mask

            self._set_input_buffer(incremental_state, saved_state)

        src_len = k.size(1)

        # This is part of a workaround to get around fork/join parallelism
        # not supporting Optional types.
        if key_padding_mask is not None and key_padding_mask.shape == torch.Size([]):
            key_padding_mask = None

        if key_padding_mask is not None:
            assert key_padding_mask.size(0) == bsz
            assert key_padding_mask.size(1) == src_len

        if self.add_zero_attn:
            # append an all-zero key/value slot so attention can "attend to nothing"
            src_len += 1
            k = torch.cat([k, k.new_zeros((k.size(0), 1) + k.size()[2:])], dim=1)
            v = torch.cat([v, v.new_zeros((v.size(0), 1) + v.size()[2:])], dim=1)
            if attn_mask is not None:
                attn_mask = torch.cat([attn_mask, attn_mask.new_zeros(attn_mask.size(0), 1)], dim=1)
            if key_padding_mask is not None:
                key_padding_mask = torch.cat(
                    [key_padding_mask, torch.zeros(key_padding_mask.size(0), 1).type_as(key_padding_mask)], dim=1)

        attn_weights = torch.bmm(q, k.transpose(1, 2))
        attn_weights = self.apply_sparse_mask(attn_weights, tgt_len, src_len, bsz)

        assert list(attn_weights.size()) == [bsz * self.num_heads, tgt_len, src_len]

        if attn_mask is not None:
            attn_mask = attn_mask.unsqueeze(0)
            attn_weights += attn_mask

        if enc_dec_attn_constraint_mask is not None:  # bs x head x L_kv
            attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
            attn_weights = attn_weights.masked_fill(
                enc_dec_attn_constraint_mask.unsqueeze(2).bool(),
                float('-inf'),
            )
            attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)

        if key_padding_mask is not None:
            # don't attend to padding symbols
            attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
            attn_weights = attn_weights.masked_fill(
                key_padding_mask.unsqueeze(1).unsqueeze(2),
                float('-inf'),
            )
            attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)

        attn_logits = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)

        if before_softmax:
            return attn_weights, v

        attn_weights_float = utils.softmax(attn_weights, dim=-1)
        attn_weights = attn_weights_float.type_as(attn_weights)
        attn_probs = F.dropout(attn_weights_float.type_as(attn_weights), p=self.dropout, training=self.training)

        attn = torch.bmm(attn_probs, v)
        assert list(attn.size()) == [bsz * self.num_heads, tgt_len, self.head_dim]
        attn = attn.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim)
        attn = self.out_proj(attn)

        if need_weights:
            attn_weights = attn_weights_float.view(bsz, self.num_heads, tgt_len, src_len).transpose(1, 0)
            if not need_head_weights:
                # average attention weights over heads
                attn_weights = attn_weights.mean(dim=0)
        else:
            attn_weights = None

        return attn, (attn_weights, attn_logits)

    def in_proj_qkv(self, query):
        """Project query into q, k, v in one fused matmul (qkv_same_dim only)."""
        return self._in_proj(query).chunk(3, dim=-1)

    def in_proj_q(self, query):
        """Project into the query space (rows [0, embed_dim) of the fused weight)."""
        if self.qkv_same_dim:
            return self._in_proj(query, end=self.embed_dim)
        else:
            bias = self.in_proj_bias
            if bias is not None:
                bias = bias[:self.embed_dim]
            return F.linear(query, self.q_proj_weight, bias)

    def in_proj_k(self, key):
        """Project into the key space (rows [embed_dim, 2*embed_dim))."""
        if self.qkv_same_dim:
            return self._in_proj(key, start=self.embed_dim, end=2 * self.embed_dim)
        else:
            weight = self.k_proj_weight
            bias = self.in_proj_bias
            if bias is not None:
                bias = bias[self.embed_dim:2 * self.embed_dim]
            return F.linear(key, weight, bias)

    def in_proj_v(self, value):
        """Project into the value space (rows [2*embed_dim, 3*embed_dim))."""
        if self.qkv_same_dim:
            return self._in_proj(value, start=2 * self.embed_dim)
        else:
            weight = self.v_proj_weight
            bias = self.in_proj_bias
            if bias is not None:
                bias = bias[2 * self.embed_dim:]
            return F.linear(value, weight, bias)

    def _in_proj(self, input, start=0, end=None):
        """Apply a row slice [start:end) of the fused projection to input."""
        weight = self.in_proj_weight
        bias = self.in_proj_bias
        weight = weight[start:end, :]
        if bias is not None:
            bias = bias[start:end]
        return F.linear(input, weight, bias)

    def _get_input_buffer(self, incremental_state):
        """Fetch this module's cached attention state (empty dict if none)."""
        return utils.get_incremental_state(
            self,
            incremental_state,
            'attn_state',
        ) or {}

    def _set_input_buffer(self, incremental_state, buffer):
        """Store this module's cached attention state."""
        utils.set_incremental_state(
            self,
            incremental_state,
            'attn_state',
            buffer,
        )

    def apply_sparse_mask(self, attn_weights, tgt_len, src_len, bsz):
        """Hook for subclasses to sparsify attention scores; identity here."""
        return attn_weights

    def clear_buffer(self, incremental_state=None):
        """Drop cached keys/values from the incremental decoding state."""
        if incremental_state is not None:
            saved_state = self._get_input_buffer(incremental_state)
            if 'prev_key' in saved_state:
                del saved_state['prev_key']
            if 'prev_value' in saved_state:
                del saved_state['prev_value']
            self._set_input_buffer(incremental_state, saved_state)
class ConvSeparable(nn.Module):
    """Depthwise-separable 1-D convolution.

    A per-channel (depthwise) convolution followed by a 1x1 pointwise
    convolution that mixes channels. Weights are normal-initialized with a
    variance scaled for the dropout rate; the pointwise bias starts at zero.
    """

    def __init__(self, in_channels, out_channels, kernel_size, padding=0, dropout=0):
        super().__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.kernel_size = kernel_size
        self.padding = padding
        # depthwise: one filter per input channel (groups == in_channels), no bias
        self.depthwise_conv = nn.Conv1d(
            in_channels, in_channels, self.kernel_size,
            padding=padding, groups=in_channels, bias=False)
        # pointwise: 1x1 conv mixing channels
        self.pointwise_conv = nn.Conv1d(in_channels, out_channels, 1)
        std = math.sqrt((4 * (1.0 - dropout)) / (kernel_size * out_channels))
        nn.init.normal_(self.depthwise_conv.weight, mean=0, std=std)
        nn.init.normal_(self.pointwise_conv.weight, mean=0, std=std)
        nn.init.constant_(self.pointwise_conv.bias, 0)

    def forward(self, x):
        """x: [B, C, T] -> [B, out_channels, T']."""
        return self.pointwise_conv(self.depthwise_conv(x))
class EncSepConvLayer(nn.Module):
    """Encoder layer built from two depthwise-separable convolutions.

    Pre-LayerNorm residual block operating on [T, B, C] inputs:
    x -> LN -> conv1 -> act -> dropout -> conv2 -> act -> dropout -> + x.
    """

    def __init__(self, c, kernel_size, dropout, activation):
        super().__init__()
        self.layer_norm = LayerNorm(c)
        self.dropout = dropout
        self.activation_fn = build_activation(activation)
        self.conv1 = ConvSeparable(c, c, kernel_size, padding=kernel_size // 2, dropout=dropout)
        self.conv2 = ConvSeparable(c, c, kernel_size, padding=kernel_size // 2, dropout=dropout)

    def forward(self, x, encoder_padding_mask=None, **kwargs):
        layer_norm_training = kwargs.get('layer_norm_training', None)
        if layer_norm_training is not None:
            # let the caller pin LayerNorm's train/eval behaviour
            self.layer_norm.training = layer_norm_training

        residual = x
        x = self.layer_norm(x)
        if encoder_padding_mask is not None:
            # zero padded time steps (mask is [B, T], x is [T, B, C])
            x = x.masked_fill(encoder_padding_mask.t().unsqueeze(-1), 0)

        x = x.permute(1, 2, 0)  # [T, B, C] -> [B, C, T] for Conv1d
        for conv in (self.conv1, self.conv2):
            x = self.activation_fn(conv(x))
            x = F.dropout(x, p=self.dropout, training=self.training)
        x = x.permute(2, 0, 1)  # back to [T, B, C]
        return residual + x
class EncTransformerAttnLayer(nn.Module):
    """Pre-LayerNorm residual self-attention encoder layer ([T, B, C] inputs)."""

    def __init__(self, c, num_heads, dropout, attention_dropout=0.0):
        super().__init__()
        self.dropout = dropout
        self.self_attn = MultiheadAttention(c, num_heads, self_attention=True, dropout=attention_dropout, bias=False)
        self.self_attn_layer_norm = LayerNorm(c)

    def forward(self, x, encoder_padding_mask=None, **kwargs):
        """Apply LN -> self-attention -> dropout with a residual connection."""
        layer_norm_training = kwargs.get('layer_norm_training', None)
        if layer_norm_training is not None:
            # let the caller pin LayerNorm's train/eval behaviour
            self.self_attn_layer_norm.training = layer_norm_training

        residual = x
        x = self.self_attn_layer_norm(x)
        if encoder_padding_mask is not None:
            # zero padded time steps before attending
            x = x.masked_fill(encoder_padding_mask.t().unsqueeze(-1), 0)
        x, _ = self.self_attn(
            query=x,
            key=x,
            value=x,
            key_padding_mask=encoder_padding_mask,
        )
        x = F.dropout(x, p=self.dropout, training=self.training)
        return residual + x
class EncTransformerFFNLayer(nn.Module):
    """Pre-LayerNorm convolutional feed-forward encoder block ([T, B, C] inputs).

    conv (over time) -> activation -> dropout -> linear -> dropout, with a
    residual connection. ``padding='LEFT'`` makes the conv causal.
    """

    def __init__(self, hidden_size, filter_size, kernel_size, dropout, activation, padding="SAME"):
        super().__init__()
        self.kernel_size = kernel_size
        self.dropout = dropout
        self.activation_fn = build_activation(activation)
        self.layer_norm = LayerNorm(hidden_size)
        if padding == 'SAME':
            self.ffn_1 = nn.Conv1d(hidden_size, filter_size, kernel_size, padding=kernel_size // 2)
        elif padding == 'LEFT':
            # causal: pad only on the left so outputs never see future frames
            self.ffn_1 = nn.Sequential(
                nn.ConstantPad1d((kernel_size - 1, 0), 0.0),
                nn.Conv1d(hidden_size, filter_size, kernel_size)
            )
        self.ffn_2 = Linear(filter_size, hidden_size)

    def forward(self, x, encoder_padding_mask=None, **kwargs):
        layer_norm_training = kwargs.get('layer_norm_training', None)
        if layer_norm_training is not None:
            # let the caller pin LayerNorm's train/eval behaviour
            self.layer_norm.training = layer_norm_training

        residual = x
        x = self.layer_norm(x)
        if encoder_padding_mask is not None:
            x = x.masked_fill(encoder_padding_mask.t().unsqueeze(-1), 0)

        # conv expects [B, C, T]; restore [T, B, C] afterwards
        x = self.ffn_1(x.permute(1, 2, 0)).permute(2, 0, 1)
        x = F.dropout(self.activation_fn(x), self.dropout, training=self.training)
        x = F.dropout(self.ffn_2(x), self.dropout, training=self.training)
        return residual + x
class IdentityLayer(nn.Module):
    """No-op layer: returns its first input unchanged, ignoring extra args."""

    def __init__(self):
        super().__init__()

    def forward(self, x, *args, **kwargs):
        return x
# Registry mapping architecture-search op ids to encoder-layer constructors.
# Construction is deferred via lambdas because every layer reads the global
# `hparams` at build time.
OPERATIONS_ENCODER = {
    # ids 1-7: depthwise-separable conv layers, kernel sizes 1, 5, 9, 13, 17, 21, 25
    1: lambda : EncSepConvLayer(hparams['hidden_size'], 1, hparams['dropout'], hparams['activation']),
    2: lambda : EncSepConvLayer(hparams['hidden_size'], 5, hparams['dropout'], hparams['activation']),
    3: lambda : EncSepConvLayer(hparams['hidden_size'], 9, hparams['dropout'], hparams['activation']),
    4: lambda : EncSepConvLayer(hparams['hidden_size'], 13, hparams['dropout'], hparams['activation']),
    5: lambda : EncSepConvLayer(hparams['hidden_size'], 17, hparams['dropout'], hparams['activation']),
    6: lambda : EncSepConvLayer(hparams['hidden_size'], 21, hparams['dropout'], hparams['activation']),
    7: lambda : EncSepConvLayer(hparams['hidden_size'], 25, hparams['dropout'], hparams['activation']),
    # ids 8-10: multi-head self-attention layers with 2 / 4 / 8 heads
    8: lambda : EncTransformerAttnLayer(hparams['hidden_size'], 2, hparams['dropout']),
    9: lambda : EncTransformerAttnLayer(hparams['hidden_size'], 4, hparams['dropout']),
    10: lambda : EncTransformerAttnLayer(hparams['hidden_size'], 8, hparams['dropout']),
    # id 11: convolutional feed-forward layer
    11: lambda : EncTransformerFFNLayer(hparams['hidden_size'], hparams['filter_size'], hparams['ffn_kernel_size'], hparams['dropout'], hparams['activation']),
    # id 12: identity (no-op) layer
    12: lambda : IdentityLayer(),
}
| 24,598 | 41.930192 | 159 | py |
NeuralSpeech | NeuralSpeech-master/LightSpeech/utils/stft.py | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import torch
import numpy as np
import torch.nn.functional as F
from torch.autograd import Variable
from scipy.signal import get_window
from librosa.util import pad_center, tiny
import librosa.util as librosa_util
def window_sumsquare(window, n_frames, hop_length=200, win_length=800,
                     n_fft=800, dtype=np.float32, norm=None):
    """
    # from librosa 0.6
    Compute the sum-square envelope of a window function at a given hop length.

    This is used to estimate modulation effects induced by windowing
    observations in short-time fourier transforms.

    Parameters
    ----------
    window : string, tuple, number, callable, or list-like
        Window specification, as in `get_window`
    n_frames : int > 0
        The number of analysis frames
    hop_length : int > 0
        The number of samples to advance between frames
    win_length : [optional]
        The length of the window function. By default, this matches `n_fft`.
    n_fft : int > 0
        The length of each analysis frame.
    dtype : np.dtype
        The data type of the output
    norm : [optional]
        Normalization passed through to `librosa.util.normalize`

    Returns
    -------
    wss : np.ndarray, shape=`(n_fft + hop_length * (n_frames - 1))`
        The sum-squared envelope of the window function
    """
    if win_length is None:
        win_length = n_fft

    total_len = n_fft + hop_length * (n_frames - 1)
    envelope = np.zeros(total_len, dtype=dtype)

    # squared, normalized window zero-padded to the frame length
    win_sq = get_window(window, win_length, fftbins=True)
    win_sq = librosa_util.normalize(win_sq, norm=norm) ** 2
    win_sq = librosa_util.pad_center(win_sq, n_fft)

    # overlap-add the squared window at every frame offset
    for frame_idx in range(n_frames):
        start = frame_idx * hop_length
        envelope[start:min(total_len, start + n_fft)] += win_sq[:max(0, min(n_fft, total_len - start))]
    return envelope
class STFT(torch.nn.Module):
    """STFT/iSTFT implemented with 1-D convolutions.

    adapted from Prem Seetharaman's https://github.com/pseeth/pytorch-stft

    The forward DFT basis (optionally windowed) is applied with a strided
    conv1d; the inverse uses a transposed conv1d plus window sum-square
    compensation to undo windowing modulation.
    """

    def __init__(self, filter_length=800, hop_length=200, win_length=800,
                 window='hann'):
        """
        :param filter_length: FFT size; spectrum has filter_length // 2 + 1 bins
        :param hop_length: hop between adjacent frames, in samples
        :param win_length: analysis window length; must not exceed filter_length
        :param window: window name understood by scipy's get_window, or None
        """
        super(STFT, self).__init__()
        self.filter_length = filter_length
        self.hop_length = hop_length
        self.win_length = win_length
        self.window = window
        self.forward_transform = None
        scale = self.filter_length / self.hop_length
        fourier_basis = np.fft.fft(np.eye(self.filter_length))

        cutoff = int((self.filter_length / 2 + 1))
        # stack real rows above imaginary rows: conv output channels are
        # [real_0..real_cutoff-1, imag_0..imag_cutoff-1]
        fourier_basis = np.vstack([np.real(fourier_basis[:cutoff, :]),
                                   np.imag(fourier_basis[:cutoff, :])])

        forward_basis = torch.FloatTensor(fourier_basis[:, None, :])
        inverse_basis = torch.FloatTensor(
            np.linalg.pinv(scale * fourier_basis).T[:, None, :])

        if window is not None:
            # pad_center zero-pads the window up to filter_length, which
            # requires win_length <= filter_length (the original assert was
            # inverted: `win_length >= filter_length`)
            assert (filter_length >= win_length)
            # get window and zero center pad it to filter_length
            fft_window = get_window(window, win_length, fftbins=True)
            fft_window = pad_center(fft_window, filter_length)
            fft_window = torch.from_numpy(fft_window).float()

            # window the bases
            forward_basis *= fft_window
            inverse_basis *= fft_window

        self.register_buffer('forward_basis', forward_basis.float())
        self.register_buffer('inverse_basis', inverse_basis.float())

    def transform(self, input_data):
        """Analyze `input_data` [B, num_samples] into (magnitude, phase).

        Both outputs have shape [B, filter_length // 2 + 1, n_frames].
        """
        num_batches = input_data.size(0)
        num_samples = input_data.size(1)

        self.num_samples = num_samples

        # similar to librosa, reflect-pad the input by half a frame on each side
        input_data = input_data.view(num_batches, 1, num_samples)
        input_data = F.pad(
            input_data.unsqueeze(1),
            (int(self.filter_length / 2), int(self.filter_length / 2), 0, 0),
            mode='reflect')
        input_data = input_data.squeeze(1)

        forward_transform = F.conv1d(
            input_data,
            Variable(self.forward_basis, requires_grad=False),
            stride=self.hop_length,
            padding=0)

        cutoff = int((self.filter_length / 2) + 1)
        real_part = forward_transform[:, :cutoff, :]
        imag_part = forward_transform[:, cutoff:, :]

        magnitude = torch.sqrt(real_part ** 2 + imag_part ** 2)
        phase = torch.autograd.Variable(
            torch.atan2(imag_part.data, real_part.data))

        return magnitude, phase

    def inverse(self, magnitude, phase):
        """Synthesize a waveform [B, 1, T] from magnitude and phase."""
        recombine_magnitude_phase = torch.cat(
            [magnitude * torch.cos(phase), magnitude * torch.sin(phase)], dim=1)

        inverse_transform = F.conv_transpose1d(
            recombine_magnitude_phase,
            Variable(self.inverse_basis, requires_grad=False),
            stride=self.hop_length,
            padding=0)

        if self.window is not None:
            window_sum = window_sumsquare(
                self.window, magnitude.size(-1), hop_length=self.hop_length,
                win_length=self.win_length, n_fft=self.filter_length,
                dtype=np.float32)
            # remove modulation effects (only where the envelope is non-negligible)
            approx_nonzero_indices = torch.from_numpy(
                np.where(window_sum > tiny(window_sum))[0])
            window_sum = torch.autograd.Variable(
                torch.from_numpy(window_sum), requires_grad=False)
            window_sum = window_sum.cuda() if magnitude.is_cuda else window_sum
            inverse_transform[:, :, approx_nonzero_indices] /= window_sum[approx_nonzero_indices]

            # scale by hop ratio
            inverse_transform *= float(self.filter_length) / self.hop_length

        # drop the half-frame padding added in transform()
        # (also removed a stray trailing ':' that made the second slice a
        # redundant step-slice)
        inverse_transform = inverse_transform[:, :, int(self.filter_length / 2):]
        inverse_transform = inverse_transform[:, :, :-int(self.filter_length / 2)]

        return inverse_transform

    def forward(self, input_data):
        """Round-trip: analysis then synthesis; caches magnitude/phase on self."""
        self.magnitude, self.phase = self.transform(input_data)
        reconstruction = self.inverse(self.magnitude, self.phase)
        return reconstruction
| 6,019 | 35.26506 | 97 | py |
NeuralSpeech | NeuralSpeech-master/LightSpeech/utils/pl_utils.py | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import os
import re
import sys
import copy
import glob
import tqdm
import logging
import itertools
import subprocess
import threading
import traceback
from functools import wraps
import numpy as np
import torch
from torch.cuda._utils import _get_device_index
import torch.optim
import torch.utils.data
import torch.distributed as dist
import torch.multiprocessing as mp
from torch.nn import DataParallel
from torch.nn.parallel import DistributedDataParallel
from pytorch_lightning.callbacks import GradientAccumulationScheduler
from pytorch_lightning.callbacks import ModelCheckpoint
from torch.nn.parallel._functions import Gather
from torch.optim.optimizer import Optimizer
def get_a_var(obj):  # pragma: no cover
    """Return the first torch.Tensor found in obj, searching nested
    lists/tuples and dict items recursively; None if there is none."""
    if isinstance(obj, torch.Tensor):
        return obj

    if isinstance(obj, (list, tuple)):
        for candidate in obj:
            found = get_a_var(candidate)
            if isinstance(found, torch.Tensor):
                return found
    if isinstance(obj, dict):
        for item in obj.items():
            found = get_a_var(item)
            if isinstance(found, torch.Tensor):
                return found
    return None
def data_loader(fn):
    """
    Decorator that evaluates *fn* lazily, once, and memoizes the result on
    the instance (under ``_lazy_<name>``). Non-list results of
    ``test_dataloader``/``val_dataloader`` are wrapped in a single-element
    list. Fix: the original called ``wraps(fn)`` but discarded its result,
    so the wrapper never inherited fn's metadata; it is now applied as a
    decorator.

    :param fn: zero-argument (besides self) dataloader factory method
    :return: the memoizing wrapper
    """
    attr_name = '_lazy_' + fn.__name__

    @wraps(fn)
    def _get_data_loader(self):
        try:
            value = getattr(self, attr_name)
        except AttributeError:
            try:
                value = fn(self)  # Lazy evaluation, done only once.
                if (
                        value is not None and
                        not isinstance(value, list) and
                        fn.__name__ in ['test_dataloader', 'val_dataloader']
                ):
                    value = [value]
            except AttributeError as e:
                # Guard against AttributeError suppression. (Issue #142)
                traceback.print_exc()
                error = f'{fn.__name__}: An AttributeError was encountered: ' + str(e)
                raise RuntimeError(error) from e
            setattr(self, attr_name, value)  # Memoize evaluation.
        return value

    return _get_data_loader
def parallel_apply(modules, inputs, kwargs_tup=None, devices=None):  # pragma: no cover
    r"""Applies each `module` in :attr:`modules` in parallel on arguments
    contained in :attr:`inputs` (positional) and :attr:`kwargs_tup` (keyword)
    on each of :attr:`devices`.

    Unlike torch's stock ``parallel_apply``, each replica is dispatched to
    its ``training_step`` / ``test_step`` / ``validation_step`` (depending on
    the root module's state) instead of ``forward``.

    Args:
        modules (Module): modules to be parallelized
        inputs (tensor): inputs to the modules
        devices (list of int or torch.device): CUDA devices

    :attr:`modules`, :attr:`inputs`, :attr:`kwargs_tup` (if given), and
    :attr:`devices` (if given) should all have same length. Moreover, each
    element of :attr:`inputs` can either be a single object as the only argument
    to a module, or a collection of positional arguments.
    """
    assert len(modules) == len(inputs)
    if kwargs_tup is not None:
        assert len(modules) == len(kwargs_tup)
    else:
        kwargs_tup = ({},) * len(modules)
    if devices is not None:
        assert len(modules) == len(devices)
    else:
        devices = [None] * len(modules)
    devices = list(map(lambda x: _get_device_index(x, True), devices))
    lock = threading.Lock()
    results = {}
    grad_enabled = torch.is_grad_enabled()

    def _worker(i, module, input, kwargs, device=None):
        # grad mode is thread-local in torch, so re-apply it in the worker
        torch.set_grad_enabled(grad_enabled)
        if device is None:
            device = get_a_var(input).get_device()
        try:
            with torch.cuda.device(device):
                # this also avoids accidental slicing of `input` if it is a Tensor
                if not isinstance(input, (list, tuple)):
                    input = (input,)

                # ---------------
                # CHANGE
                if module.training:
                    output = module.training_step(*input, **kwargs)

                elif module.testing:
                    output = module.test_step(*input, **kwargs)

                else:
                    output = module.validation_step(*input, **kwargs)
                # ---------------

                with lock:
                    results[i] = output
        except Exception as e:
            # store the exception; it is re-raised in the caller thread below
            with lock:
                results[i] = e

    # TODO: fix hack (maybe not a hack)
    # make sure each module knows what training state it's in...
    # fixes weird bug where copies are out of sync
    root_m = modules[0]
    for m in modules[1:]:
        m.training = root_m.training
        m.testing = root_m.testing

    if len(modules) > 1:
        threads = [threading.Thread(target=_worker,
                                    args=(i, module, input, kwargs, device))
                   for i, (module, input, kwargs, device) in
                   enumerate(zip(modules, inputs, kwargs_tup, devices))]

        for thread in threads:
            thread.start()
        for thread in threads:
            thread.join()
    else:
        # single replica: run inline to avoid thread overhead
        _worker(0, modules[0], inputs[0], kwargs_tup[0], devices[0])

    outputs = []
    for i in range(len(inputs)):
        output = results[i]
        if isinstance(output, Exception):
            raise output
        outputs.append(output)
    return outputs
def _find_tensors(obj): # pragma: no cover
r"""
Recursively find all tensors contained in the specified object.
"""
if isinstance(obj, torch.Tensor):
return [obj]
if isinstance(obj, (list, tuple)):
return itertools.chain(*map(_find_tensors, obj))
if isinstance(obj, dict):
return itertools.chain(*map(_find_tensors, obj.values()))
return []
class DDP(DistributedDataParallel):
    """
    Override the forward call in lightning so it goes to training and validation step respectively
    """

    def parallel_apply(self, replicas, inputs, kwargs):
        # route through the lightning-aware parallel_apply defined above
        return parallel_apply(replicas, inputs, kwargs, self.device_ids[:len(replicas)])

    def forward(self, *inputs, **kwargs):  # pragma: no cover
        # keep replicated parameters in sync before running the step
        self._sync_params()
        if self.device_ids:
            inputs, kwargs = self.scatter(inputs, kwargs, self.device_ids)
            if len(self.device_ids) == 1:
                # --------------
                # LIGHTNING MOD
                # --------------
                # normal
                # output = self.module(*inputs[0], **kwargs[0])
                # lightning: dispatch to the step matching the module's state
                if self.module.training:
                    output = self.module.training_step(*inputs[0], **kwargs[0])
                elif self.module.testing:
                    output = self.module.test_step(*inputs[0], **kwargs[0])
                else:
                    output = self.module.validation_step(*inputs[0], **kwargs[0])
            else:
                outputs = self.parallel_apply(self._module_copies[:len(inputs)], inputs, kwargs)
                output = self.gather(outputs, self.output_device)
        else:
            # normal
            output = self.module(*inputs, **kwargs)
        if torch.is_grad_enabled():
            # We'll return the output object verbatim since it is a freeform
            # object. We need to find any tensors in this object, though,
            # because we need to figure out which parameters were used during
            # this forward pass, to ensure we short circuit reduction for any
            # unused parameters. Only if `find_unused_parameters` is set.
            if self.find_unused_parameters:
                self.reducer.prepare_for_backward(list(_find_tensors(output)))
            else:
                self.reducer.prepare_for_backward([])
        return output
class DP(DataParallel):
    """
    Override the forward call in lightning so it goes to training and validation step respectively
    """

    def forward(self, *inputs, **kwargs):
        # no GPUs configured: plain module call
        if not self.device_ids:
            return self.module(*inputs, **kwargs)
        # all params/buffers must live on the source device before scattering
        for t in itertools.chain(self.module.parameters(), self.module.buffers()):
            if t.device != self.src_device_obj:
                raise RuntimeError("module must have its parameters and buffers "
                                   "on device {} (device_ids[0]) but found one of "
                                   "them on device: {}".format(self.src_device_obj, t.device))
        inputs, kwargs = self.scatter(inputs, kwargs, self.device_ids)
        if len(self.device_ids) == 1:
            # lightning: dispatch to the step matching the module's state
            if self.module.training:
                return self.module.training_step(*inputs[0], **kwargs[0])
            elif self.module.testing:
                return self.module.test_step(*inputs[0], **kwargs[0])
            else:
                return self.module.validation_step(*inputs[0], **kwargs[0])
        replicas = self.replicate(self.module, self.device_ids[:len(inputs)])
        outputs = self.parallel_apply(replicas, inputs, kwargs)
        return self.gather(outputs, self.output_device)

    def parallel_apply(self, replicas, inputs, kwargs):
        # route through the lightning-aware parallel_apply defined above
        return parallel_apply(replicas, inputs, kwargs, self.device_ids[:len(replicas)])

    def gather(self, outputs, output_device):
        # merge per-GPU outputs: tensors are concatenated along dim 0,
        # dicts are merged key-wise, python scalars are left as a per-GPU list
        dim = 0

        def gather_map(outputs):
            out = outputs[0]
            if out is None:
                return None
            if isinstance(out, torch.Tensor):
                return Gather.apply(output_device, dim, *outputs)
            if isinstance(out, dict):
                if not all((len(out) == len(d) for d in outputs)):
                    raise ValueError('All dicts must have the same number of keys')
                return type(out)(((k, gather_map([d[k] for d in outputs]))
                                  for k in out))
            if isinstance(out, float) or isinstance(out, int):
                # plain python scalars are returned per-gpu, not merged
                return outputs
            return type(out)(map(gather_map, zip(*outputs)))

        # Recursive function calls like this create reference cycles.
        # Setting the function to None clears the refcycle.
        try:
            res = gather_map(outputs)
        finally:
            gather_map = None
        return res
class GradientAccumulationScheduler:
    """Schedules `trainer.accumulate_grad_batches` over epochs.

    :param scheduling: mapping of {epoch (1-indexed int): accumulation factor (int)}.
        The factor for a given epoch is the entry of the largest key <= epoch.

    :raises TypeError: on an empty dict or non-integer keys/values.
    :raises IndexError: when the smallest epoch key is < 1.
    """

    def __init__(self, scheduling: dict):
        if scheduling == {}:  # empty dict error
            raise TypeError("Empty dict cannot be interpreted correct")
        for key in scheduling.keys():
            if not isinstance(key, int) or not isinstance(scheduling[key], int):
                raise TypeError("All epoches and accumulation factor must be integers")
        # BUGFIX: work on a copy so the caller's dict is never mutated
        scheduling = dict(scheduling)
        minimal_epoch = min(scheduling.keys())
        if minimal_epoch < 1:
            msg = f"Epochs indexing from 1, epoch {minimal_epoch} cannot be interpreted correct"
            raise IndexError(msg)
        elif minimal_epoch != 1:  # if user didnt define first epoch accumulation factor
            scheduling.update({1: 1})
        self.scheduling = scheduling
        self.epochs = sorted(scheduling.keys())

    def on_epoch_begin(self, epoch, trainer):
        """Set `trainer.accumulate_grad_batches` for the given 0-indexed epoch."""
        epoch += 1  # indexing epochs from 1
        # walk the schedule from the latest epoch down to the first match
        for i in reversed(range(len(self.epochs))):
            if epoch >= self.epochs[i]:
                trainer.accumulate_grad_batches = self.scheduling.get(self.epochs[i])
                break
class LatestModelCheckpoint(ModelCheckpoint):
    """Checkpoint callback that keeps the `num_keep` most recent
    `<prefix>_ckpt_steps_<N>.ckpt` files, plus a single best-on-`monitor`
    checkpoint whose best value is persisted to `best_valid.npy`.

    :param filepath: directory checkpoints are written to (created if missing)
    :param monitor: metric key compared against the running best
    :param num_keep: number of step checkpoints to retain (was previously
        ignored — the cleanup was hard-coded to 5)
    :param mode: 'min' / 'max' / 'auto' ('auto' infers from the metric name)
    :param period: save every `period` epochs
    """

    def __init__(self, filepath, monitor='val_loss', verbose=0, num_keep=5, save_weights_only=False,
                 mode='auto', period=1, prefix='model'):
        # deliberately skips ModelCheckpoint.__init__ — this class manages
        # its own state below
        super(ModelCheckpoint, self).__init__()
        self.monitor = monitor
        self.verbose = verbose
        self.filepath = filepath
        os.makedirs(filepath, exist_ok=True)
        self.num_keep = num_keep
        self.save_weights_only = save_weights_only
        self.period = period
        self.epochs_since_last_check = 0
        self.prefix = prefix
        self.best_k_models = {}
        # {filename: monitor}
        self.kth_best_model = ''
        self.save_top_k = 1
        self.task = None  # set externally; supplies `global_step`
        # NOTE: np.inf replaces the np.Inf alias removed in NumPy 2.0
        if mode == 'min':
            self.monitor_op = np.less
            self.best = np.inf
            self.mode = 'min'
        elif mode == 'max':
            self.monitor_op = np.greater
            self.best = -np.inf
            self.mode = 'max'
        else:
            # 'auto': infer comparison direction from the metric name
            if 'acc' in self.monitor or self.monitor.startswith('fmeasure'):
                self.monitor_op = np.greater
                self.best = -np.inf
                self.mode = 'max'
            else:
                self.monitor_op = np.less
                self.best = np.inf
                self.mode = 'min'
        # resume the best value from a previous run, if recorded
        if os.path.exists(f'{self.filepath}/best_valid.npy'):
            self.best = np.load(f'{self.filepath}/best_valid.npy')[0]

    def get_all_ckpts(self):
        """Return all step checkpoints, newest (highest step count) first."""
        # raw string fixes the invalid '\_' escape of the original pattern
        return sorted(glob.glob(f'{self.filepath}/{self.prefix}_ckpt_steps_*.ckpt'),
                      key=lambda x: -int(re.findall(r'.*steps_(\d+)\.ckpt', x)[0]))

    def on_epoch_end(self, epoch, logs=None):
        """Save a step checkpoint every `period` epochs, prune old checkpoints
        down to `num_keep`, and refresh the best checkpoint when `monitor`
        improved according to `monitor_op`."""
        logs = logs or {}
        self.epochs_since_last_check += 1
        best_filepath = f'{self.filepath}/{self.prefix}_ckpt_best.pt'
        if self.epochs_since_last_check >= self.period:
            self.epochs_since_last_check = 0
            filepath = f'{self.filepath}/{self.prefix}_ckpt_steps_{self.task.global_step}.ckpt'
            if self.verbose > 0:
                logging.info(f'Epoch {epoch:05d}@{self.task.global_step}: saving model to {filepath}')
            self._save_model(filepath)
            # BUGFIX: honor `num_keep` (was hard-coded to 5); also drop the
            # shell `rm -rf` — these are plain files, os.remove suffices and
            # avoids building a shell command from a path
            for old_ckpt in self.get_all_ckpts()[self.num_keep:]:
                os.remove(old_ckpt)
                if self.verbose > 0:
                    logging.info(f'Delete ckpt: {os.path.basename(old_ckpt)}')
            current = logs.get(self.monitor)
            if current is not None:
                if self.monitor_op(current, self.best):
                    self.best = current
                    if self.verbose > 0:
                        logging.info(
                            f'Epoch {epoch:05d}@{self.task.global_step}: {self.monitor} reached'
                            f' {current:0.5f} (best {self.best:0.5f}), saving model to'
                            f' {best_filepath} as top 1')
                    self._save_model(best_filepath)
                    np.save(f'{self.filepath}/best_valid.npy', [self.best])
class BaseTrainer:
def __init__(
        self,
        logger=True,
        checkpoint_callback=True,
        default_save_path=None,
        gradient_clip_val=0,
        process_position=0,
        gpus=-1,
        log_gpu_memory=None,
        show_progress_bar=True,
        track_grad_norm=-1,
        check_val_every_n_epoch=1,
        accumulate_grad_batches=1,
        max_updates=1000,
        min_epochs=1,
        val_check_interval=1.0,
        log_save_interval=100,
        row_log_interval=10,
        print_nan_grads=False,
        weights_summary='full',
        num_sanity_val_steps=5,
        resume_from_checkpoint=None,
):
    """Configure the trainer's bookkeeping, device selection and callbacks.

    :param logger: experiment logger; must expose `.rank`, `.log_metrics`,
        `.save` (used by `log_metrics` below)
    :param checkpoint_callback: checkpoint callback object; `.save_function`
        and `.filepath` are accessed below, so the boolean default would fail
        here — presumably callers always pass a real callback (TODO confirm)
    :param gradient_clip_val: global-norm clip value; 0 disables clipping
    :param accumulate_grad_batches: int or {epoch: factor} dict (see
        `configure_accumulated_gradients`)
    :param val_check_interval: int (every N batches) or float (fraction of
        an epoch) controlling validation frequency
    :param resume_from_checkpoint: explicit checkpoint path; when None, the
        newest checkpoint in the callback's directory is used

    NOTE(review): GPU selection reads os.environ["CUDA_VISIBLE_DEVICES"]
    directly and raises KeyError when it is unset — confirm callers set it.
    """
    self.log_gpu_memory = log_gpu_memory
    self.gradient_clip_val = gradient_clip_val
    self.check_val_every_n_epoch = check_val_every_n_epoch
    self.track_grad_norm = track_grad_norm
    self.on_gpu = True if (gpus and torch.cuda.is_available()) else False
    self.process_position = process_position
    self.weights_summary = weights_summary
    self.max_updates = max_updates
    self.min_epochs = min_epochs
    self.num_sanity_val_steps = num_sanity_val_steps
    self.print_nan_grads = print_nan_grads
    self.resume_from_checkpoint = resume_from_checkpoint
    self.default_save_path = default_save_path
    # training bookeeping
    self.total_batch_idx = 0
    self.running_loss = []
    self.avg_loss = 0
    self.batch_idx = 0
    self.tqdm_metrics = {}
    self.callback_metrics = {}
    self.num_val_batches = 0
    self.num_training_batches = 0
    self.num_test_batches = 0
    self.get_train_dataloader = None
    self.get_test_dataloaders = None
    self.get_val_dataloaders = None
    self.is_iterable_train_dataloader = False
    # training state
    self.model = None
    self.testing = False
    self.disable_validation = False
    self.lr_schedulers = []
    self.optimizers = None
    self.global_step = 0
    self.current_epoch = 0
    self.total_batches = 0
    # configure checkpoint callback
    self.checkpoint_callback = checkpoint_callback
    self.checkpoint_callback.save_function = self.save_checkpoint
    self.weights_save_path = self.checkpoint_callback.filepath
    # accumulated grads
    self.configure_accumulated_gradients(accumulate_grad_batches)
    # allow int, string and gpu list
    if os.environ["CUDA_VISIBLE_DEVICES"] == '':
        self.data_parallel_device_ids = None
        self.root_gpu = None
    else:
        self.data_parallel_device_ids = [int(x) for x in os.environ["CUDA_VISIBLE_DEVICES"].split(",")]
        self.root_gpu = self.data_parallel_device_ids[0]
    # distributed backend choice
    self.use_ddp = False
    self.use_dp = False
    self.single_gpu = False
    self.distributed_backend = 'ddp' if self.num_gpus > 0 else 'dp'
    self.set_distributed_mode(self.distributed_backend)
    self.proc_rank = 0
    self.world_size = 1
    self.node_rank = 0
    # can't init progress bar here because starting a new process
    # means the progress_bar won't survive pickling
    self.show_progress_bar = show_progress_bar
    # logging
    self.log_save_interval = log_save_interval
    self.val_check_interval = val_check_interval
    self.logger = logger
    self.logger.rank = 0
    self.row_log_interval = row_log_interval
@property
def num_gpus(self):
gpus = self.data_parallel_device_ids
if gpus is None:
return 0
else:
return len(gpus)
@property
def data_parallel(self):
return self.use_dp or self.use_ddp
def get_model(self):
    """Unwrap the underlying module from any DP/DDP wrapper."""
    if isinstance(self.model, (DDP, DP)):
        return self.model.module
    return self.model
# -----------------------------
# MODEL TRAINING
# -----------------------------
def fit(self, model):
    """Entry point: dispatch training to the configured backend; returns 0."""
    if self.use_ddp:
        # one spawned process per GPU
        mp.spawn(self.ddp_train, nprocs=self.num_gpus, args=(model,))
        return 0
    if self.use_dp:
        self.dp_train(model)
    elif self.single_gpu:
        self.single_gpu_train(model)
    else:
        # CPU fallback
        self.cpu_train(model)
    return 0
def init_optimizers(self, optimizers):
    """Normalize `configure_optimizers()` output to (optimizers, lr_schedulers)."""
    # a bare optimizer
    if isinstance(optimizers, Optimizer):
        return [optimizers], []
    # an ([optimizers], [schedulers]) pair
    if len(optimizers) == 2 and isinstance(optimizers[0], list):
        opts, scheds = optimizers
        return opts, scheds
    # a flat list or tuple of optimizers
    if isinstance(optimizers, (list, tuple)):
        return optimizers, []
def run_pretrain_routine(self, model):
    """Sanity check a few things before starting actual training.
    :param model: the module to train (possibly already wrapped in DP/DDP)
    """
    ref_model = model
    if self.data_parallel:
        ref_model = model.module
    # give model convenience properties
    ref_model.trainer = self
    # set local properties on the model
    self.copy_trainer_model_properties(ref_model)
    # link up experiment object
    if self.logger is not None:
        ref_model.logger = self.logger
        self.logger.save()
    if self.use_ddp:
        # keep all ranks together before touching data
        dist.barrier()
    # set up checkpoint callback
    # self.configure_checkpoint_callback()
    # transfer data loaders from model
    self.get_dataloaders(ref_model)
    # track model now.
    # if cluster resets state, the model will update with the saved weights
    self.model = model
    # restore training and model before hpc call
    self.restore_weights(model)
    # when testing requested only run test and return
    if self.testing:
        self.run_evaluation(test=True)
        return
    # check if we should run validation during training
    self.disable_validation = self.num_val_batches == 0
    # run tiny validation (if validation defined)
    # to make sure program won't crash during val
    ref_model.on_sanity_check_start()
    ref_model.on_train_start()
    if not self.disable_validation and self.num_sanity_val_steps > 0:
        # init progress bars for validation sanity check
        pbar = tqdm.tqdm(desc='Validation sanity check',
                         total=self.num_sanity_val_steps * len(self.get_val_dataloaders()),
                         leave=False, position=2 * self.process_position,
                         disable=not self.show_progress_bar, dynamic_ncols=True, unit='batch')
        self.main_progress_bar = pbar
        # dummy validation progress bar
        self.val_progress_bar = tqdm.tqdm(disable=True)
        self.evaluate(model, self.get_val_dataloaders(), self.num_sanity_val_steps, self.testing)
        # close progress bars
        self.main_progress_bar.close()
        self.val_progress_bar.close()
    # init progress bar
    pbar = tqdm.tqdm(leave=True, position=2 * self.process_position,
                     disable=not self.show_progress_bar, dynamic_ncols=True, unit='batch',
                     file=sys.stdout)
    self.main_progress_bar = pbar
    # clear cache before training
    if self.on_gpu:
        torch.cuda.empty_cache()
    # CORE TRAINING LOOP
    self.train()
def test(self, model):
    """Run only the test loop: sets the `testing` flag, then `fit` dispatches
    and `run_pretrain_routine` short-circuits into `run_evaluation(test=True)`."""
    self.testing = True
    self.fit(model)
@property
def training_tqdm_dict(self):
tqdm_dict = {
'step': '{}'.format(self.global_step),
}
tqdm_dict.update(self.tqdm_metrics)
return tqdm_dict
# --------------------
# restore ckpt
# --------------------
def restore_weights(self, model):
    """
    To restore weights we have two cases.
    First, attempt to restore hpc weights. If successful, don't restore
    other weights.
    Otherwise, try to restore actual weights
    :param model:
    :return:
    """
    # clear cache before restore
    if self.on_gpu:
        torch.cuda.empty_cache()
    if self.resume_from_checkpoint is not None and self.resume_from_checkpoint != '':
        # user requested an explicit checkpoint
        self.restore(self.resume_from_checkpoint, on_gpu=self.on_gpu)
    else:
        # restore weights if same exp version
        self.restore_state_if_checkpoint_exists(model)
    # wait for all models to restore weights
    if self.use_ddp:
        # wait for all processes to catch up
        dist.barrier()
    # clear cache after restore
    if self.on_gpu:
        torch.cuda.empty_cache()
def restore_state_if_checkpoint_exists(self, model):
did_restore = False
# do nothing if there's not dir or callback
no_ckpt_callback = (self.checkpoint_callback is None) or (not self.checkpoint_callback)
if no_ckpt_callback or not os.path.exists(self.checkpoint_callback.filepath):
return did_restore
# restore trainer state and model if there is a weight for this experiment
last_steps = -1
last_ckpt_name = None
# find last epoch
checkpoints = os.listdir(self.checkpoint_callback.filepath)
for name in checkpoints:
if '.ckpt' in name:
if 'steps_' in name:
steps = name.split('steps_')[1]
steps = int(re.sub('[^0-9]', '', steps))
if steps > last_steps:
last_steps = steps
last_ckpt_name = name
# restore last checkpoint
if last_ckpt_name is not None:
last_ckpt_path = os.path.join(self.checkpoint_callback.filepath, last_ckpt_name)
self.restore(last_ckpt_path, self.on_gpu)
logging.info(f'model and trainer restored from checkpoint: {last_ckpt_path}')
did_restore = True
return did_restore
def restore(self, checkpoint_path, on_gpu):
    """Load model weights and trainer state from `checkpoint_path`
    (mapped to CPU first, moved to GPU afterwards when requested)."""
    checkpoint = torch.load(checkpoint_path, map_location='cpu')
    # load model state
    model = self.get_model()
    # load the state_dict on the model automatically
    model.load_state_dict(checkpoint['state_dict'])
    if on_gpu:
        model.cuda(self.root_gpu)
    # load training state (affects trainer only)
    self.restore_training_state(checkpoint)
    model.global_step = self.global_step
    del checkpoint  # free the CPU copy before continuing
    try:
        # non-zero ranks bail out early in a distributed run
        if dist.is_initialized() and dist.get_rank() > 0:
            return
    except Exception as e:
        # dist not available/initialized: log and continue single-process
        print(e)
        return
def restore_training_state(self, checkpoint):
    """
    Restore trainer state.
    Model will get its change to update
    :param checkpoint: dict in the shape produced by `dump_checkpoint`
    :return:
    """
    if self.checkpoint_callback is not None and self.checkpoint_callback is not False:
        self.checkpoint_callback.best = checkpoint['checkpoint_callback_best']
    self.global_step = checkpoint['global_step']
    self.current_epoch = checkpoint['epoch']
    # restore the optimizers
    optimizer_states = checkpoint['optimizer_states']
    for optimizer, opt_state in zip(self.optimizers, optimizer_states):
        optimizer.load_state_dict(opt_state)
        # move optimizer to GPU 1 weight at a time
        # avoids OOM
        if self.root_gpu is not None:
            for state in optimizer.state.values():
                for k, v in state.items():
                    if isinstance(v, torch.Tensor):
                        state[k] = v.cuda(self.root_gpu)
    # restore the lr schedulers
    lr_schedulers = checkpoint['lr_schedulers']
    for scheduler, lrs_state in zip(self.lr_schedulers, lr_schedulers):
        scheduler.load_state_dict(lrs_state)
# --------------------
# MODEL SAVE CHECKPOINT
# --------------------
def _atomic_save(self, checkpoint, filepath):
"""Saves a checkpoint atomically, avoiding the creation of incomplete checkpoints.
This will create a temporary checkpoint with a suffix of ``.part``, then copy it to the final location once
saving is finished.
Args:
checkpoint (object): The object to save.
Built to be used with the ``dump_checkpoint`` method, but can deal with anything which ``torch.save``
accepts.
filepath (str|pathlib.Path): The path to which the checkpoint will be saved.
This points to the file that the checkpoint will be stored in.
"""
tmp_path = str(filepath) + ".part"
torch.save(checkpoint, tmp_path)
os.replace(tmp_path, filepath)
def save_checkpoint(self, filepath):
    """Dump the full trainer/model state and write it atomically to `filepath`."""
    checkpoint = self.dump_checkpoint()
    self._atomic_save(checkpoint, filepath)
def dump_checkpoint(self):
checkpoint = {
'epoch': self.current_epoch,
'global_step': self.global_step
}
if self.checkpoint_callback is not None and self.checkpoint_callback is not False:
checkpoint['checkpoint_callback_best'] = self.checkpoint_callback.best
# save optimizers
optimizer_states = []
for i, optimizer in enumerate(self.optimizers):
optimizer_states.append(optimizer.state_dict())
checkpoint['optimizer_states'] = optimizer_states
# save lr schedulers
lr_schedulers = []
for i, scheduler in enumerate(self.lr_schedulers):
lr_schedulers.append(scheduler.state_dict())
checkpoint['lr_schedulers'] = lr_schedulers
# add the hparams and state_dict from the model
model = self.get_model()
checkpoint['state_dict'] = model.state_dict()
# give the model a chance to add a few things
model.on_save_checkpoint(checkpoint)
return checkpoint
def copy_trainer_model_properties(self, model):
    """Mirror trainer runtime flags onto the model (and its wrapped module)."""
    ref_model = model.module if isinstance(model, (DP, DDP)) else model
    for target in (model, ref_model):
        target.trainer = self
        target.on_gpu = self.on_gpu
        target.use_dp = self.use_dp
        target.use_ddp = self.use_ddp
        target.testing = self.testing
        target.single_gpu = self.single_gpu
def transfer_batch_to_gpu(self, batch, gpu_id):
# base case: object can be directly moved using `cuda` or `to`
if callable(getattr(batch, 'cuda', None)):
return batch.cuda(gpu_id)
elif callable(getattr(batch, 'to', None)):
return batch.to(torch.device('cuda', gpu_id))
# when list
elif isinstance(batch, list):
for i, x in enumerate(batch):
batch[i] = self.transfer_batch_to_gpu(x, gpu_id)
return batch
# when tuple
elif isinstance(batch, tuple):
batch = list(batch)
for i, x in enumerate(batch):
batch[i] = self.transfer_batch_to_gpu(x, gpu_id)
return tuple(batch)
# when dict
elif isinstance(batch, dict):
for k, v in batch.items():
batch[k] = self.transfer_batch_to_gpu(v, gpu_id)
return batch
# nothing matches, return the value as is without transform
return batch
def cpu_train(self, model):
    """Set up optimizers and run the standard pretrain routine on CPU."""
    self.optimizers, self.lr_schedulers = self.init_optimizers(model.configure_optimizers())
    self.run_pretrain_routine(model)
def single_gpu_train(self, model):
    """Set up optimizers, move the model to the root GPU, and run training."""
    # CHOOSE OPTIMIZER
    # allow for lr schedulers as well
    self.optimizers, self.lr_schedulers = self.init_optimizers(model.configure_optimizers())
    model.cuda(self.root_gpu)
    self.run_pretrain_routine(model)
def dp_train(self, model):
    """Set up optimizers, move the model to GPU, wrap it in DP, and run training."""
    # CHOOSE OPTIMIZER
    # allow for lr schedulers as well
    self.optimizers, self.lr_schedulers = self.init_optimizers(model.configure_optimizers())
    model.cuda(self.root_gpu)
    # create list of device ids
    device_ids = self.data_parallel_device_ids
    model = DP(model, device_ids=device_ids)
    self.run_pretrain_routine(model)
def set_distributed_mode(self, distributed_backend):
# skip for CPU
if self.num_gpus == 0:
return
# single GPU case
# in single gpu case we allow ddp so we can train on multiple
# nodes, 1 gpu per node
elif self.num_gpus == 1:
self.single_gpu = True
self.use_dp = False
self.use_ddp = False
self.root_gpu = 0
self.data_parallel_device_ids = [0]
else:
if distributed_backend is not None:
self.use_dp = distributed_backend == 'dp'
self.use_ddp = distributed_backend == 'ddp'
elif distributed_backend is None:
self.use_dp = True
self.use_ddp = False
logging.info(f'gpu available: {torch.cuda.is_available()}, used: {self.on_gpu}')
def ddp_train(self, gpu_idx, model):
    """
    Entry point into one DDP worker process (spawned per GPU by `fit`).
    :param gpu_idx: index of this worker's GPU (also its rank within the node)
    :param model: the module to train
    :return:
    """
    # otherwise default to node rank 0
    self.node_rank = 0
    # show progressbar only on progress_rank 0
    self.show_progress_bar = self.show_progress_bar and self.node_rank == 0 and gpu_idx == 0
    # determine which process we are and world size
    if self.use_ddp:
        self.proc_rank = self.node_rank * self.num_gpus + gpu_idx
        self.world_size = self.num_gpus
    # let the exp know the rank to avoid overwriting logs
    if self.logger is not None:
        self.logger.rank = self.proc_rank
    # set up server using proc 0's ip address
    # try to init for 20 times at max in case ports are taken
    # where to store ip_table
    model.trainer = self
    model.init_ddp_connection(self.proc_rank, self.world_size)
    # CHOOSE OPTIMIZER
    # allow for lr schedulers as well
    self.optimizers, self.lr_schedulers = self.init_optimizers(model.configure_optimizers())
    # MODEL
    # copy model to each gpu
    if self.distributed_backend == 'ddp':
        torch.cuda.set_device(gpu_idx)
    model.cuda(gpu_idx)
    # set model properties before going into wrapper
    self.copy_trainer_model_properties(model)
    # override root GPU
    self.root_gpu = gpu_idx
    if self.distributed_backend == 'ddp':
        device_ids = [gpu_idx]
    else:
        device_ids = None
    # allow user to configure ddp
    model = model.configure_ddp(model, device_ids)
    # continue training routine
    self.run_pretrain_routine(model)
def resolve_root_node_address(self, root_node):
    """Collapse a bracketed host range like ``host[12-15,3]`` into the first
    concrete host name (``host12``). Plain names pass through unchanged."""
    if '[' not in root_node:
        return root_node
    name = root_node.split('[')[0]
    # first comma-separated entry, first end of any range
    number = root_node.split(',')[0]
    if '-' in number:
        number = number.split('-')[0]
    number = re.sub('[^0-9]', '', number)
    return name + number
def log_metrics(self, metrics, grad_norm_dic, step=None):
"""Logs the metric dict passed in.
:param metrics:
:param grad_norm_dic:
"""
# added metrics by Lightning for convenience
metrics['epoch'] = self.current_epoch
# add norms
metrics.update(grad_norm_dic)
# turn all tensors to scalars
scalar_metrics = self.metrics_to_scalars(metrics)
step = step if step is not None else self.global_step
# log actual metrics
if self.proc_rank == 0 and self.logger is not None:
self.logger.log_metrics(scalar_metrics, step=step)
self.logger.save()
def add_tqdm_metrics(self, metrics):
for k, v in metrics.items():
if type(v) is torch.Tensor:
v = v.item()
self.tqdm_metrics[k] = v
def metrics_to_scalars(self, metrics):
new_metrics = {}
for k, v in metrics.items():
if isinstance(v, torch.Tensor):
v = v.item()
if type(v) is dict:
v = self.metrics_to_scalars(v)
new_metrics[k] = v
return new_metrics
def process_output(self, output, train=False):
    """Reduces output according to the training mode.
    Separates loss from logging and tqdm metrics
    :param output: dict returned by a step function; recognized keys are
        'loss', 'progress_bar', 'log' and 'hiddens' — everything else becomes
        a callback metric
    :param train: when True, extract 'loss' and DP-reduce per-gpu outputs
    :return: (loss, progress_bar_metrics, log_metrics, callback_metrics, hiddens)
    """
    # ---------------
    # EXTRACT CALLBACK KEYS
    # ---------------
    # all keys not progress_bar or log are candidates for callbacks
    callback_metrics = {}
    for k, v in output.items():
        if k not in ['progress_bar', 'log', 'hiddens']:
            callback_metrics[k] = v
    if train and self.use_dp:
        num_gpus = self.num_gpus
        callback_metrics = self.reduce_distributed_output(callback_metrics, num_gpus)
    for k, v in callback_metrics.items():
        if isinstance(v, torch.Tensor):
            callback_metrics[k] = v.item()
    # ---------------
    # EXTRACT PROGRESS BAR KEYS
    # ---------------
    try:
        progress_output = output['progress_bar']
        # reduce progress metrics for tqdm when using dp
        if train and self.use_dp:
            num_gpus = self.num_gpus
            progress_output = self.reduce_distributed_output(progress_output, num_gpus)
        progress_bar_metrics = progress_output
    except Exception:
        # missing key (or failed reduction): no progress-bar metrics
        progress_bar_metrics = {}
    # ---------------
    # EXTRACT LOGGING KEYS
    # ---------------
    # extract metrics to log to experiment
    try:
        log_output = output['log']
        # reduce progress metrics for tqdm when using dp
        if train and self.use_dp:
            num_gpus = self.num_gpus
            log_output = self.reduce_distributed_output(log_output, num_gpus)
        log_metrics = log_output
    except Exception:
        log_metrics = {}
    # ---------------
    # EXTRACT LOSS
    # ---------------
    # if output dict doesn't have the keyword loss
    # then assume the output=loss if scalar
    loss = None
    if train:
        try:
            loss = output['loss']
        except Exception:
            if type(output) is torch.Tensor:
                loss = output
            else:
                raise RuntimeError(
                    'No `loss` value in the dictionary returned from `model.training_step()`.'
                )
        # when using dp need to reduce the loss
        if self.use_dp:
            loss = self.reduce_distributed_output(loss, self.num_gpus)
    # ---------------
    # EXTRACT HIDDEN
    # ---------------
    hiddens = output.get('hiddens')
    # use every metric passed in as a candidate for callback
    callback_metrics.update(progress_bar_metrics)
    callback_metrics.update(log_metrics)
    # convert tensors to numpy
    for k, v in callback_metrics.items():
        if isinstance(v, torch.Tensor):
            callback_metrics[k] = v.item()
    return loss, progress_bar_metrics, log_metrics, callback_metrics, hiddens
def reduce_distributed_output(self, output, num_gpus):
if num_gpus <= 1:
return output
# when using DP, we get one output per gpu
# average outputs and return
if type(output) is torch.Tensor:
return output.mean()
for k, v in output.items():
# recurse on nested dics
if isinstance(output[k], dict):
output[k] = self.reduce_distributed_output(output[k], num_gpus)
# do nothing when there's a scalar
elif isinstance(output[k], torch.Tensor) and output[k].dim() == 0:
pass
# reduce only metrics that have the same number of gpus
elif output[k].size(0) == num_gpus:
reduced = torch.mean(output[k])
output[k] = reduced
return output
def clip_gradients(self):
if self.gradient_clip_val > 0:
model = self.get_model()
torch.nn.utils.clip_grad_norm_(model.parameters(), self.gradient_clip_val)
def print_nan_gradients(self):
model = self.get_model()
for param in model.parameters():
if (param.grad is not None) and torch.isnan(param.grad.float()).any():
logging.info(param, param.grad)
def configure_accumulated_gradients(self, accumulate_grad_batches):
    """Build the accumulation scheduler from an int or an {epoch: factor} dict."""
    self.accumulate_grad_batches = None
    if isinstance(accumulate_grad_batches, dict):
        schedule = accumulate_grad_batches
    elif isinstance(accumulate_grad_batches, int):
        # constant factor from the first epoch on
        schedule = {1: accumulate_grad_batches}
    else:
        raise TypeError("Gradient accumulation supports only int and dict types")
    self.accumulation_scheduler = GradientAccumulationScheduler(schedule)
def get_dataloaders(self, model):
    """Transfer the model's dataloader factories onto the trainer and, under
    DDP, materialize each loader once after the barrier."""
    self.init_train_dataloader(model)
    self.init_test_dataloader(model)
    self.init_val_dataloader(model)
    if self.use_ddp:
        # keep ranks together, then trigger each (lazy) loader once
        dist.barrier()
        self.get_train_dataloader()
        self.get_test_dataloaders()
        self.get_val_dataloaders()
def init_train_dataloader(self, model):
self.fisrt_epoch = True
self.get_train_dataloader = model.train_dataloader
if isinstance(self.get_train_dataloader(), torch.utils.data.DataLoader):
self.num_training_batches = len(self.get_train_dataloader())
self.num_training_batches = int(self.num_training_batches)
else:
self.num_training_batches = float('inf')
self.is_iterable_train_dataloader = True
if isinstance(self.val_check_interval, int):
self.val_check_batch = self.val_check_interval
else:
self._percent_range_check('val_check_interval')
self.val_check_batch = int(self.num_training_batches * self.val_check_interval)
self.val_check_batch = max(1, self.val_check_batch)
def init_val_dataloader(self, model):
self.get_val_dataloaders = model.val_dataloader
self.num_val_batches = 0
if self.get_val_dataloaders() is not None:
if isinstance(self.get_val_dataloaders()[0], torch.utils.data.DataLoader):
self.num_val_batches = sum(len(dataloader) for dataloader in self.get_val_dataloaders())
self.num_val_batches = int(self.num_val_batches)
else:
self.num_val_batches = float('inf')
def init_test_dataloader(self, model):
self.get_test_dataloaders = model.test_dataloader
if self.get_test_dataloaders() is not None:
if isinstance(self.get_test_dataloaders()[0], torch.utils.data.DataLoader):
self.num_test_batches = sum(len(dataloader) for dataloader in self.get_test_dataloaders())
self.num_test_batches = int(self.num_test_batches)
else:
self.num_test_batches = float('inf')
def evaluate(self, model, dataloaders, max_batches, test=False):
"""Run evaluation code.
:param model: PT model
:param dataloaders: list of PT dataloaders
:param max_batches: Scalar
:param test: boolean
:return:
"""
# enable eval mode
model.zero_grad()
model.eval()
# copy properties for forward overrides
self.copy_trainer_model_properties(model)
# disable gradients to save memory
torch.set_grad_enabled(False)
if test:
self.get_model().test_start()
# bookkeeping
outputs = []
# run training
for dataloader_idx, dataloader in enumerate(dataloaders):
dl_outputs = []
for batch_idx, batch in enumerate(dataloader):
if batch is None: # pragma: no cover
continue
# stop short when on fast_dev_run (sets max_batch=1)
if batch_idx >= max_batches:
break
# -----------------
# RUN EVALUATION STEP
# -----------------
output = self.evaluation_forward(model,
batch,
batch_idx,
dataloader_idx,
test)
# track outputs for collation
dl_outputs.append(output)
# batch done
if test:
self.test_progress_bar.update(1)
else:
self.val_progress_bar.update(1)
self.main_progress_bar.update(1)
outputs.append(dl_outputs)
# with a single dataloader don't pass an array
if len(dataloaders) == 1:
outputs = outputs[0]
# give model a chance to do something with the outputs (and method defined)
model = self.get_model()
if test:
eval_results_ = model.test_end(outputs)
else:
eval_results_ = model.validation_end(outputs)
if eval_results_ is not None:
eval_results = eval_results_
# enable train mode again
model.train()
# enable gradients to save memory
torch.set_grad_enabled(True)
return eval_results
    def run_evaluation(self, test=False):
        """Drive a full val/test pass: hooks, progress bar, metrics, checkpoint."""
        # when testing make sure user defined a test step
        model = self.get_model()
        model.on_pre_performance_check()
        # select dataloaders
        if test:
            dataloaders = self.get_test_dataloaders()
            max_batches = self.num_test_batches
        else:
            # val
            dataloaders = self.get_val_dataloaders()
            max_batches = self.num_val_batches
        # init validation or test progress bar
        # main progress bar will already be closed when testing so initial position is free
        position = 2 * self.process_position + (not test)
        desc = 'Testing' if test else 'Validating'
        pbar = tqdm.tqdm(desc=desc, total=max_batches, leave=test, position=position,
                         disable=not self.show_progress_bar, dynamic_ncols=True,
                         unit='batch', file=sys.stdout)
        setattr(self, f'{"test" if test else "val"}_progress_bar', pbar)
        # run evaluation
        eval_results = self.evaluate(self.model,
                                     dataloaders,
                                     max_batches,
                                     test)
        _, prog_bar_metrics, log_metrics, callback_metrics, _ = self.process_output(
            eval_results)
        # add metrics to prog bar
        self.add_tqdm_metrics(prog_bar_metrics)
        # log metrics
        self.log_metrics(log_metrics, {})
        # track metrics for callbacks
        self.callback_metrics.update(callback_metrics)
        # hook
        model.on_post_performance_check()
        # add model specific metrics
        tqdm_metrics = self.training_tqdm_dict
        if not test:
            self.main_progress_bar.set_postfix(**tqdm_metrics)
        # close progress bar
        if test:
            self.test_progress_bar.close()
        else:
            self.val_progress_bar.close()
        # model checkpointing (rank 0 only, and never during pure testing)
        if self.proc_rank == 0 and self.checkpoint_callback is not None and not test:
            self.checkpoint_callback.on_epoch_end(epoch=self.current_epoch,
                                                  logs=self.callback_metrics)
    def evaluation_forward(self, model, batch, batch_idx, dataloader_idx, test=False):
        """Dispatch one val/test step over the right device/parallelism path."""
        # make dataloader_idx arg in validation_step optional
        args = [batch, batch_idx]
        if test and len(self.get_test_dataloaders()) > 1:
            args.append(dataloader_idx)
        elif not test and len(self.get_val_dataloaders()) > 1:
            args.append(dataloader_idx)
        # handle DP, DDP forward: the wrapped model routes to the right step
        if self.use_ddp or self.use_dp:
            output = model(*args)
            return output
        # single GPU
        if self.single_gpu:
            # for single GPU put inputs on gpu manually
            root_gpu = 0
            if isinstance(self.data_parallel_device_ids, list):
                root_gpu = self.data_parallel_device_ids[0]
            batch = self.transfer_batch_to_gpu(batch, root_gpu)
            args[0] = batch
        # CPU (or single-GPU after the manual transfer above)
        if test:
            output = model.test_step(*args)
        else:
            output = model.validation_step(*args)
        return output
    def train(self):
        """Top-level training loop: run epochs until ``max_updates`` is exceeded."""
        model = self.get_model()
        # run all epochs (upper bound is effectively "forever"; max_updates breaks out)
        for epoch in range(self.current_epoch, 1000000):
            # set seed for distributed sampler (enables shuffling for each epoch)
            if self.use_ddp and hasattr(self.get_train_dataloader().sampler, 'set_epoch'):
                self.get_train_dataloader().sampler.set_epoch(epoch)
            # get model
            model = self.get_model()
            # update training progress in trainer and model
            model.current_epoch = epoch
            self.current_epoch = epoch
            total_val_batches = 0
            if not self.disable_validation:
                # val can be checked multiple times in epoch
                is_val_epoch = (self.current_epoch + 1) % self.check_val_every_n_epoch == 0
                val_checks_per_epoch = self.num_training_batches // self.val_check_batch
                val_checks_per_epoch = val_checks_per_epoch if is_val_epoch else 0
                total_val_batches = self.num_val_batches * val_checks_per_epoch
            # total batches includes multiple val checks
            self.total_batches = self.num_training_batches + total_val_batches
            self.batch_loss_value = 0  # accumulated grads
            if self.is_iterable_train_dataloader:
                # for iterable train loader, the progress bar never ends
                num_iterations = None
            else:
                num_iterations = self.total_batches
            # reset progress bar
            # .reset() doesn't work on disabled progress bar so we should check
            if not self.main_progress_bar.disable:
                self.main_progress_bar.reset(num_iterations)
            desc = f'Epoch {epoch + 1}' if not self.is_iterable_train_dataloader else ''
            self.main_progress_bar.set_description(desc)
            # changing gradient according accumulation_scheduler
            self.accumulation_scheduler.on_epoch_begin(epoch, self)
            # -----------------
            # RUN TNG EPOCH
            # -----------------
            self.run_training_epoch()
            # update LR schedulers
            if self.lr_schedulers is not None:
                for lr_scheduler in self.lr_schedulers:
                    lr_scheduler.step(epoch=self.current_epoch)
            # global_step is advanced inside run_training_epoch
            if self.global_step > self.max_updates:
                print("| Training end..")
                break #exit()
        self.main_progress_bar.close()
        model.on_train_end()
        if self.logger is not None:
            self.logger.finalize("success")
    def run_training_epoch(self):
        """Run one training epoch: batches, periodic validation, logging."""
        # before epoch hook
        if self.is_function_implemented('on_epoch_start'):
            model = self.get_model()
            model.on_epoch_start()
        # run epoch
        for batch_idx, batch in enumerate(self.get_train_dataloader()):
            # stop epoch if we limited the number of training batches
            if batch_idx >= self.num_training_batches:
                break
            self.batch_idx = batch_idx
            model = self.get_model()
            model.global_step = self.global_step
            # ---------------
            # RUN TRAIN STEP
            # ---------------
            output = self.run_training_batch(batch, batch_idx)
            batch_result, grad_norm_dic, batch_step_metrics = output
            # when returning -1 from train_step, we end epoch early
            early_stop_epoch = batch_result == -1
            # ---------------
            # RUN VAL STEP
            # ---------------
            # NOTE(review): `fisrt_epoch` is a typo kept for consistency with
            # the attribute initialized elsewhere in this trainer — confirm
            # before renaming. It suppresses validation on the very first step.
            should_check_val = (
                not self.disable_validation and self.global_step % self.val_check_batch == 0 and not self.fisrt_epoch)
            self.fisrt_epoch = False
            if should_check_val:
                self.run_evaluation(test=self.testing)
            # when logs should be saved
            should_save_log = (batch_idx + 1) % self.log_save_interval == 0 or early_stop_epoch
            if should_save_log:
                if self.proc_rank == 0 and self.logger is not None:
                    self.logger.save()
            # when metrics should be logged
            should_log_metrics = batch_idx % self.row_log_interval == 0 or early_stop_epoch
            if should_log_metrics:
                # logs user requested information to logger
                self.log_metrics(batch_step_metrics, grad_norm_dic)
            self.global_step += 1
            self.total_batch_idx += 1
            # end epoch early
            # stop when the flag is changed or we've gone past the amount
            # requested in the batches
            if early_stop_epoch:
                break
            if self.global_step > self.max_updates:
                #print("| Training end..")
                break #exit()
        # epoch end hook
        if self.is_function_implemented('on_epoch_end'):
            model = self.get_model()
            model.on_epoch_end()
    def run_training_batch(self, batch, batch_idx):
        """Run one optimization step (possibly per-optimizer) on a single batch.

        Returns a tuple ``(signal, grad_norm_dic, all_log_metrics)`` where
        ``signal`` is 0 normally and -1 when the model requested an early stop.
        """
        # track grad norms
        grad_norm_dic = {}
        # track all metrics for callbacks
        all_callback_metrics = []
        # track metrics to log
        all_log_metrics = []
        if batch is None:
            return 0, grad_norm_dic, {}
        # hook
        if self.is_function_implemented('on_batch_start'):
            model_ref = self.get_model()
            response = model_ref.on_batch_start(batch)
            if response == -1:
                return -1, grad_norm_dic, {}
        # single "split" here (no truncated-BPTT splitting in this trainer)
        splits = [batch]
        self.hiddens = None
        for split_idx, split_batch in enumerate(splits):
            self.split_idx = split_idx
            # call training_step once per optimizer
            for opt_idx, optimizer in enumerate(self.optimizers):
                # make sure only the gradients of the current optimizer's parameters are calculated
                # in the training step to prevent dangling gradients in multiple-optimizer setup.
                if len(self.optimizers) > 1:
                    for param in self.get_model().parameters():
                        param.requires_grad = False
                    for group in optimizer.param_groups:
                        for param in group['params']:
                            param.requires_grad = True
                # wrap the forward step in a closure so second order methods work
                def optimizer_closure():
                    # forward pass
                    output = self.training_forward(
                        split_batch, batch_idx, opt_idx, self.hiddens)
                    closure_loss = output[0]
                    progress_bar_metrics = output[1]
                    log_metrics = output[2]
                    callback_metrics = output[3]
                    self.hiddens = output[4]
                    if closure_loss is None:
                        return None
                    # accumulate loss
                    # (if accumulate_grad_batches = 1 no effect)
                    closure_loss = closure_loss / self.accumulate_grad_batches
                    # backward pass
                    model_ref = self.get_model()
                    if closure_loss.requires_grad:
                        model_ref.backward(closure_loss, optimizer)
                    # track metrics for callbacks
                    all_callback_metrics.append(callback_metrics)
                    # track progress bar metrics
                    self.add_tqdm_metrics(progress_bar_metrics)
                    all_log_metrics.append(log_metrics)
                    # insert after step hook
                    if self.is_function_implemented('on_after_backward'):
                        model_ref = self.get_model()
                        model_ref.on_after_backward()
                    return closure_loss
                # calculate loss
                loss = optimizer_closure()
                if loss is None:
                    continue
                # nan grads
                if self.print_nan_grads:
                    self.print_nan_gradients()
                # track total loss for logging (avoid mem leaks)
                self.batch_loss_value += loss.item()
                # gradient update with accumulated gradients
                if (self.batch_idx + 1) % self.accumulate_grad_batches == 0:
                    # track gradient norms when requested
                    if batch_idx % self.row_log_interval == 0:
                        if self.track_grad_norm > 0:
                            model = self.get_model()
                            grad_norm_dic = model.grad_norm(
                                self.track_grad_norm)
                    # clip gradients
                    self.clip_gradients()
                    # calls .step(), .zero_grad()
                    # override function to modify this behavior
                    model = self.get_model()
                    model.optimizer_step(self.current_epoch, batch_idx, optimizer, opt_idx)
                    # calculate running loss for display
                    self.running_loss.append(self.batch_loss_value)
                    self.batch_loss_value = 0
                    self.avg_loss = np.mean(self.running_loss[-100:])
        # activate batch end hook
        if self.is_function_implemented('on_batch_end'):
            model = self.get_model()
            model.on_batch_end()
        # update progress bar
        self.main_progress_bar.update(1)
        self.main_progress_bar.set_postfix(**self.training_tqdm_dict)
        # collapse all metrics into one dict
        all_log_metrics = {k: v for d in all_log_metrics for k, v in d.items()}
        # track all metrics for callbacks
        self.callback_metrics.update({k: v for d in all_callback_metrics for k, v in d.items()})
        return 0, grad_norm_dic, all_log_metrics
    def training_forward(self, batch, batch_idx, opt_idx, hiddens):
        """
        Handle forward for each training case (distributed, single gpu, etc...)
        :param batch: one training batch
        :param batch_idx: index of the batch within the epoch
        :param opt_idx: index of the optimizer being stepped
        :param hiddens: carried hidden state (unused unless the model uses it)
        :return: processed output tuple from ``process_output(..., train=True)``
        """
        # ---------------
        # FORWARD
        # ---------------
        # enable not needing to add opt_idx to training_step
        args = [batch, batch_idx, opt_idx]
        # distributed forward: the DP/DDP wrapper routes to training_step
        if self.use_ddp or self.use_dp:
            output = self.model(*args)
        # single GPU forward
        elif self.single_gpu:
            gpu_id = 0
            if isinstance(self.data_parallel_device_ids, list):
                gpu_id = self.data_parallel_device_ids[0]
            # shallow-copy so the original batch object is not mutated
            batch = self.transfer_batch_to_gpu(copy.copy(batch), gpu_id)
            args[0] = batch
            output = self.model.training_step(*args)
        # CPU forward
        else:
            output = self.model.training_step(*args)
        # allow any mode to define training_end
        model_ref = self.get_model()
        output_ = model_ref.training_end(output)
        if output_ is not None:
            output = output_
        # format and reduce outputs accordingly
        output = self.process_output(output, train=True)
        return output
# ---------------
# Utils
# ---------------
def is_function_implemented(self, f_name):
model = self.get_model()
f_op = getattr(model, f_name, None)
return callable(f_op)
def _percent_range_check(self, name):
value = getattr(self, name)
msg = f"`{name}` must lie in the range [0.0, 1.0], but got {value:.3f}."
if name == "val_check_interval":
msg += " If you want to disable validation set `val_percent_check` to 0.0 instead."
if not 0. <= value <= 1.:
raise ValueError(msg)
| 59,913 | 35.201813 | 122 | py |
NeuralSpeech | NeuralSpeech-master/LightSpeech/utils/pwg_decode_from_mel.py | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import logging
import yaml
import numpy as np
from sklearn.preprocessing import StandardScaler
import torch
from torch import nn
import utils
from parallel_wavegan.models import ParallelWaveGANGenerator
from parallel_wavegan.utils import read_hdf5
def load_pwg_model(config_path, checkpoint_path, stats_path):
    """Load a ParallelWaveGAN generator plus its feature scaler and config.

    Two checkpoint layouts are supported: the official PWG format (key
    'model' -> 'generator', with an external stats file for the scaler) and a
    custom task checkpoint (key 'state_dict', no scaler).

    :param config_path: YAML config describing the generator
    :param checkpoint_path: model checkpoint (.pth / .pt)
    :param stats_path: feature normalization stats (hdf5 or npy), official only
    :return: (model, scaler-or-None, config dict, torch device)
    """
    # load config
    with open(config_path) as f:
        config = yaml.load(f, Loader=yaml.Loader)
    # setup
    if torch.cuda.is_available():
        device = torch.device("cuda")
    else:
        device = torch.device("cpu")
    model = ParallelWaveGANGenerator(**config["generator_params"])
    ckpt_dict = torch.load(checkpoint_path, map_location="cpu")
    if 'state_dict' not in ckpt_dict:  # official vocoder
        model.load_state_dict(torch.load(checkpoint_path, map_location="cpu")["model"]["generator"])
        scaler = StandardScaler()
        if config["format"] == "hdf5":
            scaler.mean_ = read_hdf5(stats_path, "mean")
            scaler.scale_ = read_hdf5(stats_path, "scale")
        elif config["format"] == "npy":
            scaler.mean_ = np.load(stats_path)[0]
            scaler.scale_ = np.load(stats_path)[1]
        else:
            raise ValueError("support only hdf5 or npy format.")
    else:  # custom PWG vocoder
        # wrap the generator in a throwaway module so the task-style
        # state_dict keys (model_gen.*) line up; strict=False skips the rest
        fake_task = nn.Module()
        fake_task.model_gen = model
        fake_task.load_state_dict(torch.load(checkpoint_path, map_location="cpu")["state_dict"], strict=False)
        scaler = None
    model.remove_weight_norm()
    model = model.eval().to(device)
    logging.info(f"loaded model parameters from {checkpoint_path}.")
    return model, scaler, config, device
def generate_wavegan(c, model, scaler, config, device, profile=False):
    """Vocode a mel-spectrogram ``c`` (frames x bins, numpy) into a waveform.

    The conditioning features are optionally normalized with ``scaler``,
    edge-padded by the generator's aux context window, and upsampled by
    ``hop_size`` via a random noise input of matching length.
    """
    # start generation
    pad_size = (config["generator_params"]["aux_context_window"],
                config["generator_params"]["aux_context_window"])
    if scaler is not None:
        c = scaler.transform(c)
    with torch.no_grad():
        with utils.Timer('vocoder', print_time=profile):
            # generate each utterance
            z = torch.randn(1, 1, c.shape[0] * config["hop_size"]).to(device)
            # pad only the time axis, replicating edge frames
            c = np.pad(c, (pad_size, (0, 0)), "edge")
            c = torch.FloatTensor(c).unsqueeze(0).transpose(2, 1).to(device)
            y = model(z, c).view(-1)
    return y
| 2,401 | 33.811594 | 110 | py |
NeuralSpeech | NeuralSpeech-master/LightSpeech/utils/__init__.py | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import os
import sys
import glob
import logging
import re
import time
from collections import defaultdict
import shutil
import types
import numpy as np
import torch
import torch.nn.functional as F
import torch.distributed as dist
def reduce_tensors(metrics):
    """Recursively all-reduce (average) every tensor in a nested metrics dict.

    Requires an initialized torch.distributed process group; non-tensor
    values pass through unchanged.
    """
    reduced = {}
    for key, value in metrics.items():
        if isinstance(value, torch.Tensor):
            # in-place sum across workers, then average
            dist.all_reduce(value)
            value = value / dist.get_world_size()
        if type(value) is dict:
            value = reduce_tensors(value)
        reduced[key] = value
    return reduced
def tensors_to_scalars(metrics):
    """Recursively convert every scalar tensor in a nested dict to a Python number."""
    converted = {}
    for key, value in metrics.items():
        if isinstance(value, torch.Tensor):
            value = value.item()
        if type(value) is dict:
            value = tensors_to_scalars(value)
        converted[key] = value
    return converted
def move_to_cpu(tensors):
    """Recursively move every tensor in a nested dict onto the CPU."""
    result = {}
    for key, value in tensors.items():
        if isinstance(value, torch.Tensor):
            value = value.cpu()
        if type(value) is dict:
            value = move_to_cpu(value)
        result[key] = value
    return result
def move_to_cuda(tensor):
    """Move ``tensor`` to the GPU (non-blocking) when CUDA is available; no-op otherwise."""
    return tensor.cuda(non_blocking=True) if torch.cuda.is_available() else tensor
def count_parameters(model):
    """Return the total number of elements across all parameters of ``model``.

    Idiom improvement: a single ``sum`` over a generator replaces the manual
    accumulator loop.
    """
    return sum(p.numel() for p in model.parameters())
class AverageMeter(object):
    """Tracks a running sum/count of observations and exposes their average."""

    def __init__(self):
        self.reset()

    def reset(self):
        # running statistics: average, total, and observation count
        self.avg = 0
        self.sum = 0
        self.cnt = 0

    def update(self, val, n=1):
        """Record ``val`` observed ``n`` times and refresh the running average."""
        self.cnt += n
        self.sum += n * val
        self.avg = self.sum / self.cnt
def collate_1d(values, pad_idx=0, left_pad=False, max_len=None):
    """Convert a list of 1d tensors into a padded 2d tensor."""
    target_len = max_len if max_len is not None else max(v.size(0) for v in values)
    out = values[0].new(len(values), target_len).fill_(pad_idx)
    for row, v in enumerate(values):
        # pick the slice the data lands in (tail for left padding, head otherwise)
        dst = out[row][target_len - len(v):] if left_pad else out[row][:len(v)]
        assert dst.numel() == v.numel()
        dst.copy_(v)
    return out
def collate_2d(values, pad_idx=0, left_pad=False, shift_right=False, max_len=None):
    """Convert a list of 2d tensors into a padded 3d tensor."""
    target_len = max_len if max_len is not None else max(v.size(0) for v in values)
    out = values[0].new(len(values), target_len, values[0].shape[1]).fill_(pad_idx)
    for row, v in enumerate(values):
        dst = out[row][target_len - len(v):] if left_pad else out[row][:len(v)]
        assert dst.numel() == v.numel()
        if shift_right:
            # teacher-forcing shift: first frame keeps the pad value
            dst[1:] = v[:-1]
        else:
            dst.copy_(v)
    return out
def _is_batch_full(batch, num_tokens, max_tokens, max_sentences):
if len(batch) == 0:
return 0
if len(batch) == max_sentences:
return 1
if num_tokens > max_tokens:
return 1
return 0
def batch_by_size(
        indices, num_tokens_fn, max_tokens=None, max_sentences=None,
        required_batch_size_multiple=1, distributed=False
):
    """
    Yield mini-batches of indices bucketed by size. Batches may contain
    sequences of different lengths.
    Args:
        indices (List[int]): ordered list of dataset indices
        num_tokens_fn (callable): function that returns the number of tokens at
            a given index
        max_tokens (int, optional): max number of tokens in each batch
            (default: None).
        max_sentences (int, optional): max number of sentences in each
            batch (default: None).
        required_batch_size_multiple (int, optional): require batch size to
            be a multiple of N (default: 1).

    NOTE(review): ``distributed`` is accepted but never used in this body —
    confirm whether callers rely on it before removing.
    """
    max_tokens = max_tokens if max_tokens is not None else sys.maxsize
    max_sentences = max_sentences if max_sentences is not None else sys.maxsize
    bsz_mult = required_batch_size_multiple
    if isinstance(indices, types.GeneratorType):
        indices = np.fromiter(indices, dtype=np.int64, count=-1)
    sample_len = 0
    sample_lens = []
    batch = []
    batches = []
    for i in range(len(indices)):
        idx = indices[i]
        num_tokens = num_tokens_fn(idx)
        sample_lens.append(num_tokens)
        # cost model pads every sample in the batch to the longest one
        sample_len = max(sample_len, num_tokens)
        assert sample_len <= max_tokens, (
            "sentence at index {} of size {} exceeds max_tokens "
            "limit of {}!".format(idx, sample_len, max_tokens)
        )
        num_tokens = (len(batch) + 1) * sample_len
        if _is_batch_full(batch, num_tokens, max_tokens, max_sentences):
            # emit the largest multiple of bsz_mult (or the remainder if the
            # batch is smaller than one multiple) and carry the rest forward
            mod_len = max(
                bsz_mult * (len(batch) // bsz_mult),
                len(batch) % bsz_mult,
            )
            batches.append(batch[:mod_len])
            batch = batch[mod_len:]
            sample_lens = sample_lens[mod_len:]
            sample_len = max(sample_lens) if len(sample_lens) > 0 else 0
        batch.append(idx)
    if len(batch) > 0:
        batches.append(batch)
    return batches
def make_positions(tensor, padding_idx):
    """Replace non-padding symbols with their position numbers.

    Position numbers begin at padding_idx+1. Padding symbols are ignored.
    """
    # The int-cast dance mirrors upstream fairseq: it keeps the graph
    # exportable to ONNX/XLA, where cumsum dtype handling is limited.
    mask = tensor.ne(padding_idx).int()
    positions = torch.cumsum(mask, dim=1).type_as(mask) * mask
    return positions.long() + padding_idx
def softmax(x, dim):
    """Softmax along ``dim``, computed in float32 regardless of input dtype."""
    return F.softmax(x, dim=dim, dtype=torch.float32)
def sequence_mask(lengths, maxlen, dtype=torch.bool):
    """Build a [B, maxlen] mask that is truthy at positions < lengths[b].

    :param lengths: 1d tensor of sequence lengths
    :param maxlen: mask width; defaults to ``lengths.max()`` when None
    :param dtype: output dtype. FIX: the original computed ``mask.type(dtype)``
        but discarded the result (``Tensor.type`` is not in-place), so the
        parameter was silently ignored; the cast is now returned. The default
        (``torch.bool``) behaves exactly as before.
    """
    if maxlen is None:
        maxlen = lengths.max()
    mask = ~(torch.ones((len(lengths), maxlen)).to(lengths.device).cumsum(dim=1).t() > lengths).t()
    return mask.type(dtype)
INCREMENTAL_STATE_INSTANCE_ID = defaultdict(lambda: 0)
def _get_full_incremental_state_key(module_instance, key):
    """Return a state key of the form ``ClassName.<instance_id>.<key>``."""
    module_name = module_instance.__class__.__name__
    # assign a unique ID to each module instance, so that incremental state is
    # not shared across module instances
    if not hasattr(module_instance, '_instance_id'):
        INCREMENTAL_STATE_INSTANCE_ID[module_name] += 1
        module_instance._instance_id = INCREMENTAL_STATE_INSTANCE_ID[module_name]
    return '{}.{}.{}'.format(module_name, module_instance._instance_id, key)
def get_incremental_state(module, incremental_state, key):
    """Helper for getting incremental state for an nn.Module (None when absent)."""
    # the key lookup runs first so the module's instance id is assigned
    # even when no state dict exists yet (matches historical behavior)
    full_key = _get_full_incremental_state_key(module, key)
    if incremental_state is None:
        return None
    return incremental_state.get(full_key)
def set_incremental_state(module, incremental_state, key, value):
    """Helper for setting incremental state for an nn.Module (no-op when state is None)."""
    if incremental_state is None:
        return
    incremental_state[_get_full_incremental_state_key(module, key)] = value
def fill_with_neg_inf(t):
    """FP16-compatible function that fills a tensor with -inf."""
    # fill a float32 copy first, then cast back — the upstream fairseq idiom
    return t.float().fill_(float('-inf')).type_as(t)
def fill_with_neg_inf2(t):
    """FP16-compatible function that fills a tensor with -inf.

    Variant using a large finite value (-1e9) rather than true -inf,
    presumably to avoid NaNs in downstream softmax — confirm with callers.
    """
    return t.float().fill_(-1e9).type_as(t)
def save_checkpoint(state, is_best, save):
    """Serialize ``state`` to <save>/checkpoint.pth.tar; mirror to model_best when best."""
    ckpt_path = os.path.join(save, 'checkpoint.pth.tar')
    torch.save(state, ckpt_path)
    if is_best:
        shutil.copyfile(ckpt_path, os.path.join(save, 'model_best.pth.tar'))
def get_all_ckpts(checkpoint_name):
    """Glob paths matching .../checkpoint<N>.pt and return them newest-epoch first."""
    def _epoch(path):
        # extract N from .../checkpoint<N>.pt, or None when it doesn't match
        found = re.findall(r'.*/checkpoint(\d+).pt', path)
        return int(found[0]) if found else None

    candidates = [p for p in glob.glob(checkpoint_name) if _epoch(p) is not None]
    return sorted(candidates, key=lambda p: -_epoch(p))
def save(model_path, model, epoch, step, optimizer, best_valid_loss=None, is_best=True):
    """Save model/optimizer state as checkpoint<epoch>.pt, pruning old checkpoints.

    Only the 5 newest epoch checkpoints are kept; the latest is mirrored to
    checkpoint_latest.pt and, when ``is_best``, to checkpoint_best.pt.
    ``model``/``optimizer`` may each be a single object or a dict of them.
    """
    if isinstance(optimizer, dict):
        optimizer_states = {k: x.state_dict() for k, x in optimizer.items()}
    else:
        optimizer_states = optimizer.state_dict()
    if isinstance(model, dict):
        # unwrap DataParallel-style wrappers (".module") when present
        model_states = {k: (x.state_dict() if not hasattr(x, 'module') else x.module.state_dict())
                        for k, x in model.items()}
    else:
        model_states = model.state_dict() if not hasattr(model, 'module') else model.module.state_dict()
    state_dict = {
        'model': model_states,
        'optimizer': optimizer_states,
        'epoch': epoch,
        'step': step,
        'best_valid_loss': best_valid_loss,
    }
    filename = os.path.join(model_path, 'checkpoint{}.pt'.format(epoch))
    # prune everything beyond the 5 newest epoch checkpoints
    all_ckpts = get_all_ckpts(os.path.join(model_path, 'checkpoint*.pt'))
    for c in all_ckpts[5:]:
        logging.info(f"Remove ckpt: {c}")
        os.remove(c)
    torch.save(state_dict, filename)
    newest_filename = os.path.join(model_path, 'checkpoint_latest.pt')
    shutil.copyfile(filename, newest_filename)
    logging.info(f'Save ckpt: (unknown).')
    if is_best:
        best_filename = os.path.join(model_path, 'checkpoint_best.pt')
        shutil.copyfile(filename, best_filename)
        logging.info(f'Find best ckpt.')
def load(model_path):
    """Load a checkpoint from a directory (checkpoint_latest.pt) or a file path.

    :return: (model_state_dict, epoch, step, optimizer_state_dict,
        best_valid_loss); ``(None, 0, 0, None, inf)`` when no checkpoint exists.
    """
    if os.path.isdir(model_path):
        newest_filename = os.path.join(model_path, 'checkpoint_latest.pt')
    else:
        assert os.path.isfile(model_path), model_path
        newest_filename = model_path
    if not os.path.exists(newest_filename):
        return None, 0, 0, None, float('inf')
    state_dict = torch.load(newest_filename, map_location="cpu")
    model_state_dict = state_dict['model']
    epoch = state_dict['epoch']
    step = state_dict['step']
    optimizer_state_dict = state_dict['optimizer']
    # a checkpoint saved before any validation carries best_valid_loss=None
    best_valid_loss = state_dict['best_valid_loss'] if state_dict['best_valid_loss'] is not None else float('inf')
    return model_state_dict, epoch, step, optimizer_state_dict, best_valid_loss
def create_exp_dir(path, scripts_to_save=None):
    """Create the experiment directory and optionally snapshot scripts into <path>/scripts."""
    if not os.path.exists(path):
        os.makedirs(path)
    print('Experiment dir : {}'.format(path))
    if scripts_to_save is not None:
        scripts_dir = os.path.join(path, 'scripts')
        os.makedirs(scripts_dir, exist_ok=True)
        for script in scripts_to_save:
            shutil.copyfile(script, os.path.join(scripts_dir, os.path.basename(script)))
def debug_log(fname, *args):
    """Append each positional argument as its own line to ``fname``."""
    with open(fname, 'a+') as f:
        f.writelines('{}\n'.format(c) for c in args)
def unpack_dict_to_list(samples):
    """Split a dict of batched values into a list of per-sample dicts.

    Batch size is taken from ``samples['outputs']``; values that cannot be
    indexed per-sample (scalars, shorter containers) are silently skipped
    for that sample.
    """
    samples_ = []
    bsz = samples.get('outputs').size(0)
    for i in range(bsz):
        res = {}
        for k, v in samples.items():
            # FIX: the original bare ``except: pass`` swallowed every error
            # (including KeyboardInterrupt); narrow it to the errors that
            # per-sample indexing can realistically raise.
            try:
                res[k] = v[i]
            except (TypeError, KeyError, IndexError):
                pass
        samples_.append(res)
    return samples_
def get_focus_rate(attn, src_padding_mask=None, tgt_padding_mask=None):
    """Per-batch fraction of attention mass on each target step's best source position.

    attn: bs x L_t x L_s
    """
    # zero out padded source columns / target rows before measuring
    if src_padding_mask is not None:
        attn = attn * (1 - src_padding_mask.float())[:, None, :]
    if tgt_padding_mask is not None:
        attn = attn * (1 - tgt_padding_mask.float())[:, :, None]
    per_step_peak = attn.max(-1).values
    return per_step_peak.sum(-1) / attn.sum(-1).sum(-1)
def get_phone_coverage_rate(attn, src_padding_mask=None, src_seg_mask=None, tgt_padding_mask=None):
    """Per-batch mean of each source position's best attention across target steps.

    attn: bs x L_t x L_s
    """
    # combine padding and segment masks into one source mask
    src_mask = attn.new(attn.size(0), attn.size(-1)).bool().fill_(False)
    if src_padding_mask is not None:
        src_mask |= src_padding_mask
    if src_seg_mask is not None:
        src_mask |= src_seg_mask

    attn = attn * (1 - src_mask.float())[:, None, :]
    if tgt_padding_mask is not None:
        attn = attn * (1 - tgt_padding_mask.float())[:, :, None]

    # best attention each source position ever receives, averaged over
    # the unmasked source positions
    coverage = attn.max(1).values.sum(-1)
    return coverage / (1 - src_mask.float()).sum(-1)
def get_diagonal_focus_rate(attn, attn_ks, target_len, src_padding_mask=None, tgt_padding_mask=None,
                            band_mask_factor=5, band_width=50):
    '''
    attn: bx x L_t x L_s
    attn_ks: shape: tensor with shape [batch_size], input_lens/output_lens
    diagonal: y=k*x (k=attn_ks, x:output, y:input)
    1 0 0
    0 1 0
    0 0 1
    y>=k*(x-width) and y<=k*(x+width):1
    else:0

    Returns (diagonal_focus_rate, band_mask): the fraction of attention mass
    falling inside a diagonal band, plus the band mask itself.
    '''
    # band half-width: target_len/band_mask_factor, capped at band_width
    # width = min(target_len/band_mask_factor, 50)
    width1 = target_len / band_mask_factor
    width2 = target_len.new(target_len.size()).fill_(band_width)
    width = torch.where(width1 < width2, width1, width2).float()
    base = torch.ones(attn.size()).to(attn.device)
    zero = torch.zeros(attn.size()).to(attn.device)
    # x indexes target steps (dim 1), y indexes source steps (dim 2)
    x = torch.arange(0, attn.size(1)).to(attn.device)[None, :, None].float() * base
    y = torch.arange(0, attn.size(2)).to(attn.device)[None, None, :].float() * base
    cond = (y - attn_ks[:, None, None] * x)
    cond1 = cond + attn_ks[:, None, None] * width[:, None, None]
    cond2 = cond - attn_ks[:, None, None] * width[:, None, None]
    # inside the band iff cond1 >= 0 and cond2 <= 0
    mask1 = torch.where(cond1 < 0, zero, base)
    mask2 = torch.where(cond2 > 0, zero, base)
    mask = mask1 * mask2
    if src_padding_mask is not None:
        attn = attn * (1 - src_padding_mask.float())[:, None, :]
    if tgt_padding_mask is not None:
        attn = attn * (1 - tgt_padding_mask.float())[:, :, None]
    diagonal_attn = attn * mask
    diagonal_focus_rate = diagonal_attn.sum(-1).sum(-1) / attn.sum(-1).sum(-1)
    return diagonal_focus_rate, mask
def generate_arch(n, layers, num_ops=10):
    """Sample ``n`` random architectures, each a list of ``layers`` op ids in [1, num_ops]."""
    return [[np.random.randint(1, num_ops + 1) for _ in range(layers)]
            for _ in range(n)]
def parse_arch_to_seq(arch):
    """Return the architecture as a sequence (a shallow copy of its op list)."""
    return list(arch)
def parse_seq_to_arch(seq):
    """Return the sequence as an architecture (a shallow copy of its id list)."""
    return list(seq)
def pairwise_accuracy(la, lb):
    """Fraction of index pairs (i, j), i < j, that ``la`` and ``lb`` rank consistently."""
    n = len(la)
    assert n == len(lb)
    agree = 0
    total = 0
    for i in range(n):
        for j in range(i + 1, n):
            total += 1
            # the two lists agree on this pair iff their >= comparisons match
            if (la[i] >= la[j]) == (lb[i] >= lb[j]):
                agree += 1
    return float(agree) / total
def hamming_distance(la, lb):
    """Mean per-row Hamming distance between two equal-length lists of sequences."""
    N = len(la)
    assert N == len(lb)
    total = 0
    for row_a, row_b in zip(la, lb):
        assert len(row_a) == len(row_b)
        total += sum(1 for x, y in zip(row_a, row_b) if x != y)
    return total / N
def sample_arch(arch_pool, prob=None):
    """Draw one architecture from ``arch_pool``, optionally weighted by ``prob``."""
    indices = [i for i in range(len(arch_pool))]
    if prob is None:
        chosen = np.random.choice(indices)
    else:
        # normalize the weights before handing them to np.random.choice
        weights = np.array(prob, dtype=np.float32)
        weights = weights / weights.sum()
        chosen = np.random.choice(indices, p=weights)
    return arch_pool[chosen]
def select_attn(attn_logits, type='best'):
    """
    :param attn_logits: [n_layers, B, n_head, T_sp, T_txt]
    :param type: 'best' picks the single head with the highest focus score,
        'mean' averages all layer/head attentions
    :return: [B, T_sp, T_txt] softmax-normalized attention
    """
    encdec_attn = torch.stack(attn_logits, 0).transpose(1, 2)
    # [n_layers * n_head, B, T_sp, T_txt]
    encdec_attn = (encdec_attn.reshape([-1, *encdec_attn.shape[2:]])).softmax(-1)
    if type == 'best':
        # per batch item, the head whose attention is most peaked
        indices = encdec_attn.max(-1).values.sum(-1).argmax(0)
        encdec_attn = encdec_attn.gather(
            0, indices[None, :, None, None].repeat(1, 1, encdec_attn.size(-2), encdec_attn.size(-1)))[0]
        return encdec_attn
    elif type == 'mean':
        return encdec_attn.mean(0)
def get_num_heads(arch):
    """Map each op id in ``arch`` to its attention head count.

    Ops 1-7 and 11 count as single-head; ops 8/9/10 map to 2/4/8 heads.
    Any other op id contributes nothing to the result.
    """
    head_table = {8: 2, 9: 4, 10: 8}
    num_heads = []
    for op in arch:
        if op <= 7 or op == 11:
            num_heads.append(1)
        elif op in head_table:
            num_heads.append(head_table[op])
    return num_heads
def remove_padding(x, padding_idx=0):
    """Strip padding entries from a 1d [T] or 2d [T, H] array/tensor (None passes through)."""
    if x is None:
        return None
    assert len(x.shape) in [1, 2]
    if len(x.shape) == 2:
        # [T, H]: a row is padding when its absolute sum equals the pad value
        return x[np.abs(x).sum(-1) != padding_idx]
    # [T]: drop elements equal to the pad value
    return x[x != padding_idx]
class Timer:
    """Context manager that accumulates wall-clock time per named section.

    Totals live in the class-level ``timer_map`` registry, shared by all
    instances with the same name.
    """
    timer_map = {}

    def __init__(self, name, print_time=False):
        # lazily register this section name in the shared registry
        Timer.timer_map.setdefault(name, 0)
        self.name = name
        self.print_time = print_time

    def __enter__(self):
        self.t = time.time()

    def __exit__(self, exc_type, exc_val, exc_tb):
        Timer.timer_map[self.name] += time.time() - self.t
        if self.print_time:
            print(self.name, Timer.timer_map[self.name])
| 16,922 | 29.994505 | 114 | py |
NeuralSpeech | NeuralSpeech-master/LightSpeech/utils/world_utils.py | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
##########
# world
##########
import numpy as np
import pysptk
import copy
import math
import torch
import torch.nn as nn
gamma = 0
mcepInput = 3 # 0 for dB, 3 for magnitude
alpha = 0.45
en_floor = 10 ** (-80 / 20)
FFT_SIZE = 2048
def code_harmonic(sp, order):
    """Encode a spectral envelope ``sp`` into ``order`` mel-frequency coefficients.

    Each frame is converted to mel-cepstrum via pysptk, mirrored, and taken
    through a real FFT (only the real part is kept).
    """
    # get mcep
    mceps = np.apply_along_axis(pysptk.mcep, 1, sp, order - 1, alpha, itype=mcepInput, threshold=en_floor)
    # do fft and take real
    scale_mceps = copy.copy(mceps)
    # double the edge coefficients so the mirrored spectrum sums correctly
    scale_mceps[:, 0] *= 2
    scale_mceps[:, -1] *= 2
    mirror = np.hstack([scale_mceps[:, :-1], scale_mceps[:, -1:0:-1]])
    mfsc = np.fft.rfft(mirror).real
    return mfsc
def decode_harmonic(mfsc, fftlen=FFT_SIZE):
    """Inverse of :func:`code_harmonic`: recover a spectral envelope from mfsc.

    NOTE(review): the inverse hard-codes 60 cepstral coefficients
    (``mceps_mirror[:, :60]``) — confirm it matches the encoder's ``order``.
    """
    # get mcep back
    mceps_mirror = np.fft.irfft(mfsc)
    mceps_back = mceps_mirror[:, :60]
    # undo the edge doubling applied during encoding
    mceps_back[:, 0] /= 2
    mceps_back[:, -1] /= 2
    # get sp
    spSm = np.exp(np.apply_along_axis(pysptk.mgc2sp, 1, mceps_back, alpha, gamma, fftlen=fftlen).real)
    return spSm
f0_bin = 256
f0_max = 1100.0
f0_min = 50.0
f0_mel_min = 1127 * np.log(1 + f0_min / 700)
f0_mel_max = 1127 * np.log(1 + f0_max / 700)
def f0_to_coarse(f0):
    """Quantize f0 (Hz, numpy array) into coarse bins on a mel-like scale.

    Unvoiced frames (f0 == 0) stay at bin 0; voiced frames land in [1, 255].
    """
    f0_mel = 1127 * np.log(1 + f0 / 700)
    # f0_mel[f0_mel == 0] = 0
    # map voiced values linearly onto bins [1, f0_bin - 1]
    f0_mel[f0_mel > 0] = (f0_mel[f0_mel > 0] - f0_mel_min) * (f0_bin - 2) / (f0_mel_max - f0_mel_min) + 1
    f0_mel[f0_mel < 0] = 1
    f0_mel[f0_mel > f0_bin - 1] = f0_bin - 1
    # FIX: `np.int` was a deprecated alias removed in NumPy 1.24; use the
    # explicit fixed-width dtype so this keeps working on modern NumPy.
    f0_coarse = np.rint(f0_mel).astype(np.int64)
    # print('Max f0', np.max(f0_coarse), ' ||Min f0', np.min(f0_coarse))
    assert (np.max(f0_coarse) <= 256 and np.min(f0_coarse) >= 0), (f0_coarse.max(), f0_coarse.min())
    return f0_coarse
def f0_to_coarse_torch(f0):
    """Quantize an f0 tensor into coarse bins [1, 255] on a mel-like scale."""
    f0_mel = 1127 * (1 + f0 / 700).log()
    # map voiced values onto [1, f0_bin - 1]; anything below 1 clamps to 1
    voiced = f0_mel > 0
    f0_mel[voiced] = (f0_mel[voiced] - f0_mel_min) * (f0_bin - 2) / (f0_mel_max - f0_mel_min) + 1
    f0_mel[f0_mel < 1] = 1
    f0_mel[f0_mel > f0_bin - 1] = f0_bin - 1
    # round-to-nearest via +0.5 then truncation to long
    f0_coarse = (f0_mel + 0.5).long()
    assert (f0_coarse.max() <= 255 and f0_coarse.min() >= 1), (f0_coarse.max(), f0_coarse.min())
    return f0_coarse
def process_f0(f0, hparams):
    """Normalize f0 and interpolate through unvoiced gaps.

    :param f0: numpy array of f0 values (0 marks unvoiced frames)
    :param hparams: dict providing 'f0_mean' and 'f0_std'
    :return: (normalized f0 FloatTensor, unvoiced-flag FloatTensor)
    """
    normalized = (f0 - hparams['f0_mean']) / hparams['f0_std']
    # fill unvoiced frames by linear interpolation over the voiced ones
    normalized[f0 == 0] = np.interp(np.where(f0 == 0)[0], np.where(f0 > 0)[0], normalized[f0 > 0])
    uv = (torch.FloatTensor(f0) == 0).float()
    return torch.FloatTensor(normalized), uv
def restore_pitch(pitch, uv, hparams, pitch_padding=None, min=None, max=None):
    """Undo f0 normalization; unvoiced frames become 1, padded frames become 0.

    ``pitch_padding`` defaults to positions carrying the -200 sentinel
    (checked before de-normalization).
    """
    if pitch_padding is None:
        pitch_padding = pitch == -200
    pitch = pitch * hparams['f0_std'] + hparams['f0_mean']
    if min is not None:
        pitch = pitch.clamp(min=min)
    if max is not None:
        pitch = pitch.clamp(max=max)
    if uv is not None:
        # unvoiced frames get a fixed placeholder value of 1
        pitch[uv > 0] = 1
    pitch[pitch_padding] = 0
    return pitch
def gelu_accurate(x):
    """Tanh-approximation GELU, with sqrt(2/pi) cached as a function attribute."""
    if not hasattr(gelu_accurate, "_a"):
        gelu_accurate._a = math.sqrt(2 / math.pi)
    coeff = gelu_accurate._a
    return 0.5 * x * (1 + torch.tanh(coeff * (x + 0.044715 * torch.pow(x, 3))))
def gelu(x):
    """Exact (erf-based) GELU; uses the fused torch implementation when present."""
    if hasattr(torch.nn.functional, 'gelu'):
        # compute in float32 and cast back (fp16-safe)
        return torch.nn.functional.gelu(x.float()).type_as(x)
    return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))
class GeLU(nn.Module):
    """Module wrapper over the module-level erf-based :func:`gelu` helper."""
    def __init__(self):
        super().__init__()
    def forward(self, x):
        # delegate so the functional and module paths stay in sync
        return gelu(x)
class GeLUAcc(nn.Module):
    """Module wrapper over the tanh-approximation :func:`gelu_accurate` helper."""
    def __init__(self):
        super().__init__()
    def forward(self, x):
        return gelu_accurate(x)
def build_activation(act_func, inplace=True):
    """Instantiate an activation module by name.

    :param act_func: one of 'relu', 'relu6', 'gelu', 'gelu_accurate',
        'tanh', 'sigmoid', or None (returns None)
    :param inplace: forwarded to the ReLU variants only
    :raises ValueError: for any unrecognized name
    """
    if act_func is None:
        return None
    if act_func == 'relu':
        return nn.ReLU(inplace=inplace)
    if act_func == 'relu6':
        return nn.ReLU6(inplace=inplace)
    if act_func == 'gelu':
        return GeLU()
    if act_func == 'gelu_accurate':
        return GeLUAcc()
    if act_func == 'tanh':
        return nn.Tanh()
    if act_func == 'sigmoid':
        return nn.Sigmoid()
    raise ValueError('do not support: %s' % act_func)
def fix_dp_return_type(result, device):
    """Recursively move tensors in ``result`` onto ``device``; wrap bare numbers as tensors."""
    if isinstance(result, torch.Tensor):
        return result.to(device)
    if isinstance(result, dict):
        return {key: fix_dp_return_type(value, device) for key, value in result.items()}
    if isinstance(result, tuple):
        return tuple(fix_dp_return_type(item, device) for item in result)
    if isinstance(result, list):
        return [fix_dp_return_type(item, device) for item in result]
    # plain number: promote to a 1-element tensor on the target device
    return torch.Tensor([result]).to(device)
def move_id2last(arch):
    """Move identity ops (op id 27) to the tail of ``arch`` in place, keeping
    the relative order of the non-identity ops; returns the same list.
    """
    ID_OPID = 27
    n = len(arch)
    # p1 scans for identity ops; p2 points at the next non-identity op after p1
    p1 = 0
    while p1 < n and arch[p1] != ID_OPID:
        p1 += 1
    p2 = p1 + 1
    while p2 < n and arch[p2] == ID_OPID:
        p2 += 1
    while p1 < n and p2 < n:
        # literal 27 below mirrors ID_OPID
        if arch[p1] == 27:
            arch[p1], arch[p2] = arch[p2], arch[p1]
            while p2 < n and arch[p2] == 27:
                p2 += 1
        p1 += 1
    return arch
def generate_arch(n, layers, candidate_ops, adjust_id=True):
    """Sample ``n`` random architectures of ``layers`` ops from ``candidate_ops``.

    Falls back to exhaustive enumeration when the whole search space has at
    most ``n`` points. In each sampled arch, identity ops are moved to the
    tail of the encoder half and of the decoder half (see ``move_id2last``).

    :param n: number of architectures to generate
    :param layers: total layer count (encoder half is ``layers // 2``)
    :param candidate_ops: list of candidate op ids
    :param adjust_id: currently unused; kept for interface compatibility
    :return: list of architectures (each a list of op ids)
    """
    num_ops = len(candidate_ops)
    if num_ops ** layers <= n:
        return generate_all_arch(layers, candidate_ops)
    # fix: the original also bound an unused `dec_layers` local
    enc_layer = layers // 2
    def _get_arch():
        arch = [candidate_ops[np.random.randint(num_ops)] for _ in range(layers)]
        arch = move_id2last(arch[:enc_layer]) + move_id2last(arch[enc_layer:])
        return arch
    return [_get_arch() for _ in range(n)]
def sample_arch(arch_pool, prob=None):
    """Randomly pick one architecture from ``arch_pool``.

    :param arch_pool: list of architectures
    :param prob: optional unnormalized sampling weights, one per entry;
        uniform sampling when None
    :return: the selected architecture
    """
    pool_size = len(arch_pool)
    if prob is None:
        chosen = np.random.choice(pool_size)
    else:
        weights = np.array(prob, dtype=np.float32)
        weights = weights / weights.sum()
        chosen = np.random.choice(pool_size, p=weights)
    return arch_pool[chosen]
def generate_all_arch(layers, candidate_ops):
    """Enumerate every possible architecture of ``layers`` ops.

    Depth-first expansion in ``candidate_ops`` order, so the result is in
    lexicographic order with respect to the candidate list.
    """
    all_archs = []
    def _expand(prefix):
        # one-line purpose: grow `prefix` by every candidate op until full depth
        if len(prefix) == layers:
            all_archs.append(prefix)
            return
        for op in candidate_ops:
            _expand(prefix + [op])
    _expand([])
    return all_archs
def convert_to_features(arch, candidate_ops):
    """One-hot encode an architecture into a flat feature vector.

    Each op in ``arch`` contributes ``len(candidate_ops)`` entries with a 1 at
    the op's index within ``candidate_ops``.
    """
    index_of = {op: i for i, op in enumerate(candidate_ops)}
    features = []
    for op in arch:
        one_hot = [0] * len(candidate_ops)
        one_hot[index_of[op]] = 1
        features.extend(one_hot)
    return features
| 6,554 | 27.25431 | 106 | py |
NeuralSpeech | NeuralSpeech-master/LightSpeech/utils/preprocessor.py | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import json
import warnings
import struct
import webrtcvad
from skimage.transform import resize
from scipy.ndimage.morphology import binary_dilation
import pyworld as pw
import numpy as np
import torch
import librosa
import pyloudnorm as pyln
from utils import audio
from utils.world_utils import f0_to_coarse
from utils.parse_textgrid import remove_empty_lines, TextGrid
warnings.filterwarnings("ignore")
int16_max = (2 ** 15) - 1
def trim_long_silences(path, sr, return_raw_wav=False, norm=True):
    """
    Ensures that segments without voice in the waveform remain no longer than a
    threshold determined by the VAD parameters in params.py.

    :param path: path of the audio file to load
    :param sr: sample rate to load at (the VAD itself always runs at 16 kHz)
    :param return_raw_wav: if True, return the untrimmed waveform plus mask
    :param norm: if True, loudness-normalize to -20 LUFS and peak-normalize
    :return: (trimmed waveform, boolean voice mask) — or (raw waveform, mask)
        when return_raw_wav is True; the mask is aligned to the raw waveform
    """
    ## Voice Activation Detection
    # Window size of the VAD. Must be either 10, 20 or 30 milliseconds.
    # This sets the granularity of the VAD. Should not need to be changed.
    sampling_rate = 16000
    wav_raw, sr = librosa.core.load(path, sr=sr)
    if norm:
        meter = pyln.Meter(sr)  # create BS.1770 meter
        loudness = meter.integrated_loudness(wav_raw)
        wav_raw = pyln.normalize.loudness(wav_raw, loudness, -20.0)
        if np.abs(wav_raw).max() > 1.0:
            wav_raw = wav_raw / np.abs(wav_raw).max()
    # NOTE(review): positional (orig_sr, target_sr) args were removed in
    # librosa 0.10 — this call assumes an older pinned librosa; confirm.
    wav = librosa.resample(wav_raw, sr, sampling_rate, res_type='kaiser_best')
    vad_window_length = 30  # In milliseconds
    # Number of frames to average together when performing the moving average smoothing.
    # The larger this value, the larger the VAD variations must be to not get smoothed out.
    vad_moving_average_width = 8
    # Maximum number of consecutive silent frames a segment can have.
    vad_max_silence_length = 12
    # Compute the voice detection window size
    samples_per_window = (vad_window_length * sampling_rate) // 1000
    # Trim the end of the audio to have a multiple of the window size
    wav = wav[:len(wav) - (len(wav) % samples_per_window)]
    # Convert the float waveform to 16-bit mono PCM
    pcm_wave = struct.pack("%dh" % len(wav), *(np.round(wav * int16_max)).astype(np.int16))
    # Perform voice activation detection
    voice_flags = []
    vad = webrtcvad.Vad(mode=3)
    for window_start in range(0, len(wav), samples_per_window):
        window_end = window_start + samples_per_window
        voice_flags.append(vad.is_speech(pcm_wave[window_start * 2:window_end * 2],
                                         sample_rate=sampling_rate))
    voice_flags = np.array(voice_flags)
    # Smooth the voice detection with a moving average
    def moving_average(array, width):
        array_padded = np.concatenate((np.zeros((width - 1) // 2), array, np.zeros(width // 2)))
        ret = np.cumsum(array_padded, dtype=float)
        ret[width:] = ret[width:] - ret[:-width]
        return ret[width - 1:] / width
    audio_mask = moving_average(voice_flags, vad_moving_average_width)
    # fix: np.bool was removed in numpy 1.24; the builtin bool is the drop-in
    audio_mask = np.round(audio_mask).astype(bool)
    # Dilate the voiced regions
    audio_mask = binary_dilation(audio_mask, np.ones(vad_max_silence_length + 1))
    audio_mask = np.repeat(audio_mask, samples_per_window)
    audio_mask = resize(audio_mask, (len(wav_raw),)) > 0
    if return_raw_wav:
        return wav_raw, audio_mask
    return wav_raw[audio_mask], audio_mask
def process_utterance(wav_path,
                      fft_size=1024,
                      hop_size=256,
                      win_length=1024,
                      window="hann",
                      num_mels=80,
                      fmin=80,
                      fmax=7600,
                      eps=1e-10,
                      sample_rate=22050,
                      loud_norm=False,
                      min_level_db=-100,
                      return_linear=False,
                      trim_long_sil=False, vocoder='pwg'):
    """Load (or accept) a waveform and compute its mel spectrogram.

    :param wav_path: path to an audio file, or an already-loaded waveform array
    :param loud_norm: loudness-normalize to -22 LUFS before analysis
    :param return_linear: also return the normalized linear spectrogram
    :param trim_long_sil: trim long silences with VAD when loading from path
    :return: (wav, mel) or (wav, mel, linear_spc); mel has shape
        (num_mels, T) and wav is padded/truncated to mel.shape[1] * hop_size
    """
    if isinstance(wav_path, str):
        if trim_long_sil:
            wav, _ = trim_long_silences(wav_path, sample_rate)
        else:
            wav, _ = librosa.core.load(wav_path, sr=sample_rate)
    else:
        wav = wav_path
    if loud_norm:
        meter = pyln.Meter(sample_rate)  # create BS.1770 meter
        loudness = meter.integrated_loudness(wav)
        wav = pyln.normalize.loudness(wav, loudness, -22.0)
        if np.abs(wav).max() > 1:
            wav = wav / np.abs(wav).max()
    # get amplitude spectrogram
    x_stft = librosa.stft(wav, n_fft=fft_size, hop_length=hop_size,
                          win_length=win_length, window=window, pad_mode="constant")
    spc = np.abs(x_stft)  # (n_bins, T)
    # get mel basis; -1 means "use the full frequency band"
    # fix: the original used `fmin is -1` / `fmax is -1` — identity comparison
    # with an int literal is implementation-defined and a SyntaxWarning on 3.8+
    fmin = 0 if fmin == -1 else fmin
    fmax = sample_rate / 2 if fmax == -1 else fmax
    mel_basis = librosa.filters.mel(sample_rate, fft_size, num_mels, fmin, fmax)
    mel = mel_basis @ spc
    if vocoder == 'pwg':
        mel = np.log10(np.maximum(eps, mel))  # (n_mel_bins, T)
    else:
        assert False, f'"{vocoder}" is not in ["pwg"].'
    l_pad, r_pad = audio.librosa_pad_lr(wav, fft_size, hop_size, 1)
    wav = np.pad(wav, (l_pad, r_pad), mode='constant', constant_values=0.0)
    wav = wav[:mel.shape[1] * hop_size]
    if not return_linear:
        return wav, mel
    else:
        spc = audio.amp_to_db(spc)
        spc = audio.normalize(spc, {'min_level_db': min_level_db})
        return wav, mel, spc
def get_pitch(wav_data, mel, hparams):
    """Extract F0 with WORLD (dio + stonemask) aligned to the mel frames.

    :param wav_data: [T] waveform
    :param mel: [T, 80] mel spectrogram (only its length is used here)
    :param hparams: needs 'audio_sample_rate' and 'hop_size'
    :return: (f0, pitch_coarse), both of length len(mel)
    """
    _f0, t = pw.dio(wav_data.astype(np.double), hparams['audio_sample_rate'],
                    frame_period=hparams['hop_size'] / hparams['audio_sample_rate'] * 1000)
    f0 = pw.stonemask(wav_data.astype(np.double), _f0, t, hparams['audio_sample_rate'])  # pitch refinement
    delta_l = len(mel) - len(f0)
    assert np.abs(delta_l) <= 2
    if delta_l > 0:
        # pad by repeating the last frame. fix: the original concatenated raw
        # scalars (`[f0] + [f0[-1]] * delta_l`), which np.concatenate rejects
        # ("zero-dimensional arrays cannot be concatenated")
        f0 = np.concatenate([f0, np.full(delta_l, f0[-1])])
    f0 = f0[:len(mel)]
    pitch_coarse = f0_to_coarse(f0) + 1
    return f0, pitch_coarse
def get_mel2ph(tg_fn, ph, mel, hparams):
    """Build the frame-to-phoneme alignment from a Praat TextGrid file.

    :param tg_fn: path to the TextGrid alignment file
    :param ph: space-separated phoneme string matching the TextGrid items
    :param mel: mel spectrogram; only len(mel) is used
    :param hparams: needs 'audio_sample_rate' and 'hop_size'
    :return: (mel2ph, dur) where mel2ph[t] is the 1-based phoneme index of
        frame t and dur[i] is the number of frames of phoneme i
    """
    ph_list = ph.split(" ")
    with open(tg_fn, "r") as f:
        tg = f.readlines()
    tg = remove_empty_lines(tg)
    tg = TextGrid(tg)
    tg = json.loads(tg.toJson())
    # split[i] = first mel frame of phoneme i.
    # fix: np.int was removed in numpy 1.24; np.int64 is a safe replacement
    split = np.zeros(len(ph_list) + 1, np.int64)
    split[0] = 0
    split[-1] = len(mel)
    tg_idx = 0
    ph_idx = 1  # ph_list[0] is the leading <UNK>/padding token
    tg_align = [x for x in tg['tiers'][0]['items']]
    while tg_idx < len(tg_align):
        ph = ph_list[ph_idx]
        x = tg_align[tg_idx]
        if x['text'] == '':
            # silence/empty interval: no phoneme consumed
            tg_idx += 1
            continue
        if x['text'] not in ['punc', 'sep']:
            assert x['text'] == ph_list[ph_idx].lower(), (x['text'], ph_list[ph_idx])
        if x['text'] == 'sep':
            assert ph == '|', (ph, '|')
        split[ph_idx] = int(float(x['xmin']) * hparams['audio_sample_rate'] / hparams['hop_size'])
        ph_idx += 1
        tg_idx += 1
    # NOTE: the original `assert tg_idx == len(tg_align), ph_idx == len(ph_list)`
    # used the second comparison as the assert *message*, so it was never
    # checked; kept unchecked here (behavior preserved) but both values are
    # surfaced in the failure message for debugging.
    assert tg_idx == len(tg_align), (tg_idx, len(tg_align), ph_idx, len(ph_list))
    split[ph_idx] = int(float(x['xmax']) * hparams['audio_sample_rate'] / hparams['hop_size'])
    mel2ph = np.zeros([mel.shape[0]], np.int64)
    for ph_idx in range(len(ph_list)):
        mel2ph[split[ph_idx]:split[ph_idx + 1]] = ph_idx + 1
    mel2ph_torch = torch.from_numpy(mel2ph)
    T_t = len(ph_list)
    # per-phoneme durations via scatter_add over the 1-based ids; index 0 is padding
    dur = mel2ph_torch.new_zeros([T_t + 1]).scatter_add(0, mel2ph_torch, torch.ones_like(mel2ph_torch))
    dur = dur[1:].numpy()
    return mel2ph, dur
| 8,211 | 35.990991 | 107 | py |
NeuralSpeech | NeuralSpeech-master/LightSpeech/utils/tts_utils.py | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import torch
from utils.stft import STFT
def make_pad_mask(lengths, xs=None, length_dim=-1):
    """Build a mask that is True (1) on padded positions.

    Args:
        lengths (LongTensor or List): Batch of lengths (B,).
        xs (Tensor, optional): Reference tensor; when given, the mask is
            expanded to its shape, with ``length_dim`` taken as the time axis.
        length_dim (int, optional): Dimension indicator of the above tensor.

    Returns:
        Tensor: mask with True at padded indices
        (dtype=torch.uint8 on PyTorch < 1.2, torch.bool on 1.2+).

    Examples:
        >>> make_pad_mask([5, 3, 2])
        masks = [[0, 0, 0, 0, 0],
                 [0, 0, 0, 1, 1],
                 [0, 0, 1, 1, 1]]
    """
    if length_dim == 0:
        raise ValueError("length_dim cannot be 0: {}".format(length_dim))
    if not isinstance(lengths, list):
        lengths = lengths.tolist()
    batch = int(len(lengths))
    max_len = int(max(lengths)) if xs is None else xs.size(length_dim)
    positions = torch.arange(0, max_len, dtype=torch.int64)
    positions = positions.unsqueeze(0).expand(batch, max_len)
    limits = positions.new(lengths).unsqueeze(-1)
    mask = positions >= limits
    if xs is not None:
        assert xs.size(0) == batch, (xs.size(0), batch)
        if length_dim < 0:
            length_dim = xs.dim() + length_dim
        # insert singleton axes everywhere except batch and time, then broadcast
        ind = tuple(
            slice(None) if i in (0, length_dim) else None for i in range(xs.dim())
        )
        mask = mask[ind].expand_as(xs).to(xs.device)
    return mask
def make_non_pad_mask(lengths, xs=None, length_dim=-1):
    """Inverse of :func:`make_pad_mask`: True (1) on non-padded positions.

    Args:
        lengths (LongTensor or List): Batch of lengths (B,).
        xs (Tensor, optional): Reference tensor; when given, the mask is
            expanded to its shape, with ``length_dim`` taken as the time axis.
        length_dim (int, optional): Dimension indicator of the above tensor.

    Returns:
        ByteTensor: mask with True at valid (non-padded) indices
        (dtype=torch.uint8 on PyTorch < 1.2, torch.bool on 1.2+).

    Examples:
        >>> make_non_pad_mask([5, 3, 2])
        masks = [[1, 1, 1, 1, 1],
                 [1, 1, 1, 0, 0],
                 [1, 1, 0, 0, 0]]
    """
    padded = make_pad_mask(lengths, xs, length_dim)
    return ~padded
def get_mask_from_lengths(lengths):
    """Boolean mask that is True inside each sequence and False on padding.

    :param lengths: LongTensor (B,) of sequence lengths
    :return: bool tensor (B, max_len); placed on GPU when any is available
    """
    max_len = torch.max(lengths).item()
    if torch.cuda.device_count() > 0:
        positions = torch.arange(0, max_len, out=torch.cuda.LongTensor(max_len))
    else:
        positions = torch.arange(0, max_len, out=torch.LongTensor(max_len))
    return (positions < lengths.unsqueeze(1)).bool()
class GeneralDenoiser(torch.nn.Module):
    """Spectral-subtraction denoiser.

    Subtracts a (scaled, time-averaged) noise magnitude profile from the STFT
    magnitudes of the input and resynthesizes with the original phases, using
    the project-local STFT module.
    """
    def __init__(self, filter_length=1024, n_overlap=4, win_length=1024):
        super(GeneralDenoiser, self).__init__()
        self.stft = STFT(filter_length=filter_length,
                         hop_length=int(filter_length / n_overlap),
                         win_length=win_length)
        if torch.cuda.device_count() > 0:
            self.stft = self.stft.cuda()
    def forward(self, audio, noise_audio=None, strength=0.3):
        # When no noise sample is given, fall back to a flat magnitude profile
        # of 0.1 over 513 bins — appears to assume the default
        # filter_length=1024 (1024/2+1 = 513); confirm for other sizes.
        if torch.cuda.device_count() > 0:
            audio_spec, audio_angles = self.stft.transform(audio.cuda().float())
            noise_audio_spec = self.stft.transform(noise_audio.cuda().float())[0] \
                if noise_audio is not None else torch.ones([1, 513, 1]).cuda() * 0.1
        else:
            audio_spec, audio_angles = self.stft.transform(audio.float())
            noise_audio_spec = self.stft.transform(noise_audio.float())[0] \
                if noise_audio is not None else torch.ones([1, 513, 1]) * 0.1
        # average the noise magnitudes over time, scale by `strength`,
        # subtract, and clamp negatives to zero before inversion
        noise_audio_spec = noise_audio_spec.mean(-1)[:, :, None] * strength
        audio_spec_denoised = audio_spec - noise_audio_spec
        audio_spec_denoised = torch.clamp(audio_spec_denoised, 0.0)
        audio_denoised = self.stft.inverse(audio_spec_denoised, audio_angles)
        return audio_denoised
| 8,863 | 38.048458 | 84 | py |
NeuralSpeech | NeuralSpeech-master/LightSpeech/tasks/base_task.py | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import os
import sys
import random
import logging
import matplotlib
matplotlib.use('Agg')
import numpy as np
import torch.distributed as dist
from pytorch_lightning.logging import TensorBoardLogger
from torch import nn
import torch.utils.data
import utils
from utils.hparams import hparams, set_hparams
from utils.pl_utils import LatestModelCheckpoint, BaseTrainer, data_loader, DDP
log_format = '%(asctime)s %(message)s'
logging.basicConfig(stream=sys.stdout, level=logging.INFO,
format=log_format, datefmt='%m/%d %I:%M:%S %p')
class BaseDataset(torch.utils.data.Dataset):
    """Common base for the TTS datasets.

    Stores paths/hparams and implements size-aware index ordering for
    length-bucketed batching. Subclasses must implement ``__getitem__`` and
    ``collater`` and populate ``self.sizes``.
    """
    def __init__(self, data_dir, prefix, hparams, shuffle):
        super().__init__()
        self.hparams = hparams
        self.shuffle = shuffle
        self.data_dir = data_dir
        self.prefix = prefix
        self.sort_by_len = hparams['sort_by_len']
        self.sizes = None
    @property
    def _sizes(self):
        # per-item lengths; filled in by subclasses
        return self.sizes
    def __getitem__(self, index):
        raise NotImplementedError
    def collater(self, samples):
        raise NotImplementedError
    def __len__(self):
        return len(self._sizes)
    def num_tokens(self, index):
        return self.size(index)
    def size(self, index):
        """Return an example's size as a float or tuple. This value is used when
        filtering a dataset with ``--max-positions``."""
        size = min(self._sizes[index], hparams['max_frames'])
        return size
    def ordered_indices(self):
        """Return an ordered list of indices. Batches will be constructed based
        on this order."""
        if self.shuffle:
            indices = np.random.permutation(len(self))
            if self.sort_by_len:
                # mergesort is stable, so the shuffle order survives among equal sizes
                indices = indices[np.argsort(np.array(self._sizes)[indices], kind='mergesort')]
        else:
            indices = np.arange(len(self))
        return indices
    @property
    def num_workers(self):
        # DataLoader worker count, overridable via the NUM_WORKERS env var
        return int(os.getenv('NUM_WORKERS', 1))
class BaseTask(nn.Module):
    """Base training task.

    Glues a model, its optimizer/scheduler, and the pytorch-lightning-style
    trainer together. Subclasses implement ``build_model``,
    ``build_optimizer``, ``build_scheduler`` and the ``*_step`` hooks.
    """
    def __init__(self, *args, **kwargs):
        # dataset configs
        super(BaseTask, self).__init__(*args, **kwargs)
        self.current_epoch = 0
        self.global_step = 0
        self.loaded_optimizer_states_dict = {}
        self.trainer = None
        self.logger = None
        self.on_gpu = False
        self.use_dp = False
        self.use_ddp = False
        self.example_input_array = None
        self.max_tokens = hparams['max_tokens']
        self.max_sentences = hparams['max_sentences']
        self.max_eval_tokens = hparams['max_eval_tokens']
        # -1 means "fall back to the training batch limits"
        if self.max_eval_tokens == -1:
            hparams['max_eval_tokens'] = self.max_eval_tokens = self.max_tokens
        self.max_eval_sentences = hparams['max_eval_sentences']
        if self.max_eval_sentences == -1:
            hparams['max_eval_sentences'] = self.max_eval_sentences = self.max_sentences
        print('| set hparams: ')
        for i, (k, v) in enumerate(sorted(hparams.items())):
            print(f"\033[;33;m{k}\033[0m: {v}, ", end="\n" if i % 5 == 4 else "")
        print("")
        self.model = None
        self.training_losses_meter = None
    ###########
    # Training, validation and testing
    ###########
    def build_model(self):
        raise NotImplementedError
    def on_epoch_start(self):
        self.training_losses_meter = {'total_loss': utils.AverageMeter()}
    def _training_step(self, sample, batch_idx, optimizer_idx):
        """
        :param sample:
        :param batch_idx:
        :return: total loss: torch.Tensor, loss_log: dict
        """
        raise NotImplementedError
    def training_step(self, sample, batch_idx, optimizer_idx=-1):
        # wraps _training_step: updates the running loss meters and builds the
        # progress-bar/tensorboard log dicts expected by the trainer
        loss_ret = self._training_step(sample, batch_idx, optimizer_idx)
        self.opt_idx = optimizer_idx
        if loss_ret is None:
            return {'loss': None}
        total_loss, log_outputs = loss_ret
        log_outputs = utils.tensors_to_scalars(log_outputs)
        for k, v in log_outputs.items():
            if k not in self.training_losses_meter:
                self.training_losses_meter[k] = utils.AverageMeter()
            if not np.isnan(v):
                self.training_losses_meter[k].update(v)
        self.training_losses_meter['total_loss'].update(total_loss.item())
        try:
            log_outputs['lr'] = self.scheduler.get_lr()
            if isinstance(log_outputs['lr'], list):
                log_outputs['lr'] = log_outputs['lr'][0]
        except:
            # scheduler may not exist yet (e.g. during sanity check)
            pass
        log_outputs['all_loss'] = total_loss.item()
        if optimizer_idx != -1:
            log_outputs[f'loss_{optimizer_idx}'] = log_outputs.pop('all_loss')
        progress_bar_log = log_outputs
        tb_log = {f'tr/{k}': v for k, v in log_outputs.items()}
        ret = {
            'loss': total_loss,
            'progress_bar': progress_bar_log,
            'log': tb_log
        }
        return ret
    def optimizer_step(self, epoch, batch_idx, optimizer, optimizer_idx):
        optimizer.step()
        optimizer.zero_grad()
        # scheduler is stepped in optimizer steps, not micro-batches
        self.scheduler.step(self.global_step // hparams['accumulate_grad_batches'])
    def on_epoch_end(self):
        loss_outputs = {k: round(v.avg, 4) for k, v in self.training_losses_meter.items()}
        print(f"\n==============\n "
              f"Epoch {self.current_epoch} ended. Steps: {self.global_step}. {loss_outputs}"
              f"\n==============\n")
    def validation_step(self, sample, batch_idx):
        """
        :param sample:
        :param batch_idx:
        :return: output: dict
        """
        raise NotImplementedError
    def _validation_end(self, outputs):
        """
        :param outputs:
        :return: loss_output: dict
        """
        raise NotImplementedError
    def validation_end(self, outputs):
        loss_output = self._validation_end(outputs)
        print(f"\n==============\n "
              f"valid results: {loss_output}"
              f"\n==============\n")
        return {
            'log': {f'val/{k}': v for k, v in loss_output.items()},
            'val_loss': loss_output['total_loss']
        }
    def build_scheduler(self, optimizer):
        raise NotImplementedError
    def build_optimizer(self, model):
        raise NotImplementedError
    def configure_optimizers(self):
        set_hparams()
        self.model = self.build_model()
        print(self.model)
        optm = self.build_optimizer(self.model)
        self.scheduler = self.build_scheduler(optm)
        return [optm]
    def test_start(self):
        pass
    def test_step(self, sample, batch_idx):
        return self.validation_step(sample, batch_idx)
    def test_end(self, outputs):
        return self.validation_end(outputs)
    ###########
    # Running configuration
    ###########
    @classmethod
    def start(cls):
        # entry point: seeds RNGs, builds the trainer and runs fit/test
        set_hparams()
        os.environ['MASTER_PORT'] = str(random.randint(15000, 30000))
        random.seed(hparams['seed'])
        np.random.seed(hparams['seed'])
        task = cls()
        trainer = BaseTrainer(checkpoint_callback=LatestModelCheckpoint(
            filepath=hparams['work_dir'],
            verbose=True,
            monitor='val_loss',
            mode='min',
            num_keep=5,
            period=1 if hparams['save_ckpt'] else 100000
        ),
            logger=TensorBoardLogger(
                save_dir=hparams['work_dir'],
                name='lightning_logs',
                version='lastest'
            ),
            gradient_clip_val=hparams['clip_grad_norm'],
            val_check_interval=hparams['val_check_interval'],
            row_log_interval=hparams['log_interval'],
            max_updates=hparams['max_updates'],
            num_sanity_val_steps=hparams['num_sanity_val_steps'] if not hparams[
                'validate'] else 10000,
            accumulate_grad_batches=hparams['accumulate_grad_batches'],
            resume_from_checkpoint=hparams['resume_from_checkpoint'],
            show_progress_bar=hparams['show_progress_bar'])
        if not hparams['infer']:  # train
            trainer.checkpoint_callback.task = task
            trainer.fit(task)
        else:
            trainer.test(task)
    def configure_ddp(self, model, device_ids):
        model = DDP(
            model,
            device_ids=device_ids,
            find_unused_parameters=True
        )
        # silence all non-rank-0 processes unless debugging
        if dist.get_rank() != 0 and not hparams['debug']:
            sys.stdout = open(os.devnull, "w")
            sys.stderr = open(os.devnull, "w")
        random.seed(hparams['seed'])
        np.random.seed(hparams['seed'])
        return model
    def training_end(self, *args, **kwargs):
        return None
    def init_ddp_connection(self, proc_rank, world_size):
        # guarantees unique ports across jobs from same grid search
        default_port = 12910
        # if user gave a port number, use that one instead
        try:
            default_port = os.environ['MASTER_PORT']
        except Exception:
            os.environ['MASTER_PORT'] = str(default_port)
        # figure out the root node addr
        root_node = '127.0.0.2'
        root_node = self.trainer.resolve_root_node_address(root_node)
        os.environ['MASTER_ADDR'] = root_node
        dist.init_process_group('nccl', rank=proc_rank, world_size=world_size)
    @data_loader
    def train_dataloader(self):
        return None
    @data_loader
    def test_dataloader(self):
        return None
    @data_loader
    def val_dataloader(self):
        return None
    def on_load_checkpoint(self, checkpoint):
        pass
    def on_save_checkpoint(self, checkpoint):
        pass
    def on_sanity_check_start(self):
        pass
    def on_train_start(self):
        pass
    def on_train_end(self):
        pass
    def on_batch_start(self, batch):
        pass
    def on_batch_end(self):
        pass
    def on_pre_performance_check(self):
        pass
    def on_post_performance_check(self):
        pass
    def on_before_zero_grad(self, optimizer):
        pass
    def on_after_backward(self):
        pass
    def backward(self, loss, optimizer):
        loss.backward()
    def grad_norm(self, norm_type):
        # per-parameter and total gradient norms for logging.
        # NOTE(review): if no parameter has a grad, total_norm stays the int 0
        # and the final `.data` access would fail — confirm this cannot happen.
        results = {}
        total_norm = 0
        for name, p in self.named_parameters():
            if p.requires_grad:
                try:
                    param_norm = p.grad.data.norm(norm_type)
                    total_norm += param_norm ** norm_type
                    norm = param_norm ** (1 / norm_type)
                    grad = round(norm.data.cpu().numpy().flatten()[0], 3)
                    results['grad_{}_norm_{}'.format(norm_type, name)] = grad
                except Exception:
                    # this param had no grad
                    pass
        total_norm = total_norm ** (1. / norm_type)
        grad = round(total_norm.data.cpu().numpy().flatten()[0], 3)
        results['grad_{}_norm_total'.format(norm_type)] = grad
        return results
| 11,465 | 31.207865 | 98 | py |
NeuralSpeech | NeuralSpeech-master/LightSpeech/tasks/lightspeech_inference.py | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import os, glob, re
from tqdm import tqdm
import numpy as np
import torch
import utils
from utils.hparams import hparams, set_hparams
from tasks.lightspeech import LightSpeechDataset, LightSpeechTask
# populate the global `hparams` dict from config/CLI before anything else runs
set_hparams()
def get_latest_ckpt(dir):
    """Return the path of the newest checkpoint in ``dir``.

    Checkpoints are files named ``model_ckpt_steps_<N>.ckpt``; the one with
    the largest step count N is returned.

    :param dir: directory to search
    :return: path of the latest checkpoint
    :raises IndexError: when no matching file exists in ``dir``
    """
    # fix: the pattern is now a raw string — the original '\_' was an invalid
    # escape sequence (DeprecationWarning) and the backslash was unnecessary
    ckpt_list = sorted(glob.glob(f'{dir}/model_ckpt_steps_*.ckpt'),
                       key=lambda x: -int(re.findall(r'.*steps_(\d+)\.ckpt', x)[0]))
    print("INFO: located checkpoint {}. loading...".format(ckpt_list[0]))
    return ckpt_list[0]
# build LightSpeechTask then the model itself
task = LightSpeechTask()
task.model = task.build_model()
# load the latest checkpoint from work_dir defined in hparams
ckpt = torch.load(get_latest_ckpt(hparams['work_dir']))
task.global_step = ckpt['global_step']
task.load_state_dict(ckpt['state_dict'])
# load the model to gpu
task.model.eval().cuda()
# prepare vocoder
task.prepare_vocoder()
# define LightSpeechDataset. will only use the functions (text_to_phone and phone_to_prior) and not the actual test dataset
dataset = LightSpeechDataset(hparams['data_dir'], task.phone_encoder, None, hparams, shuffle=False, infer_only=True)
# inference requires phoneme input and the corresponding target_mean and target_std
with open(hparams['inference_text'], 'r') as f:
    user_text = f.readlines()
# create sample dir inside work_dir in hparams
gen_dir = os.path.join(hparams['work_dir'], f"inference_{hparams['inference_text']}_{task.global_step}")
os.makedirs(gen_dir, exist_ok=True)
os.makedirs(f'{gen_dir}/wavs', exist_ok=True)
os.makedirs(f'{gen_dir}/spec_plot', exist_ok=True)
os.makedirs(f'{gen_dir}/pitch_plot', exist_ok=True)
# perform text-to-speech then save mel and wav
with torch.no_grad():
    for i, text in enumerate(tqdm(user_text)):
        text = text.strip()
        # text -> encoded phoneme ids, batched as a single-item batch on GPU
        phone = torch.LongTensor(dataset.text_to_phone(text))
        phone = phone.unsqueeze(0).cuda()
        output = task.model(phone, None, None, None, None, None)
        output['outputs'] = output['mel_out']
        _output = utils.unpack_dict_to_list(output)[0]
        # move every tensor of the (single-item) output dict to numpy
        output = {}
        for k, v in _output.items():
            if type(v) is torch.Tensor:
                output[k] = v.cpu().numpy()
        mel_out = task.remove_padding(output['mel_out'])
        noise_outputs = task.remove_padding(output.get("noise_outputs"))
        pitch_pred = task.remove_padding(output.get("pitch"))
        wav_out = task.inv_spec(mel_out, pitch_pred, noise_outputs)
        # save mel and wav
        task.save_result(wav_out, mel_out, f'P', i, text, gen_dir, pitch=pitch_pred)
| 2,707 | 37.685714 | 123 | py |
NeuralSpeech | NeuralSpeech-master/LightSpeech/tasks/lightspeech.py | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import os
import sys
import re
import glob
import logging
import json
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from multiprocessing.pool import Pool
from tqdm import tqdm
import numpy as np
import torch
import torch.optim
import torch.utils.data
import torch.nn.functional as F
import torch.distributed as dist
from modules.lightspeech import LightSpeech
from modules.tts_modules import DurationPredictorLoss
from tasks.base_task import BaseDataset, BaseTask
import utils
from utils.pl_utils import data_loader
from utils.hparams import hparams
from utils.indexed_datasets import IndexedDataset
from utils.text_encoder import TokenTextEncoder
from utils import audio
from utils.pwg_decode_from_mel import generate_wavegan, load_pwg_model
from utils.plot import plot_to_figure
from utils.world_utils import restore_pitch, process_f0
from utils.tts_utils import GeneralDenoiser
from g2p_en import G2p
log_format = '%(asctime)s %(message)s'
logging.basicConfig(stream=sys.stdout, level=logging.INFO,
format=log_format, datefmt='%m/%d %I:%M:%S %p')
class LightSpeechDataset(BaseDataset):
    """A dataset that provides helpers for batching.

    Loads the binarized LightSpeech items (mel, phones, alignment, f0) and
    collates them into padded batches. With ``infer_only=True`` only the
    text-to-phoneme helpers are usable; no data files are touched.
    """
    def __init__(self, data_dir, phone_encoder, prefix, hparams, shuffle=False, infer_only=False):
        super().__init__(data_dir, prefix, hparams, shuffle)
        self.phone_encoder = phone_encoder
        self.infer_only = infer_only
        if not self.infer_only:
            self.data = None
            self.idx2key = np.load(f'{self.data_dir}/{self.prefix}_all_keys.npy')
            self.sizes = np.load(f'{self.data_dir}/{self.prefix}_lengths.npy')
        self.num_spk = hparams['num_spk']
        self.use_indexed_ds = hparams['indexed_ds']
        self.indexed_bs = None
        self.g2p = G2p()
        if not self.infer_only:
            # filter out items with no pitch
            f0s = np.load(f'{self.data_dir}/{prefix}_f0s.npy', allow_pickle=True)
            self.avail_idxs = [i for i, f0 in enumerate(f0s) if sum(f0) > 0]
            self.sizes = [self.sizes[i] for i in self.avail_idxs]
            # pitch stats (mean/std of all voiced training-set f0 values)
            f0s = np.load(f'{self.data_dir}/train_f0s.npy', allow_pickle=True)
            f0s = np.concatenate(f0s, 0)
            f0s = f0s[f0s != 0]
            hparams['f0_mean'] = self.f0_mean = np.mean(f0s).item()
            hparams['f0_std'] = self.f0_std = np.std(f0s).item()
    def text_to_phone(self, txt):
        # function that converts the user-given text to phoneme sequence used in LightSpeech
        # the implementation mirrors datasets/tts/lj/prepare.py and datasets/tts/lj/gen_fs2_p.py
        # input: text string
        # output: encoded phoneme string
        phs = [p.replace(" ", "|") for p in self.g2p(txt)]
        ph = " ".join(phs)
        ph = "<UNK> " + ph + " <EOS>"
        phone_encoded = self.phone_encoder.encode(ph)
        return phone_encoded
    def _get_item(self, index):
        """Load one raw item, either from per-key .npy files or the indexed DS."""
        if not self.use_indexed_ds:
            key = self.idx2key[index]
            item = np.load(f'{self.data_dir}/{self.prefix}/{key}.npy', allow_pickle=True).item()
        else:
            if self.indexed_bs is None:
                # lazy open so each DataLoader worker gets its own handle
                self.indexed_bs = IndexedDataset(f'{self.data_dir}/{self.prefix}')
            item = self.indexed_bs[index]
        return item
    def __getitem__(self, index):
        hparams = self.hparams
        # map from the filtered index space to the raw one
        index = self.avail_idxs[index]
        key = self.idx2key[index]
        item = self._get_item(index)
        spec = torch.Tensor(item['mel'])
        # frame energy derived from the (log-)mel; truncated like everything else
        energy = (spec.exp() ** 2).sum(-1).sqrt()[:hparams['max_frames']]
        mel2ph = torch.LongTensor(item['mel2ph'])[:hparams['max_frames']]
        f0, uv = process_f0(item["f0"], hparams)
        phone = torch.LongTensor(item['phone'][:hparams['max_input_tokens']])
        sample = {
            "id": index,
            "utt_id": key,
            "text": item['txt'],
            "source": phone,
            "target": spec[:hparams['max_frames']],
            "pitch": torch.LongTensor(item.get("pitch"))[:hparams['max_frames']],
            "energy": energy,
            "f0": f0[:hparams['max_frames']],
            "uv": uv[:hparams['max_frames']],
            "mel2ph": mel2ph,
        }
        if self.num_spk > 1:
            sample["spk_id"] = item['spk_id']
            sample["spk_embed"] = item['spk_embed']
        return sample
    def collater(self, samples):
        """Pad and stack a list of samples into one batch dict."""
        if len(samples) == 0:
            return {}
        pad_idx = self.phone_encoder.pad()
        id = torch.LongTensor([s['id'] for s in samples])
        utt_ids = [s['utt_id'] for s in samples]
        text = [s['text'] for s in samples]
        src_tokens = utils.collate_1d([s['source'] for s in samples], pad_idx)
        # f0 padding uses -200 as the "no pitch" sentinel
        f0 = utils.collate_1d([s['f0'] for s in samples], -200) if self.hparams['use_pitch_embed'] else None
        uv = utils.collate_1d([s['uv'] for s in samples]) if self.hparams['use_pitch_embed'] else None
        energy = utils.collate_1d([s['energy'] for s in samples], pad_idx) if self.hparams['use_energy_embed'] else None
        mel2ph = utils.collate_1d([s['mel2ph'] for s in samples], pad_idx)
        target = utils.collate_2d([s['target'] for s in samples], pad_idx)
        prev_output_mels = utils.collate_2d([s['target'] for s in samples], pad_idx, shift_right=True)
        src_lengths = torch.LongTensor([s['source'].numel() for s in samples])
        target_lengths = torch.LongTensor([s['target'].shape[0] for s in samples])
        ntokens = sum(len(s['source']) for s in samples)
        nmels = sum(len(s['target']) for s in samples)
        batch = {
            'id': id,
            'utt_id': utt_ids,
            'nsamples': len(samples),
            'ntokens': ntokens,
            'nmels': nmels,
            'text': text,
            'src_tokens': src_tokens,
            'mel2ph': mel2ph,
            'src_lengths': src_lengths,
            'targets': target,
            'energy': energy,
            'target_lengths': target_lengths,
            'prev_output_mels': prev_output_mels,
            'pitch': f0,
            'uv': uv,
        }
        if self.num_spk > 1:
            spk_ids = torch.LongTensor([s['spk_id'] for s in samples])
            spk_embed = torch.FloatTensor([s['spk_embed'] for s in samples])
            batch['spk_ids'] = spk_ids
            batch['spk_embed'] = spk_embed
        return batch
class RSQRTSchedule(object):
    """Reciprocal-sqrt learning-rate schedule with linear warmup.

    lr(step) = max(base_lr * min(step/warmup, 1)
                   * max(warmup, step)^-0.5 * hidden_size^-0.5, 1e-7)
    """

    def __init__(self, optimizer):
        super().__init__()
        self.optimizer = optimizer
        self.constant_lr = hparams['lr']
        self.warmup_updates = hparams['warmup_updates']
        self.hidden_size = hparams['hidden_size']
        self.lr = hparams['lr']
        for group in optimizer.param_groups:
            group['lr'] = self.lr
        self.step(0)

    def step(self, num_updates):
        """Recompute the LR for `num_updates` and push it into the optimizer."""
        warmup_frac = min(num_updates / self.warmup_updates, 1.0)
        decay = max(self.warmup_updates, num_updates) ** -0.5
        hidden_scale = self.hidden_size ** -0.5
        self.lr = max(self.constant_lr * warmup_frac * decay * hidden_scale, 1e-7)
        for group in self.optimizer.param_groups:
            group['lr'] = self.lr
        return self.lr

    def get_lr(self):
        return self.optimizer.param_groups[0]['lr']
class LightSpeechTask(BaseTask):
    """Training/validation/inference task wrapper for the LightSpeech TTS model."""

    def __init__(self):
        super().__init__()
        self.arch = hparams['arch']
        # The arch may be supplied as a whitespace-separated string of ints.
        if isinstance(self.arch, str):
            self.arch = list(map(int, self.arch.strip().split()))
        if self.arch is not None:
            # Head counts are derived from the decoder slice of the arch spec.
            self.num_heads = utils.get_num_heads(self.arch[hparams['enc_layers']:])
        self.vocoder = None  # lazily created in prepare_vocoder()
        self.phone_encoder = self.build_phone_encoder(hparams['data_dir'])
        self.padding_idx = self.phone_encoder.pad()
        self.eos_idx = self.phone_encoder.eos()
        self.seg_idx = self.phone_encoder.seg()
        # Async worker pool for writing inference artifacts (see after_infer).
        self.saving_result_pool = None
        self.saving_results_futures = None
        self.stats = {}
        self.dur_loss_fn = DurationPredictorLoss()
        self.mse_loss_fn = torch.nn.MSELoss()
@data_loader
def train_dataloader(self):
    """Build the shuffled training dataloader (endless when configured)."""
    dataset = LightSpeechDataset(
        hparams['data_dir'], self.phone_encoder,
        hparams['train_set_name'], hparams, shuffle=True)
    return self.build_dataloader(
        dataset, True, self.max_tokens, self.max_sentences,
        endless=hparams['endless_ds'])
@data_loader
def val_dataloader(self):
    """Build the (non-shuffled) validation dataloader."""
    dataset = LightSpeechDataset(
        hparams['data_dir'], self.phone_encoder,
        hparams['valid_set_name'], hparams, shuffle=False)
    return self.build_dataloader(
        dataset, False, self.max_eval_tokens, self.max_eval_sentences)
@data_loader
def test_dataloader(self):
    """Build the (non-shuffled) test dataloader."""
    dataset = LightSpeechDataset(
        hparams['data_dir'], self.phone_encoder,
        hparams['test_set_name'], hparams, shuffle=False)
    return self.build_dataloader(
        dataset, False, self.max_eval_tokens, self.max_eval_sentences)
def build_dataloader(self, dataset, shuffle, max_tokens=None, max_sentences=None,
                     required_batch_size_multiple=-1, endless=False):
    """Create a DataLoader with token/sentence-capped dynamic batching.

    Caps are scaled by the number of visible CUDA devices. Under DDP, only
    batches whose size divides evenly across ranks are kept, and each rank
    takes a round-robin shard of every kept batch.
    NOTE(review): the `endless` flag is accepted but not used in this body —
    presumably handled elsewhere; confirm.
    """
    if required_batch_size_multiple == -1:
        required_batch_size_multiple = torch.cuda.device_count() if torch.cuda.device_count() > 0 else 1
    if max_tokens is not None:
        max_tokens *= torch.cuda.device_count() if torch.cuda.device_count() > 0 else 1
    if max_sentences is not None:
        max_sentences *= torch.cuda.device_count() if torch.cuda.device_count() > 0 else 1
    indices = dataset.ordered_indices()
    batch_sampler = utils.batch_by_size(
        indices, dataset.num_tokens, max_tokens=max_tokens, max_sentences=max_sentences,
        required_batch_size_multiple=required_batch_size_multiple,
    )
    if shuffle:
        batches = torch.utils.data.sampler.SubsetRandomSampler(batch_sampler)
        if self.trainer.use_ddp:
            num_replicas = dist.get_world_size()
            rank = dist.get_rank()
            # Drop batches not divisible across ranks, shard the rest.
            batches = [x[rank::num_replicas] for x in batches if len(x) % num_replicas == 0]
            batches = torch.utils.data.sampler.SubsetRandomSampler(batches)
    else:
        batches = batch_sampler
        if self.trainer.use_ddp:
            num_replicas = dist.get_world_size()
            rank = dist.get_rank()
            batches = [x[rank::num_replicas] for x in batches if len(x) % num_replicas == 0]
    return torch.utils.data.DataLoader(
        dataset,
        collate_fn=dataset.collater,
        batch_sampler=batches,
        num_workers=dataset.num_workers,
        pin_memory=True
    )
def build_phone_encoder(self, data_dir):
    """Load the phone vocabulary (`phone_set.json`) and wrap it in a
    TokenTextEncoder.

    Fix: the vocab file was previously opened without being closed
    (`json.load(open(...))`); it is now closed deterministically.
    """
    phone_list_file = os.path.join(data_dir, 'phone_set.json')
    with open(phone_list_file) as f:
        phone_list = json.load(f)
    return TokenTextEncoder(None, vocab_list=phone_list)
def build_model(self):
    """Instantiate the LightSpeech model and log its total parameter count."""
    model = LightSpeech(self.arch, self.phone_encoder)
    logging.info('Model Size: {}'.format(utils.count_parameters(model)))
    return model
def build_scheduler(self, optimizer):
    """Return the warmup + reciprocal-sqrt LR scheduler for `optimizer`."""
    return RSQRTSchedule(optimizer)
def build_optimizer(self, model):
    """Create the AdamW optimizer from hparams; also stored on `self.optimizer`."""
    self.optimizer = optimizer = torch.optim.AdamW(
        model.parameters(),
        lr=hparams['lr'],
        betas=(hparams['optimizer_adam_beta1'], hparams['optimizer_adam_beta2']),
        weight_decay=hparams['weight_decay'])
    return optimizer
def _training_step(self, sample, batch_idx, _):
    """One optimization step: run the model and sum all differentiable losses.

    Returns (total_loss, log_dict); non-grad entries in the loss dict are
    logging-only scalars and are excluded from the summed loss.
    """
    input = sample['src_tokens']  # [B, T_t]
    target = sample['targets']  # [B, T_s, 80]
    mel2ph = sample['mel2ph']  # [B, T_s]
    pitch = sample['pitch']
    energy = sample['energy']
    uv = sample['uv']
    spk_embed = sample.get('spk_embed') if not hparams['use_spk_id'] else sample.get('spk_ids')
    loss_output, output = self.run_model(self.model, input, mel2ph, spk_embed, target,
                                         pitch=pitch, uv=uv, energy=energy,
                                         return_output=True)
    # Generator expression instead of a throwaway list; target.size(0)
    # instead of target.size()[0].
    total_loss = sum(v for v in loss_output.values() if v.requires_grad)
    loss_output['batch_size'] = target.size(0)
    return total_loss, loss_output
def loss(self, decoder_output, target):
    """Tacotron-style mel (L1 + L2) and stop-token loss.

    NOTE(review): this looks like a legacy autoregressive-decoder loss; the
    feed-forward training path uses run_model/l1_loss instead — confirm usage.
    """
    # decoder_output : B x T x (mel+1)
    # target : B x T x mel
    predicted_mel = decoder_output[:, :, :hparams['audio_num_mel_bins']]
    predicted_stop = decoder_output[:, :, -1]
    seq_mask, stop_mask = self.make_stop_target(target)
    l1_loss = F.l1_loss(predicted_mel, target, reduction='none')
    l2_loss = F.mse_loss(predicted_mel, target, reduction='none')
    # Average only over non-padding (nonzero) frames.
    weights = self.weights_nonzero_speech(target)
    l1_loss = (l1_loss * weights).sum() / weights.sum()
    l2_loss = (l2_loss * weights).sum() / weights.sum()
    # Positive (stop) frames are up-weighted by hparams['stop_token_weight'];
    # the denominator compensates for that extra weight.
    stop_loss = (self.weighted_cross_entropy_with_logits(stop_mask, predicted_stop,
                                                         hparams['stop_token_weight']) * seq_mask).sum()
    stop_loss = stop_loss / (seq_mask.sum() + target.size(0) * (hparams['stop_token_weight'] - 1))
    return {
        'l1': l1_loss,
        'l2': l2_loss,
        'stop_loss': stop_loss,
    }
def validation_step(self, sample, batch_idx):
    """Compute validation losses for one batch; plot pitch for early batches."""
    input = sample['src_tokens']
    target = sample['targets']
    mel2ph = sample['mel2ph']
    pitch = sample['pitch']
    energy = sample['energy']
    uv = sample['uv']
    spk_embed = sample.get('spk_embed') if not hparams['use_spk_id'] else sample.get('spk_ids')
    outputs = {}
    outputs['losses'] = {}
    outputs['losses'], model_out = self.run_model(self.model, input, mel2ph, spk_embed, target,
                                                  pitch=pitch, uv=uv,
                                                  energy=energy,
                                                  return_output=True)
    outputs['total_loss'] = outputs['losses']['mel']
    outputs['nmels'] = sample['nmels']
    outputs['nsamples'] = sample['nsamples']
    outputs = utils.tensors_to_scalars(outputs)
    # Plot GT vs predicted pitch for the first 10 batches only.
    if batch_idx < 10:
        if 'pitch_logits' in model_out:
            # NOTE(review): mutates `pitch` (and pitch_pred) in place — unvoiced
            # frames are clamped to -4 purely for plotting.
            pitch[uv > 0] = -4
            pitch_pred = model_out['pitch_logits'][:, :, 0]
            pitch_pred[model_out['pitch_logits'][:, :, 1] > 0] = -4
            self.logger.experiment.add_figure(f'pitch_{batch_idx}', plot_to_figure({
                'gt': pitch[0].detach().cpu().numpy(),
                'pred': pitch_pred[0].detach().cpu().numpy()
            }), self.global_step)
    return outputs
def _validation_end(self, outputs):
    """Aggregate per-batch validation losses into sample-weighted averages."""
    meters = {'total_loss': utils.AverageMeter()}
    for out in outputs:
        n = out['nsamples']
        for name, value in out['losses'].items():
            if name not in meters:
                meters[name] = utils.AverageMeter()
            meters[name].update(value, n)
        meters['total_loss'].update(out['total_loss'], n)
    return {name: round(meter.avg, 4) for name, meter in meters.items()}
def test_step(self, sample, batch_idx):
    """Autoregressive inference for one batch (Transformer-TTS style).

    NOTE(review): this method is shadowed by the second `test_step`
    definition later in this class, so this version is dead code as written.
    """
    logging.info('inferring batch {} with {} samples'.format(batch_idx, sample['nsamples']))
    with utils.Timer('trans_tts', print_time=hparams['profile_infer']):
        decoded_mel, encdec_attn, hit_eos, _, focus_rate, phone_coverage_rate, diagonal_focus_rate = \
            self.infer_batch(sample)
    # Drop the initial (always-zero) EOS column.
    hit_eos = hit_eos[:, 1:]
    outputs = decoded_mel
    predict_lengths = (1.0 - hit_eos.float()).sum(dim=-1)
    # Zero out frames after EOS was hit.
    outputs *= (1.0 - hit_eos.float())[:, :, None]
    sample['outputs'] = outputs
    sample['predict_mels'] = decoded_mel
    sample['predict_lengths'] = predict_lengths
    sample['encdec_attn'] = encdec_attn
    self.after_infer(sample)
def infer_batch(self, sample):
    """Autoregressively decode mels for a batch, frame by frame.

    When hparams['attn_constraint'] is set, decoding runs in two stages:
    stage 0 decodes freely to identify "diagonal" attention heads, then the
    decoder state is reset and stage 1 re-decodes with an encoder-decoder
    attention constraint mask that tracks a moving attention position.

    Returns (decoded_mel, encdec_attn, hit_eos, stop_logits,
             focus_rate, phone_coverage_rate, diagonal_focus_rate).
    """
    model = self.model
    input = sample['src_tokens']
    bsz = input.size(0)
    max_input_len = input.size(1)
    decode_length = self.estimate_decode_length(max_input_len)
    encoder_outputs = model.forward_encoder(input)
    encoder_out = encoder_outputs['encoder_out']
    encoder_padding_mask = encoder_outputs['encoder_padding_mask']
    # hit_eos starts with one dummy column; real flags are appended per step.
    hit_eos = input.new(bsz, 1).fill_(0).bool()
    stop_logits = input.new(bsz, 0).fill_(0).float()
    stage = 0
    decoder_input = input.new(bsz, decode_length + 1, hparams['audio_num_mel_bins']).fill_(
        0).float()
    decoded_mel = input.new(bsz, 0, hparams['audio_num_mel_bins']).fill_(0).float()
    encdec_attn_logits = []
    for i in range(hparams['dec_layers']):
        encdec_attn_logits.append(input.new(bsz, self.num_heads[i], 0, max_input_len).fill_(0).float())
    attn_pos = input.new(bsz).fill_(0).int()
    use_masks = []
    for i in range(hparams['dec_layers']):
        use_masks.append(input.new(self.num_heads[i]).fill_(0).float())
    incremental_state = {}
    step = 0
    if hparams['attn_constraint']:
        for i, layer in enumerate(model.decoder.layers):
            enc_dec_attn_constraint_mask = input.new(bsz, self.num_heads[i], max_input_len).fill_(0).int()
            layer.set_buffer('enc_dec_attn_constraint_mask', enc_dec_attn_constraint_mask, incremental_state)

    def is_finished(step, decode_length, hit_eos, stage):
        # Done when max length reached or every sentence hit EOS; under the
        # attention constraint, stage 0 never terminates the loop itself.
        finished = step >= decode_length
        finished |= (hit_eos[:, -1].sum() == hit_eos.size(0)).cpu().numpy()
        if hparams['attn_constraint']:
            finished &= stage != 0
        return finished

    while True:
        if is_finished(step, decode_length, hit_eos, stage):
            break
        decoder_output, attn_logits = model.forward_decoder(
            decoder_input[:, :step + 1], encoder_out,
            encoder_padding_mask,
            incremental_state=incremental_state
        )
        next_mel = decoder_output[:, -1:, :hparams['audio_num_mel_bins']]
        stop_logit = decoder_output[:, -1:, -1]
        stop_logits = torch.cat((stop_logits, stop_logit), dim=1)
        decoded_mel = torch.cat((decoded_mel, next_mel), dim=1)
        for i in range(hparams['dec_layers']):
            encdec_attn_logits[i] = torch.cat((encdec_attn_logits[i], attn_logits[i]), dim=2)
        step += 1
        this_hit_eos = hit_eos[:, -1:]
        if hparams['attn_constraint']:
            # Require the attention position to be near the source end before
            # accepting a stop decision.
            this_hit_eos |= (attn_pos[:, None] >= (encoder_padding_mask < 1.0).float().sum(dim=-1,
                                                                                           keepdim=True).int() - 5) & (torch.sigmoid(stop_logit) > 0.5)
        else:
            this_hit_eos |= torch.sigmoid(stop_logit) > 0.5
        hit_eos = torch.cat((hit_eos, this_hit_eos), dim=1)
        decoder_input[:, step] = next_mel[:, -1]
        if hparams['attn_constraint']:
            stage_change_step = 50
            all_prev_weights = []
            for i in range(hparams['dec_layers']):
                all_prev_weights.append(torch.softmax(encdec_attn_logits[i], dim=-1))  # bsz x head x L x L_kv
            # if the stage should change
            next_stage = (step == stage_change_step) | (step >= decode_length)
            next_stage |= (hit_eos[:, -1].sum() == hit_eos.size(0)).cpu().numpy()
            next_stage &= (stage == 0)
            # choose the diagonal attention
            if next_stage:  # TODO
                use_masks = []
                for i in range(hparams['dec_layers']):
                    # A head is "usable" if its max attention weight is high
                    # on average (i.e. it attends sharply / diagonally).
                    use_mask = (all_prev_weights[i][:, :, :step].max(dim=-1).values.mean(
                        dim=(0, 2)) > 0.6).float()  # [head]
                    use_masks.append(use_mask)
                attn_pos = input.new(bsz).fill_(0).int()
                # reset when the stage changes
                for layer in model.decoder.layers:
                    layer.clear_buffer(input, encoder_out, encoder_padding_mask, incremental_state)
                encdec_attn_logits = []
                for i in range(hparams['dec_layers']):
                    encdec_attn_logits.append(
                        input.new(bsz, self.num_heads[i], 0, max_input_len).fill_(0).float())
                decoded_mel = input.new(bsz, 0, hparams['audio_num_mel_bins']).fill_(0).float()
                decoder_input = input.new(bsz, decode_length + 1, hparams['audio_num_mel_bins']).fill_(
                    0).float()
                hit_eos = input.new(bsz, 1).fill_(0).bool()
                stage = stage + 1
                step = 0
            # Allowed window: positions < attn_pos-1 and >= attn_pos+4 are masked.
            prev_weights_mask1 = utils.sequence_mask(
                torch.max(attn_pos - 1, attn_pos.new(attn_pos.size()).fill_(0)).float(),
                encdec_attn_logits[0].size(-1)).float()  # bsz x L_kv
            prev_weights_mask2 = 1.0 - utils.sequence_mask(attn_pos.float() + 4,
                                                           encdec_attn_logits[0].size(-1)).float()  # bsz x L_kv
            enc_dec_attn_constraint_masks = []
            for i in range(hparams['dec_layers']):
                mask = (prev_weights_mask1 + prev_weights_mask2)[:, None, :] * use_masks[i][None, :, None]  # bsz x head x L_kv
                enc_dec_attn_constraint_masks.append(mask)
            for i, layer in enumerate(model.decoder.layers):
                enc_dec_attn_constraint_mask = enc_dec_attn_constraint_masks[i]
                layer.set_buffer('enc_dec_attn_constraint_mask', enc_dec_attn_constraint_mask,
                                 incremental_state)

            def should_move_on():
                # Advance attn_pos when recent attention mass has left the
                # current position and the argmax already moved past it.
                prev_weights = []
                for i in range(hparams['dec_layers']):
                    prev_weight = (all_prev_weights[i] * use_masks[i][None, :, None, None]).sum(dim=1)
                    prev_weights.append(prev_weight)
                prev_weights = sum(prev_weights) / sum([mask.sum() for mask in use_masks])
                move_on = (prev_weights[:, -3:].mean(dim=1).gather(1, attn_pos[:, None].long())).squeeze(-1) < 0.7
                move_on &= torch.argmax(prev_weights[:, -1], -1) > attn_pos.long()
                return attn_pos + move_on.int()

            if step > 3 and stage == 1:
                attn_pos = should_move_on()
    encdec_attn = utils.select_attn(encdec_attn_logits)
    if not hparams['profile_infer']:
        src_lengths = sample['src_lengths'] - 1  # exclude eos
        target_lengths = (1.0 - hit_eos[:, 1:].float()).sum(dim=-1) + 1
        src_padding_mask = input.eq(0) | input.eq(self.eos_idx)  # also exclude eos
        src_seg_mask = input.eq(self.seg_idx)
        target_padding_mask = decoded_mel.abs().sum(-1).eq(0)
        focus_rate = utils.get_focus_rate(encdec_attn, src_padding_mask, target_padding_mask)
        phone_coverage_rate = utils.get_phone_coverage_rate(encdec_attn, src_padding_mask, src_seg_mask,
                                                            target_padding_mask)
        attn_ks = src_lengths.float() / target_lengths.float()
        diagonal_focus_rate, diag_mask = utils.get_diagonal_focus_rate(encdec_attn, attn_ks, target_lengths,
                                                                       src_padding_mask,
                                                                       target_padding_mask)
    else:
        focus_rate, phone_coverage_rate, diagonal_focus_rate = None, None, None
    return decoded_mel, encdec_attn.unsqueeze(
        1), hit_eos, stop_logits, focus_rate, phone_coverage_rate, diagonal_focus_rate
def estimate_decode_length(self, input_length):
    """Heuristic upper bound on decoder steps for `input_length` source tokens."""
    return 100 + 5 * input_length
def prepare_vocoder(self):
    """Lazily load the Parallel WaveGAN vocoder (and denoiser) on first use.

    Picks the checkpoint with the largest step count in the checkpoint dir.
    Fix: the regex patterns used `\\d` inside non-raw f-strings, which is an
    invalid escape sequence (DeprecationWarning, SyntaxError in future
    Python); they are now raw f-strings with identical pattern content.
    """
    if self.vocoder is None:
        if hparams['vocoder'] == 'pwg':
            if hparams['vocoder_ckpt'] == '':
                base_dir = 'wavegan_pretrained'
                ckpts = glob.glob(f'{base_dir}/checkpoint-*steps.pkl')
                ckpt = sorted(ckpts, key=
                lambda x: int(re.findall(rf'{base_dir}/checkpoint-(\d+)steps.pkl', x)[0]))[-1]
                config_path = f'{base_dir}/config.yml'
            else:
                base_dir = hparams['vocoder_ckpt']
                config_path = f'{base_dir}/config.yml'
                ckpt = sorted(glob.glob(f'{base_dir}/model_ckpt_steps_*.ckpt'), key=
                lambda x: int(re.findall(rf'{base_dir}/model_ckpt_steps_(\d+).ckpt', x)[0]))[-1]
            print('| load wavegan: ', ckpt)
            self.vocoder = load_pwg_model(
                config_path=config_path,
                checkpoint_path=ckpt,
                stats_path=f'{base_dir}/stats.h5',
            )
            self.denoiser = GeneralDenoiser()
def inv_spec(self, spec, pitch=None, noise_spec=None):
    """
    Convert a mel spectrogram to a waveform with the loaded vocoder.

    :param spec: [T, 80]
    :return: waveform as a numpy array
    NOTE(review): `pitch` is unused here, and `wav_out` is unbound when
    hparams['vocoder'] != 'pwg' (would raise NameError) — confirm 'pwg' is
    the only supported vocoder.
    """
    if hparams['vocoder'] == 'pwg':
        wav_out = generate_wavegan(spec, *self.vocoder, profile=hparams['profile_infer'])
        if hparams['gen_wav_denoise']:
            # Use a synthesized "noise profile" waveform for denoising, if given.
            noise_out = generate_wavegan(noise_spec, *self.vocoder)[None, :] \
                if noise_spec is not None else None
            wav_out = self.denoiser(wav_out[None, :], noise_out)[0, 0]
        wav_out = wav_out.cpu().numpy()
    return wav_out
@staticmethod
def save_result(wav_out, mel, prefix, utt_id, text, gen_dir,
                pitch=None, noise_spec=None, alignment=None, str_phs=None):
    """Write one inference result to disk: wav, spec plot, and optionally
    pitch curve and attention-alignment plot.

    Runs inside a multiprocessing pool worker (see after_infer).
    NOTE(review): `noise_spec` is accepted but unused here — confirm.
    """
    # ':' is not filesystem-safe; filenames are capped at 80 chars of text.
    base_fn = f'[{prefix}][{utt_id}]'
    base_fn += text.replace(":", "%3A")[:80]
    audio.save_wav(wav_out, f'{gen_dir}/wavs/{base_fn}.wav', hparams['audio_sample_rate'],
                   norm=hparams['out_wav_norm'])
    audio.plot_spec(mel.T, f'{gen_dir}/spec_plot/{base_fn}.png')
    if pitch is not None:
        audio.plot_curve(pitch, f'{gen_dir}/pitch_plot/{base_fn}.png', 50, 500)
    if alignment is not None:
        fig, ax = plt.subplots(figsize=(12, 16))
        im = ax.imshow(alignment, aspect='auto', origin='lower',
                       interpolation='none')
        decoded_txt = str_phs.split(" ")
        ax.set_yticks(np.arange(len(decoded_txt)))
        ax.set_yticklabels(list(decoded_txt), fontsize=6)
        fig.colorbar(im, ax=ax)
        fig.savefig(f'{gen_dir}/attn_plot/{base_fn}_attn.png', format='png')
        plt.close()
def test_end(self, outputs):
if self.saving_result_pool is not None:
self.saving_result_pool.close()
[f.get() for f in tqdm(self.saving_results_futures)]
self.saving_result_pool.join()
return {}
def run_model(self, model, input, mel2ph, spk_embed, target,
              return_output=False, ref_mel='tgt', pitch=None, uv=None, energy=None):
    """Run one forward pass and compute the full loss dict.

    Losses: 'mel' (L1 or MSE per hparams['mel_loss']), 'dur', and optionally
    'uv'/'f0' and 'energy'. Returns `losses` or `(losses, output)`.
    """
    hparams['global_steps'] = self.global_step
    losses = {}
    # 'tgt' means: use the ground-truth mel as the reference mel input.
    if ref_mel == 'tgt':
        ref_mel = target
    output = model(input, mel2ph, spk_embed, ref_mel, pitch, uv, energy)
    if hparams['mel_loss'] == 'l1':
        losses['mel'] = self.l1_loss(output['mel_out'], target)
    if hparams['mel_loss'] == 'mse':
        losses['mel'] = self.mse_loss(output['mel_out'], target)
    losses['dur'] = self.dur_loss(output['dur'], mel2ph, input)
    if hparams['use_pitch_embed']:
        p_pred = output['pitch_logits']
        losses['uv'], losses['f0'] = self.pitch_loss(p_pred, pitch, uv)
        # pitch_loss returns uv_loss=None when use_uv is disabled.
        if losses['uv'] is None:
            del losses['uv']
    if hparams['use_energy_embed']:
        losses['energy'] = self.energy_loss(output['energy_pred'], energy)
    if not return_output:
        return losses
    else:
        return losses, output
def l1_loss(self, decoder_output, target):
    """Masked L1 between predicted and target mels (B x T x n_mel);
    padding frames (all-zero mels) are excluded from the average."""
    per_element = F.l1_loss(decoder_output, target, reduction='none')
    weights = self.weights_nonzero_speech(target)
    return (per_element * weights).sum() / weights.sum()
def mse_loss(self, decoder_output, target):
    """Masked MSE between predicted and target mels (B x T x n_mel);
    padding frames (all-zero mels) are excluded from the average."""
    per_element = F.mse_loss(decoder_output, target, reduction='none')
    weights = self.weights_nonzero_speech(target)
    return (per_element * weights).sum() / weights.sum()
def dur_loss(self, dur_pred, mel2ph, input, split_pause=False, sent_dur_loss=False):
    """Phone-duration loss.

    Ground-truth durations are derived from mel2ph by counting frames per
    phone. With split_pause, pause tokens (seg/unk/eos) are scored separately;
    with sent_dur_loss, an additional sentence-level duration L1 is returned.
    """
    B, T_t = input.shape
    # Count frames per phone index; slot 0 collects padding and is dropped.
    dur_gt = mel2ph.new_zeros(B, T_t + 1).scatter_add(1, mel2ph, torch.ones_like(mel2ph))
    dur_gt = dur_gt[:, 1:]
    nonpadding = (input != 0).float()
    if split_pause:
        is_pause = (input == self.phone_encoder.seg()) | (input == self.phone_encoder.unk()) | (
                input == self.phone_encoder.eos())
        is_pause = is_pause.float()
        phone_loss = self.dur_loss_fn(dur_pred, dur_gt, (1 - is_pause) * nonpadding) \
                     * hparams['lambda_dur']
        seg_loss = self.dur_loss_fn(dur_pred, dur_gt, is_pause) \
                   * hparams['lambda_dur']
        return phone_loss, seg_loss
    ph_dur_loss = self.dur_loss_fn(dur_pred, dur_gt, nonpadding) * hparams['lambda_dur']
    if not sent_dur_loss:
        return ph_dur_loss
    else:
        # dur_pred is in log domain — presumably log(dur + 1); confirm
        # against DurationPredictorLoss.
        dur_pred = (dur_pred.exp() - 1).clamp(min=0) * nonpadding
        dur_gt = dur_gt.float() * nonpadding
        # Relative error of total predicted sentence duration.
        sent_dur_loss = F.l1_loss(dur_pred.sum(-1), dur_gt.sum(-1), reduction='none') / dur_gt.sum(-1)
        sent_dur_loss = sent_dur_loss.mean()
        return ph_dur_loss, sent_dur_loss
def pitch_loss(self, p_pred, pitch, uv):
    """Pitch (f0) and voiced/unvoiced losses.

    p_pred[..., 0] is the f0 prediction, p_pred[..., 1] the UV logit.
    Frames with pitch == -200 are padding (the f0 pad value from collater).
    Returns (uv_loss or None, pitch_loss).
    """
    assert p_pred[..., 0].shape == pitch.shape
    assert p_pred[..., 0].shape == uv.shape
    nonpadding = (pitch != -200).float().reshape(-1)
    if hparams['use_uv']:
        uv_loss = (F.binary_cross_entropy_with_logits(
            p_pred[:, :, 1].reshape(-1), uv.reshape(-1), reduction='none') * nonpadding).sum() \
                  / nonpadding.sum() * hparams['lambda_uv']
        # For the f0 loss, additionally exclude unvoiced frames.
        nonpadding = (pitch != -200).float() * (uv == 0).float()
        nonpadding = nonpadding.reshape(-1)
    else:
        # NOTE(review): mutates `pitch` in place, clamping unvoiced frames
        # to -4 — callers see the modified tensor.
        pitch[uv > 0] = -4
        uv_loss = None
    pitch_loss_fn = F.l1_loss if hparams['pitch_loss'] == 'l1' else F.mse_loss
    pitch_loss = (pitch_loss_fn(
        p_pred[:, :, 0].reshape(-1), pitch.reshape(-1), reduction='none') * nonpadding).sum() \
                 / nonpadding.sum() * hparams['lambda_pitch']
    return uv_loss, pitch_loss
def energy_loss(self, energy_pred, energy):
    """Masked MSE on frame energies; zero-energy frames count as padding."""
    mask = (energy != 0).float()
    per_frame = F.mse_loss(energy_pred, energy, reduction='none')
    return (per_frame * mask).sum() / mask.sum() * hparams['lambda_energy']
def test_step(self, sample, batch_idx):
    """Feed-forward inference for one batch, then hand off to after_infer.

    In profiling mode, ground-truth mel2ph/pitch/uv are fed to the model;
    otherwise they are predicted.
    """
    spk_embed = sample.get('spk_embed') if not hparams['use_spk_id'] else sample.get('spk_ids')
    input = sample['src_tokens']
    if hparams['profile_infer']:
        # Periodically free cached GPU memory during long profiling runs.
        if batch_idx % 10 == 0:
            torch.cuda.empty_cache()
        mel2ph = sample['mel2ph']
        pitch = sample['pitch']
        uv = sample['uv']
    else:
        mel2ph = None
        pitch = None
        uv = None
    with utils.Timer('model_time', print_time=hparams['profile_infer']):
        outputs = self.model(input, mel2ph, spk_embed, None, pitch, uv)
    # denoise: synthesize a short "noise profile" mel from a dummy input
    # (token id 3, zero pitch, all-unvoiced) for the waveform denoiser.
    if hparams['gen_wav_denoise']:
        mel2ph_pred = outputs['mel2ph']
        input_noise = torch.ones_like(input[:, :1]).long() * 3
        mel2ph_noise = torch.ones_like(mel2ph_pred)
        mel2ph_noise = mel2ph_noise * (mel2ph_pred > 0).long()
        mel2ph_noise = mel2ph_noise[:, :40]
        pitch_noise = torch.zeros_like(mel2ph_pred).float()[:, :40]
        uv_noise = torch.ones_like(mel2ph_pred)[:, :40]
        noise_outputs = self.model(input_noise, mel2ph_noise, spk_embed, None, pitch_noise, uv_noise)
        sample['noise_outputs'] = noise_outputs['mel_out']
    sample['outputs'] = outputs['mel_out']
    sample['pitch_pred'] = outputs.get('pitch')
    sample['pitch'] = restore_pitch(sample['pitch'], uv if hparams['use_uv'] else None, hparams)
    #if hparams['profile_infer']:
    #    return {}
    return self.after_infer(sample)
def after_infer(self, predictions):
    """Vocode predicted (and GT) mels and asynchronously save wavs/plots.

    In profiling mode nothing is written; only generated-audio seconds are
    accumulated in self.stats.
    """
    if self.saving_result_pool is None and not hparams['profile_infer']:
        # File writing happens in a worker pool so inference isn't blocked.
        self.saving_result_pool = Pool(8)
        self.saving_results_futures = []
    self.prepare_vocoder()
    predictions = utils.unpack_dict_to_list(predictions)
    if hparams['show_progress_bar']:
        t = tqdm(predictions)
    else:
        t = predictions
    for i, prediction in enumerate(t):
        for k, v in prediction.items():
            if type(v) is torch.Tensor:
                prediction[k] = v.cpu().numpy()
        utt_id = prediction.get('utt_id')
        text = prediction.get('text')
        targets = self.remove_padding(prediction.get("targets"))
        outputs = self.remove_padding(prediction["outputs"])
        noise_outputs = self.remove_padding(prediction.get("noise_outputs"))
        pitch_pred = self.remove_padding(prediction.get("pitch_pred"))
        # -200 is the f0 padding value used by the collater.
        pitch_gt = self.remove_padding(prediction.get("pitch"), -200)
        gen_dir = os.path.join(hparams['work_dir'],
                               f'generated_{self.trainer.global_step}_{hparams["gen_dir_name"]}')
        wav_pred = self.inv_spec(outputs, pitch_pred, noise_outputs)
        if not hparams['profile_infer']:
            os.makedirs(gen_dir, exist_ok=True)
            os.makedirs(f'{gen_dir}/wavs', exist_ok=True)
            os.makedirs(f'{gen_dir}/spec_plot', exist_ok=True)
            os.makedirs(f'{gen_dir}/pitch_plot', exist_ok=True)
            # 'P' = predicted, 'G' = ground truth in the output filenames.
            self.saving_results_futures.append(
                self.saving_result_pool.apply_async(self.save_result, args=[
                    wav_pred, outputs, f'P', utt_id, text, gen_dir, [pitch_pred, pitch_gt], noise_outputs]))
            wav_gt = self.inv_spec(targets, pitch_gt, noise_outputs)
            if targets is not None:
                self.saving_results_futures.append(
                    self.saving_result_pool.apply_async(self.save_result, args=[
                        wav_gt, targets, 'G', utt_id, text, gen_dir, pitch_gt, noise_outputs]))
            t.set_description(
                f"Pred_shape: {outputs.shape}, gt_shape: {targets.shape}")
        else:
            if 'gen_wav_time' not in self.stats:
                self.stats['gen_wav_time'] = 0
            self.stats['gen_wav_time'] += len(wav_pred) / hparams['audio_sample_rate']
            print('gen_wav_time: ', self.stats['gen_wav_time'])
    return {}
def remove_padding(self, x, padding_idx=0):
    """Strip padding (value `padding_idx`) via the shared utils helper."""
    return utils.remove_padding(x, padding_idx)
def weights_nonzero_speech(self, target):
    """Per-frame weight mask (B x T x mel): 1.0 where the frame has any
    nonzero mel value, 0.0 for padding frames."""
    n_mel = target.size(-1)
    frame_is_speech = target.abs().sum(-1, keepdim=True).ne(0).float()
    return frame_is_speech.repeat(1, 1, n_mel)
def make_stop_target(self, target):
    """Build (sequence mask, stop-token target) from a padded mel tensor.

    The stop target is 1 from the final real frame onward, 0 before it.
    """
    seq_mask = target.abs().sum(-1).ne(0).float()
    seq_length = seq_mask.sum(1)
    stop_target = 1 - utils.sequence_mask(seq_length - 1, target.size(1)).float()
    return seq_mask, stop_target
def weighted_cross_entropy_with_logits(self, targets, logits, pos_weight=1):
    """Numerically-stable sigmoid cross-entropy with a positive-class weight:
    (1 - z) * x + (1 + (q - 1) * z) * (log(1 + exp(-|x|)) + max(-x, 0))."""
    log_weight = 1 + (pos_weight - 1) * targets
    stable_softplus = torch.log(1 + torch.exp(-logits.abs())) + F.relu(-logits)
    return (1 - targets) * logits + log_weight * stable_softplus
if __name__ == '__main__':
    # CLI entry point — start() is inherited (presumably from BaseTask,
    # defined outside this file).
    LightSpeechTask.start()
| 37,184 | 44.681818 | 131 | py |
lm-scorer | lm-scorer-master/lm_scorer/models/gpt2.py | from typing import * # pylint: disable=wildcard-import,unused-wildcard-import
import torch
from transformers import AutoTokenizer, GPT2LMHeadModel
from transformers import GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP
from transformers.tokenization_utils import BatchEncoding
from .abc.transformers import TransformersLMScorer
class GPT2LMScorer(TransformersLMScorer):
    """LM scorer backed by a HuggingFace GPT-2 causal language model."""

    # @overrides
    def _build(self, model_name: str, options: Dict[str, Any]) -> None:
        super()._build(model_name, options)
        # pylint: disable=attribute-defined-outside-init
        self.tokenizer = AutoTokenizer.from_pretrained(
            model_name, use_fast=True, add_special_tokens=False
        )
        # Add the pad token to GPT2 dictionary.
        # len(tokenizer) = vocab_size + 1
        self.tokenizer.add_special_tokens({"additional_special_tokens": ["<|pad|>"]})
        self.tokenizer.pad_token = "<|pad|>"
        self.model = GPT2LMHeadModel.from_pretrained(model_name)
        # We need to resize the embedding layer because we added the pad token.
        self.model.resize_token_embeddings(len(self.tokenizer))
        self.model.eval()
        if "device" in options:
            self.model.to(options["device"])

    def _add_special_tokens(self, text: str) -> str:
        """Wrap a sentence in BOS/EOS so the first real token is also scored."""
        return self.tokenizer.bos_token + text + self.tokenizer.eos_token

    # @overrides
    def _tokens_log_prob_for_batch(
        self, text: List[str]
    ) -> List[Tuple[torch.DoubleTensor, torch.LongTensor, List[str]]]:
        """Score one batch; returns (log_probs, token_ids, tokens) per sentence."""
        outputs: List[Tuple[torch.DoubleTensor, torch.LongTensor, List[str]]] = []
        if len(text) == 0:
            return outputs
        # TODO: Handle overflowing elements for long sentences
        text = list(map(self._add_special_tokens, text))
        encoding: BatchEncoding = self.tokenizer.batch_encode_plus(
            text, return_tensors="pt",
        )
        with torch.no_grad():
            ids = encoding["input_ids"].to(self.model.device)
            attention_mask = encoding["attention_mask"].to(self.model.device)
            nopad_mask = ids != self.tokenizer.pad_token_id
            logits: torch.Tensor = self.model(ids, attention_mask=attention_mask)[0]
        for sent_index in range(len(text)):
            sent_nopad_mask = nopad_mask[sent_index]
            # len(tokens) = len(text[sent_index]) + 1 (BOS is dropped, EOS kept)
            sent_tokens = [
                tok
                for i, tok in enumerate(encoding.tokens(sent_index))
                if sent_nopad_mask[i] and i != 0
            ]
            # sent_ids.shape = [len(text[sent_index]) + 1]
            sent_ids = ids[sent_index, sent_nopad_mask][1:]
            # logits.shape = [len(text[sent_index]) + 1, vocab_size]
            # Logits at position i predict token i+1, hence the [:-1] shift.
            sent_logits = logits[sent_index, sent_nopad_mask][:-1, :]
            # The pad token must never receive probability mass.
            sent_logits[:, self.tokenizer.pad_token_id] = float("-inf")
            # ids_scores.shape = [seq_len + 1]
            sent_ids_scores = sent_logits.gather(1, sent_ids.unsqueeze(1)).squeeze(1)
            # log_prob.shape = [seq_len + 1] — normalize via log-softmax.
            sent_log_probs = sent_ids_scores - sent_logits.logsumexp(1)
            sent_log_probs = cast(torch.DoubleTensor, sent_log_probs)
            sent_ids = cast(torch.LongTensor, sent_ids)
            output = (sent_log_probs, sent_ids, sent_tokens)
            outputs.append(output)
        return outputs

    # @overrides
    @classmethod
    def _supported_model_names(cls) -> Iterable[str]:
        return GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys()
| 3,499 | 39.697674 | 85 | py |
lm-scorer | lm-scorer-master/lm_scorer/models/abc/base.py | from typing import * # pylint: disable=wildcard-import,unused-wildcard-import
from abc import ABC, abstractmethod
import math
import torch
class LMScorer(ABC):
    """Abstract base for language-model scorers of sentences and tokens."""

    def __init__(self, model_name: str, **kwargs: Any) -> None:
        self._build(model_name, kwargs)

    @overload
    def sentence_score(
        self, text: str, log: bool = False, reduce: str = "prod"
    ) -> float:
        ...

    @overload
    def sentence_score(
        self, text: List[str], log: bool = False, reduce: str = "prod"
    ) -> List[float]:
        ...

    def sentence_score(
        self, text: Union[str, List[str]], log: bool = False, reduce: str = "prod",
    ) -> Union[float, List[float]]:
        """Score sentence(s) by combining token log-probs with `reduce`
        (prod, mean, gmean, or hmean); probabilities unless `log` is set."""
        single_input = isinstance(text, str)
        sentences = [text] if single_input else text
        scores: List[float] = []
        if not sentences:
            return scores
        for log_probs, _, _ in self._tokens_log_prob(sentences):
            tlen = log_probs.shape[0]
            # All reductions are computed in log space for stability.
            if reduce == "prod":
                score = log_probs.sum()
            elif reduce == "mean":
                score = log_probs.logsumexp(0) - math.log(tlen)
            elif reduce == "gmean":
                score = log_probs.mean(0)
            elif reduce == "hmean":
                score = log_probs.neg().logsumexp(0).neg() + math.log(tlen)
            else:
                raise ValueError("Unrecognized scoring strategy: %s" % reduce)
            final = score if log else score.exp()
            scores.append(final.item())
        return scores[0] if single_input else scores

    @overload
    def tokens_score(
        self, text: str, log: bool = False
    ) -> Tuple[List[float], List[int], List[str]]:
        ...

    @overload
    def tokens_score(
        self, text: List[str], log: bool = False
    ) -> List[Tuple[List[float], List[int], List[str]]]:
        ...

    def tokens_score(
        self, text: Union[str, List[str]], log: bool = False
    ) -> Union[
        Tuple[List[float], List[int], List[str]],
        List[Tuple[List[float], List[int], List[str]]],
    ]:
        """Return per-token (scores, ids, tokens) for sentence(s)."""
        single_input = isinstance(text, str)
        sentences = [text] if single_input else text
        results: List[Tuple[List[float], List[int], List[str]]] = []
        if not sentences:
            return results
        for log_probs, ids, tokens in self._tokens_log_prob(sentences):
            token_scores = log_probs if log else log_probs.exp()
            token_scores = cast(torch.DoubleTensor, token_scores)
            results.append((token_scores.tolist(), ids.tolist(), tokens))
        return results[0] if single_input else results

    @classmethod
    def supported_model_names(cls) -> Iterable[str]:
        """Public accessor for the model names this scorer supports."""
        return cls._supported_model_names()

    def _build(self, model_name: str, options: Dict[str, Any]) -> None:
        # pylint: disable=attribute-defined-outside-init, unused-argument
        self.model_name = model_name

    @abstractmethod
    def _tokens_log_prob(
        self, text: List[str]
    ) -> List[Tuple[torch.DoubleTensor, torch.LongTensor, List[str]]]:
        ...  # pragma: no cover

    @classmethod
    @abstractmethod
    def _supported_model_names(cls) -> Iterable[str]:
        ...  # pragma: no cover
| 3,273 | 30.480769 | 83 | py |
lm-scorer | lm-scorer-master/lm_scorer/models/abc/batch.py | # pylint: disable=abstract-method
from typing import * # pylint: disable=wildcard-import,unused-wildcard-import
from abc import abstractmethod
import torch
from .base import LMScorer
class BatchedLMScorer(LMScorer):
    """LMScorer that splits scoring requests into fixed-size batches."""

    # @overrides
    def _build(self, model_name: str, options: Dict[str, Any]) -> None:
        super()._build(model_name, options)
        batch_size = options.get("batch_size", 1)
        if batch_size < 1:
            raise ValueError("The batch_size option must be positive")
        # pylint: disable=attribute-defined-outside-init
        self.batch_size = batch_size

    # @overrides
    def _tokens_log_prob(
        self, text: List[str]
    ) -> List[Tuple[torch.DoubleTensor, torch.LongTensor, List[str]]]:
        """Score `text` in chunks of at most `self.batch_size` sentences."""
        results = []
        for start in range(0, len(text), self.batch_size):
            chunk = text[start : start + self.batch_size]
            results.extend(self._tokens_log_prob_for_batch(chunk))
        return results

    @abstractmethod
    def _tokens_log_prob_for_batch(
        self, text: List[str]
    ) -> List[Tuple[torch.DoubleTensor, torch.LongTensor, List[str]]]:
        ...  # pragma: no cover
| 1,148 | 30.916667 | 78 | py |
lm-scorer | lm-scorer-master/lm_scorer/bin/cli.py | #!/usr/bin/env python3
from typing import * # pylint: disable=wildcard-import,unused-wildcard-import
import argparse
import itertools
import os
import sys
import torch
from ..models.auto import AutoLMScorer as LMScorer
def parse_args() -> argparse.Namespace:
    """Build the CLI argument parser and parse sys.argv."""
    parser = argparse.ArgumentParser(
        description="Get sentences probability using a language model.",
    )
    parser.add_argument(
        "sentences_file_path",
        metavar="sentences-file-path",
        type=str,
        help="A file containing sentences to score, one per line."
        " If - is given as filename it reads from stdin instead.",
    )
    parser.add_argument(
        "--model-name",
        "-m",
        type=str,
        default="gpt2",
        help="The pretrained language model to use. Can be one of: %s."
        % ", ".join(LMScorer.supported_model_names()),
    )
    parser.add_argument(
        "--tokens",
        "-t",
        action="store_true",
        help="If provided it provides the probability of each token of each sentence.",
    )
    parser.add_argument(
        "--log-prob",
        "-lp",
        action="store_true",
        help="If provided log probabilities are returned instead.",
    )
    parser.add_argument(
        "--reduce",
        "-r",
        type=str,
        default="prod",
        help="Reduce strategy applied on token probabilities to get the sentence score."
        " Available strategies are: prod, mean, gmean, hmean.",
    )
    parser.add_argument(
        "--batch-size",
        "-b",
        type=int,
        default=1,
        help="Number of sentences to process in parallel.",
    )
    parser.add_argument(
        "--significant-figures",
        "-sf",
        type=int,
        default=5,
        help="Number of significant figures to use when printing numbers.",
    )
    parser.add_argument(
        "--cuda",
        type=int,
        default=-1,
        help="If provided it runs the model on the given cuda device.",
    )
    parser.add_argument(
        "--debug",
        action="store_true",
        help="If provided it provides additional logging in case of errors.",
    )
    return parser.parse_args()
def normalize_args(args: argparse.Namespace) -> None:
    """Canonicalize CLI arguments in place ("-" means stdin and is kept as-is)."""
    path = args.sentences_file_path
    if path != "-":
        args.sentences_file_path = os.path.realpath(path)
def validate_args(args: argparse.Namespace) -> None:
    """Raise ValueError when any parsed argument is unusable."""
    reads_stdin = args.sentences_file_path == "-"
    if not reads_stdin and not os.path.isfile(args.sentences_file_path):
        raise ValueError("The provided sentences file path is invalid.")

    if args.cuda >= 0 and not torch.cuda.is_available():
        raise ValueError("No Cuda device found.")
    device_count = torch.cuda.device_count()
    if args.cuda >= device_count:
        raise ValueError("Invalid Cuda device: %d/%d." % (args.cuda, device_count))

    if args.batch_size <= 0:
        raise ValueError("The batch size must be positive.")

    if args.significant_figures <= 0:
        raise ValueError("The number of significant figures must be positive.")
T1 = TypeVar("T1")  # pylint: disable=invalid-name


def grouper(iterable: Iterable[T1], size: int) -> Generator[List[T1], None, None]:
    """Yield successive chunks of at most `size` items from `iterable`."""
    iterator = iter(iterable)  # pylint: disable=invalid-name
    while True:
        batch = list(itertools.islice(iterator, size))
        if not batch:
            break
        yield batch
def main(args: argparse.Namespace) -> None:
    # pylint: disable=too-many-locals
    """Score sentences from a file (or stdin) and print the results.

    Reads sentences in buffered batches, scores each one with the selected
    language model, and prints one "sentence<TAB>score" line per sentence
    (plus one line per token, and a blank separator, when --tokens is set).
    """
    use_stdin = args.sentences_file_path == "-"
    sentences_stream = sys.stdin if use_stdin else open(args.sentences_file_path, "r")
    try:
        sig_fig = args.significant_figures
        batch_size = args.batch_size
        device = torch.device("cuda:%d" % args.cuda if args.cuda >= 0 else "cpu")
        scorer = LMScorer.from_pretrained(
            args.model_name, device=device, batch_size=batch_size
        )

        # Read twice the batch size per chunk so the scorer always has a
        # full batch available.
        buffer_size = args.batch_size * 2
        for sentences in grouper(sentences_stream, buffer_size):
            sentences = [sentence.strip() for sentence in sentences]

            sent_scores = scorer.sentence_score(
                sentences, log=args.log_prob, reduce=args.reduce
            )
            if args.tokens:
                sent_info = scorer.tokens_score(sentences, log=args.log_prob)

            for i, (sentence, sent_score) in enumerate(zip(sentences, sent_scores)):
                print(f"%s\t%.{sig_fig}g" % (sentence, sent_score))

                if args.tokens:
                    scores, _, tokens = sent_info[i]
                    for score, token in zip(scores, tokens):
                        print(f"%s\t%.{sig_fig}g" % (token, score))
                    print("")
    finally:
        # Fix: close the file even when scoring raises (the original only
        # closed it on the success path, leaking the handle on errors).
        # stdin is deliberately left open.
        if not use_stdin:
            sentences_stream.close()
def run() -> None:
    """CLI entry point: parse, normalize, validate, then dispatch to main().

    KeyboardInterrupt aborts cleanly; any other exception is printed as a
    one-line error unless --debug was given, in which case it is re-raised.
    """
    args = None
    try:
        args = parse_args()

        normalize_args(args)
        validate_args(args)
        main(args)
    except KeyboardInterrupt:
        print("\nAborted!")
    except Exception as err:  # pylint: disable=broad-except
        # Fix: `args` is unbound if parse_args() itself raised; the original
        # `if args.debug` then raised NameError inside this handler, hiding
        # the real error.
        if args is not None and args.debug:
            raise
        print("Error: %s" % err)


if __name__ == "__main__":
    run()
| 5,142 | 28.728324 | 88 | py |
lm-scorer | lm-scorer-master/tests/unit/models/abc/test_base.py | # pylint: disable=missing-module-docstring,missing-function-docstring,unused-variable,too-many-locals,too-many-statements
import math
import pytest # pylint: disable=unused-import
import scipy
import torch
from lm_scorer.models.abc.base import LMScorer
def model(text):
    """Toy deterministic LM: token i gets log-prob -i and id i.

    Prepends a "START" token, then returns (scores, ids, tokens).
    """
    tokens = ["START"] + text.split(" ")
    count = len(tokens)
    scores = [float(-index) for index in range(count)]
    ids = list(range(count))
    return scores, ids, tokens
class FixtureLMScorer(LMScorer):
    """LMScorer test double backed by the deterministic toy model()."""

    def _tokens_log_prob(self, text):
        # One (log_probs, ids, tokens) triple per input sentence.
        batch = []
        for sentence in text:
            scores, ids, tokens = model(sentence)
            # pylint: disable=not-callable
            log_probs = torch.tensor(scores)
            # pylint: disable=not-callable
            token_ids = torch.tensor(ids)
            batch.append((log_probs, token_ids, tokens))
        return batch

    @classmethod
    def _supported_model_names(cls):
        # The fixture has no real pretrained models.
        return iter([])

    def fix_sentence_score(self, text, reduce="prod", log=False):
        # Reference implementation of sentence_score used by the tests.
        is_single = isinstance(text, str)
        sentences = [text] if is_single else text

        results = []
        if not sentences:
            return results

        for output in self._tokens_log_prob(sentences):
            probs = output[0].exp()
            if reduce == "prod":
                reduced = probs.prod()
            elif reduce == "mean":
                reduced = probs.mean()
            elif reduce == "gmean":
                # pylint: disable=not-callable
                reduced = torch.tensor(scipy.stats.gmean(probs.numpy()))
            elif reduce == "hmean":
                # pylint: disable=not-callable
                reduced = torch.tensor(scipy.stats.hmean(probs.numpy()))
            else:
                raise ValueError("Unrecognized scoring strategy: %s" % reduce)
            if log:
                reduced = reduced.log()
            results.append(reduced.item())

        return results[0] if is_single else results

    def fix_tokens_score(self, text, log=False):
        # Reference implementation of tokens_score used by the tests.
        is_single = isinstance(text, str)
        sentences = [text] if is_single else text

        results = []
        if not sentences:
            return results

        for log_probs, ids, tokens in self._tokens_log_prob(sentences):
            values = log_probs if log else log_probs.exp()
            results.append((values.tolist(), ids.tolist(), tokens))

        return results[0] if is_single else results
def describe_sentence_score():
    scorer = FixtureLMScorer("")

    def should_return_the_correct_number_of_results():
        # Batch input returns one score per sentence; a bare string
        # returns a single float.
        for batch in ([], ["A"], ["A", "B"]):
            scores = scorer.sentence_score(batch)
            assert isinstance(scores, list) and len(scores) == len(batch)
        assert isinstance(scorer.sentence_score("A"), float)

    def should_correctly_compute_every_reduction_strategy():
        text = "Hello World"
        eps = 1e-6

        for strategy in ("prod", "mean", "gmean", "hmean"):
            for log in (False, True):
                context = (strategy, log)

                got = scorer.sentence_score("", reduce=strategy, log=log)
                want = scorer.fix_sentence_score("", reduce=strategy, log=log)
                assert isinstance(got, float)
                assert math.isclose(got, want, rel_tol=eps), context

                got = scorer.sentence_score(text, reduce=strategy, log=log)
                want = scorer.fix_sentence_score(text, reduce=strategy, log=log)
                context = ((strategy, log), (got, want))
                assert isinstance(got, float)
                assert math.isclose(got, want, rel_tol=eps), context
def describe_tokens_score():
    scorer = FixtureLMScorer("")

    def should_return_the_correct_number_of_results():
        # Batch input returns one info tuple per sentence; a bare string
        # returns a single (scores, ids, tokens) triple.
        for batch in ([], ["A"], ["A", "B"]):
            info = scorer.tokens_score(batch)
            assert isinstance(info, list) and len(info) == len(batch)

        info = scorer.tokens_score("A")
        assert isinstance(info, tuple) and len(info) == 3
| 4,467 | 35.325203 | 121 | py |
espressopp | espressopp-master/src/external/transformations.py | """
***********************************
espressopp.external.transformations
***********************************
Homogeneous Transformation Matrices and Quaternions.
A library for calculating 4x4 matrices for translating, rotating, reflecting,
scaling, shearing, projecting, orthogonalizing, and superimposing arrays of
3D homogeneous coordinates as well as for converting between rotation matrices,
Euler angles, and quaternions. Also includes an Arcball control object and
functions to decompose transformation matrices.
:Authors:
`Christoph Gohlke <http://www.lfd.uci.edu/~gohlke/>`__,
Laboratory for Fluorescence Dynamics, University of California, Irvine
:Version: 2011.01.25
**Requirements**
* `Python 2.6 or 3.1 <http://www.python.org>`__
* `Numpy 1.5 <http://numpy.scipy.org>`__
* `transformations.c 2010.04.10 <http://www.lfd.uci.edu/~gohlke/>`__
(optional implementation of some functions in C)
**Notes**
The API is not stable yet and is expected to change between revisions.
This Python code is not optimized for speed. Refer to the transformations.c
module for a faster implementation of some functions.
Documentation in HTML format can be generated with epydoc.
Matrices (M) can be inverted using numpy.linalg.inv(M), concatenated using
numpy.dot(M0, M1), or used to transform homogeneous coordinates (v) using
numpy.dot(M, v) for shape (4, \*) "point of arrays", respectively
numpy.dot(v, M.T) for shape (\*, 4) "array of points".
Use the transpose of transformation matrices for OpenGL glMultMatrixd().
Calculations are carried out with numpy.float64 precision.
Vector, point, quaternion, and matrix function arguments are expected to be
"array like", i.e. tuple, list, or numpy arrays.
Return types are numpy arrays unless specified otherwise.
Angles are in radians unless specified otherwise.
Quaternions w+ix+jy+kz are represented as [w, x, y, z].
A triple of Euler angles can be applied/interpreted in 24 ways, which can
be specified using a 4 character string or encoded 4-tuple:
*Axes 4-string*: e.g. 'sxyz' or 'ryxy'
- first character : rotations are applied to 's'tatic or 'r'otating frame
- remaining characters : successive rotation axis 'x', 'y', or 'z'
*Axes 4-tuple*: e.g. (0, 0, 0, 0) or (1, 1, 1, 1)
- inner axis: code of axis ('x':0, 'y':1, 'z':2) of rightmost matrix.
- parity : even (0) if inner axis 'x' is followed by 'y', 'y' is followed
by 'z', or 'z' is followed by 'x'. Otherwise odd (1).
- repetition : first and last axis are same (1) or different (0).
- frame : rotations are applied to static (0) or rotating (1) frame.
**References**
(1) Matrices and transformations. Ronald Goldman.
In "Graphics Gems I", pp 472-475. Morgan Kaufmann, 1990.
(2) More matrices and transformations: shear and pseudo-perspective.
Ronald Goldman. In "Graphics Gems II", pp 320-323. Morgan Kaufmann, 1991.
(3) Decomposing a matrix into simple transformations. Spencer Thomas.
In "Graphics Gems II", pp 320-323. Morgan Kaufmann, 1991.
(4) Recovering the data from the transformation matrix. Ronald Goldman.
In "Graphics Gems II", pp 324-331. Morgan Kaufmann, 1991.
(5) Euler angle conversion. Ken Shoemake.
In "Graphics Gems IV", pp 222-229. Morgan Kaufmann, 1994.
(6) Arcball rotation control. Ken Shoemake.
In "Graphics Gems IV", pp 175-192. Morgan Kaufmann, 1994.
(7) Representing attitude: Euler angles, unit quaternions, and rotation
vectors. James Diebel. 2006.
(8) A discussion of the solution for the best rotation to relate two sets
of vectors. W Kabsch. Acta Cryst. 1978. A34, 827-828.
(9) Closed-form solution of absolute orientation using unit quaternions.
BKP Horn. J Opt Soc Am A. 1987. 4(4):629-642.
(10) Quaternions. Ken Shoemake.
http://www.sfu.ca/~jwa3/cmpt461/files/quatut.pdf
(11) From quaternion to matrix and back. JMP van Waveren. 2005.
http://www.intel.com/cd/ids/developer/asmo-na/eng/293748.htm
(12) Uniform random rotations. Ken Shoemake.
In "Graphics Gems III", pp 124-132. Morgan Kaufmann, 1992.
(13) Quaternion in molecular modeling. CFF Karney.
J Mol Graph Mod, 25(5):595-604
(14) New method for extracting the quaternion from a rotation matrix.
Itzhack Y Bar-Itzhack, J Guid Contr Dynam. 2000. 23(6): 1085-1087.
Examples
>>> alpha, beta, gamma = 0.123, -1.234, 2.345
>>> origin, xaxis, yaxis, zaxis = (0, 0, 0), (1, 0, 0), (0, 1, 0), (0, 0, 1)
>>> I = identity_matrix()
>>> Rx = rotation_matrix(alpha, xaxis)
>>> Ry = rotation_matrix(beta, yaxis)
>>> Rz = rotation_matrix(gamma, zaxis)
>>> R = concatenate_matrices(Rx, Ry, Rz)
>>> euler = euler_from_matrix(R, 'rxyz')
>>> numpy.allclose([alpha, beta, gamma], euler)
True
>>> Re = euler_matrix(alpha, beta, gamma, 'rxyz')
>>> is_same_transform(R, Re)
True
>>> al, be, ga = euler_from_matrix(Re, 'rxyz')
>>> is_same_transform(Re, euler_matrix(al, be, ga, 'rxyz'))
True
>>> qx = quaternion_about_axis(alpha, xaxis)
>>> qy = quaternion_about_axis(beta, yaxis)
>>> qz = quaternion_about_axis(gamma, zaxis)
>>> q = quaternion_multiply(qx, qy)
>>> q = quaternion_multiply(q, qz)
>>> Rq = quaternion_matrix(q)
>>> is_same_transform(R, Rq)
True
>>> S = scale_matrix(1.23, origin)
>>> T = translation_matrix((1, 2, 3))
>>> Z = shear_matrix(beta, xaxis, origin, zaxis)
>>> R = random_rotation_matrix(numpy.random.rand(3))
>>> M = concatenate_matrices(T, R, Z, S)
>>> scale, shear, angles, trans, persp = decompose_matrix(M)
>>> numpy.allclose(scale, 1.23)
True
>>> numpy.allclose(trans, (1, 2, 3))
True
>>> numpy.allclose(shear, (0, math.tan(beta), 0))
True
>>> is_same_transform(R, euler_matrix(axes='sxyz', *angles))
True
>>> M1 = compose_matrix(scale, shear, angles, trans, persp)
>>> is_same_transform(M, M1)
True
>>> v0, v1 = random_vector(3), random_vector(3)
>>> M = rotation_matrix(angle_between_vectors(v0, v1), vector_product(v0, v1))
>>> v2 = numpy.dot(v0, M[:3,:3].T)
>>> numpy.allclose(unit_vector(v1), unit_vector(v2))
True
"""
from __future__ import division, print_function
import sys
import os
import warnings
import math
import numpy
def identity_matrix():
    """Return 4x4 identity/unit matrix.

    >>> I = identity_matrix()
    >>> numpy.allclose(I, numpy.dot(I, I))
    True
    >>> numpy.sum(I), numpy.trace(I)
    (4.0, 4.0)
    >>> numpy.allclose(I, numpy.identity(4, dtype=numpy.float64))
    True

    """
    return numpy.eye(4, dtype=numpy.float64)
def translation_matrix(direction):
    """Return matrix to translate by direction vector.

    >>> v = numpy.random.random(3) - 0.5
    >>> numpy.allclose(v, translation_matrix(v)[:3, 3])
    True

    """
    T = numpy.identity(4)
    # first three entries of the last column carry the offset
    T[:3, 3] = direction[:3]
    return T
def translation_from_matrix(matrix):
    """Return translation vector from translation matrix.

    >>> v0 = numpy.random.random(3) - 0.5
    >>> v1 = translation_from_matrix(translation_matrix(v0))
    >>> numpy.allclose(v0, v1)
    True

    """
    M = numpy.array(matrix, copy=False)
    # return an independent copy so callers cannot mutate the matrix
    return M[:3, 3].copy()
def reflection_matrix(point, normal):
    """Return matrix to mirror at plane defined by point and normal vector.

    >>> v0 = numpy.random.random(4) - 0.5
    >>> v0[3] = 1.0
    >>> v1 = numpy.random.random(3) - 0.5
    >>> R = reflection_matrix(v0, v1)
    >>> numpy.allclose(2., numpy.trace(R))
    True
    >>> numpy.allclose(v0, numpy.dot(R, v0))
    True
    >>> v2 = v0.copy()
    >>> v2[:3] += v1
    >>> v3 = v0.copy()
    >>> v2[:3] -= v1
    >>> numpy.allclose(v2, numpy.dot(R, v3))
    True

    """
    n = unit_vector(normal[:3])
    # Householder reflection I - 2*n*n^T, translated so the mirror plane
    # passes through `point` instead of the origin.
    M = numpy.identity(4)
    M[:3, :3] -= 2.0 * numpy.outer(n, n)
    M[:3, 3] = (2.0 * numpy.dot(point[:3], n)) * n
    return M
def reflection_from_matrix(matrix):
    """Return mirror plane point and normal vector from reflection matrix.

    >>> v0 = numpy.random.random(3) - 0.5
    >>> v1 = numpy.random.random(3) - 0.5
    >>> M0 = reflection_matrix(v0, v1)
    >>> point, normal = reflection_from_matrix(M0)
    >>> M1 = reflection_matrix(point, normal)
    >>> is_same_transform(M0, M1)
    True

    """
    M = numpy.array(matrix, dtype=numpy.float64, copy=False)

    # normal: the mirrored direction, i.e. the unit eigenvector of the 3x3
    # part with eigenvalue -1
    eigvals, eigvecs = numpy.linalg.eig(M[:3, :3])
    hits = numpy.where(abs(numpy.real(eigvals) + 1.0) < 1e-8)[0]
    if len(hits) == 0:
        raise ValueError("no unit eigenvector corresponding to eigenvalue -1")
    normal = numpy.real(eigvecs[:, hits[0]]).squeeze()

    # point: any fixed point of the full matrix (eigenvalue 1), dehomogenized
    eigvals, eigvecs = numpy.linalg.eig(M)
    hits = numpy.where(abs(numpy.real(eigvals) - 1.0) < 1e-8)[0]
    if len(hits) == 0:
        raise ValueError("no unit eigenvector corresponding to eigenvalue 1")
    point = numpy.real(eigvecs[:, hits[-1]]).squeeze()
    point /= point[3]
    return point, normal
def rotation_matrix(angle, direction, point=None):
    """Return matrix to rotate about axis defined by point and direction.

    >>> R = rotation_matrix(math.pi/2.0, [0, 0, 1], [1, 0, 0])
    >>> numpy.allclose(numpy.dot(R, [0, 0, 0, 1]), [ 1., -1.,  0.,  1.])
    True
    >>> angle = (random.random() - 0.5) * (2*math.pi)
    >>> direc = numpy.random.random(3) - 0.5
    >>> point = numpy.random.random(3) - 0.5
    >>> R0 = rotation_matrix(angle, direc, point)
    >>> R1 = rotation_matrix(angle-2*math.pi, direc, point)
    >>> is_same_transform(R0, R1)
    True
    >>> R0 = rotation_matrix(angle, direc, point)
    >>> R1 = rotation_matrix(-angle, -direc, point)
    >>> is_same_transform(R0, R1)
    True
    >>> I = numpy.identity(4, numpy.float64)
    >>> numpy.allclose(I, rotation_matrix(math.pi*2, direc))
    True
    >>> numpy.allclose(2., numpy.trace(rotation_matrix(math.pi/2,
    ...                                                direc, point)))
    True

    """
    sina = math.sin(angle)
    cosa = math.cos(angle)
    direction = unit_vector(direction[:3])
    # Rodrigues' formula: R = cos*I + (1-cos)*d*d^T + sin*[d]_x
    R = cosa * numpy.identity(3)
    R += (1.0 - cosa) * numpy.outer(direction, direction)
    d = direction * sina
    R += numpy.array((( 0.0,  -d[2],  d[1]),
                      ( d[2],  0.0,  -d[0]),
                      (-d[1],  d[0],  0.0)), dtype=numpy.float64)
    M = numpy.identity(4)
    M[:3, :3] = R
    if point is not None:
        # shift so the rotation axis passes through `point`
        point = numpy.array(point[:3], dtype=numpy.float64, copy=False)
        M[:3, 3] = point - numpy.dot(R, point)
    return M
def rotation_from_matrix(matrix):
    """Return rotation angle and axis from rotation matrix.

    >>> angle = (random.random() - 0.5) * (2*math.pi)
    >>> direc = numpy.random.random(3) - 0.5
    >>> point = numpy.random.random(3) - 0.5
    >>> R0 = rotation_matrix(angle, direc, point)
    >>> angle, direc, point = rotation_from_matrix(R0)
    >>> R1 = rotation_matrix(angle, direc, point)
    >>> is_same_transform(R0, R1)
    True

    """
    R = numpy.array(matrix, dtype=numpy.float64, copy=False)
    R33 = R[:3, :3]

    # direction: rotation axis, the unit eigenvector of R33.T with eigenvalue 1
    eigvals, eigvecs = numpy.linalg.eig(R33.T)
    hits = numpy.where(abs(numpy.real(eigvals) - 1.0) < 1e-8)[0]
    if len(hits) == 0:
        raise ValueError("no unit eigenvector corresponding to eigenvalue 1")
    direction = numpy.real(eigvecs[:, hits[-1]]).squeeze()

    # point: a fixed point of the full transform (eigenvalue 1), dehomogenized
    eigvals, eigvecs = numpy.linalg.eig(R)
    hits = numpy.where(abs(numpy.real(eigvals) - 1.0) < 1e-8)[0]
    if len(hits) == 0:
        raise ValueError("no unit eigenvector corresponding to eigenvalue 1")
    point = numpy.real(eigvecs[:, hits[-1]]).squeeze()
    point /= point[3]

    # angle: cosine from the trace, sine recovered from an off-diagonal term
    # picked according to the largest axis component (avoids division by ~0)
    cosa = (numpy.trace(R33) - 1.0) / 2.0
    if abs(direction[2]) > 1e-8:
        sina = (R[1, 0] + (cosa - 1.0) * direction[0] * direction[1]) / direction[2]
    elif abs(direction[1]) > 1e-8:
        sina = (R[0, 2] + (cosa - 1.0) * direction[0] * direction[2]) / direction[1]
    else:
        sina = (R[2, 1] + (cosa - 1.0) * direction[1] * direction[2]) / direction[0]
    angle = math.atan2(sina, cosa)
    return angle, direction, point
def scale_matrix(factor, origin=None, direction=None):
    """Return matrix to scale by factor around origin in direction.

    Use factor -1 for point symmetry.

    >>> v = (numpy.random.rand(4, 5) - 0.5) * 20.0
    >>> v[3] = 1.0
    >>> S = scale_matrix(-1.234)
    >>> numpy.allclose(numpy.dot(S, v)[:3], -1.234*v[:3])
    True
    >>> factor = random.random() * 10 - 5
    >>> origin = numpy.random.random(3) - 0.5
    >>> direct = numpy.random.random(3) - 0.5
    >>> S = scale_matrix(factor, origin)
    >>> S = scale_matrix(factor, origin, direct)

    """
    if direction is None:
        # uniform scaling; the 1.0 in the tuple forces float64 dtype
        M = numpy.diag((factor, factor, factor, 1.0))
        if origin is not None:
            M[:3, 3] = origin[:3]
            M[:3, 3] *= 1.0 - factor
    else:
        # nonuniform scaling along `direction` only
        direction = unit_vector(direction[:3])
        shrink = 1.0 - factor
        M = numpy.identity(4)
        M[:3, :3] -= shrink * numpy.outer(direction, direction)
        if origin is not None:
            M[:3, 3] = (shrink * numpy.dot(origin[:3], direction)) * direction
    return M
def scale_from_matrix(matrix):
    """Return scaling factor, origin and direction from scaling matrix.

    >>> factor = random.random() * 10 - 5
    >>> origin = numpy.random.random(3) - 0.5
    >>> direct = numpy.random.random(3) - 0.5
    >>> S0 = scale_matrix(factor, origin)
    >>> factor, origin, direction = scale_from_matrix(S0)
    >>> S1 = scale_matrix(factor, origin, direction)
    >>> is_same_transform(S0, S1)
    True
    >>> S0 = scale_matrix(factor, origin, direct)
    >>> factor, origin, direction = scale_from_matrix(S0)
    >>> S1 = scale_matrix(factor, origin, direction)
    >>> is_same_transform(S0, S1)
    True

    """
    M = numpy.array(matrix, dtype=numpy.float64, copy=False)
    upper = M[:3, :3]
    # for a nonuniform scale, trace = factor + 2; test that hypothesis first
    factor = numpy.trace(upper) - 2.0
    try:
        # direction: unit eigenvector corresponding to eigenvalue `factor`
        eigvals, eigvecs = numpy.linalg.eig(upper)
        idx = numpy.where(abs(numpy.real(eigvals) - factor) < 1e-8)[0][0]
        direction = numpy.real(eigvecs[:, idx]).squeeze()
        direction /= vector_norm(direction)
    except IndexError:
        # no such eigenvalue: the scaling is uniform
        factor = (factor + 2.0) / 3.0
        direction = None
    # origin: any fixed point of the full matrix (eigenvalue 1)
    eigvals, eigvecs = numpy.linalg.eig(M)
    hits = numpy.where(abs(numpy.real(eigvals) - 1.0) < 1e-8)[0]
    if len(hits) == 0:
        raise ValueError("no eigenvector corresponding to eigenvalue 1")
    origin = numpy.real(eigvecs[:, hits[-1]]).squeeze()
    origin /= origin[3]
    return factor, origin, direction
def projection_matrix(point, normal, direction=None,
                      perspective=None, pseudo=False):
    """Return matrix to project onto plane defined by point and normal.

    Using either perspective point, projection direction, or none of both.

    If pseudo is True, perspective projections will preserve relative depth
    such that Perspective = dot(Orthogonal, PseudoPerspective).

    >>> P = projection_matrix((0, 0, 0), (1, 0, 0))
    >>> numpy.allclose(P[1:, 1:], numpy.identity(4)[1:, 1:])
    True
    >>> point = numpy.random.random(3) - 0.5
    >>> normal = numpy.random.random(3) - 0.5
    >>> direct = numpy.random.random(3) - 0.5
    >>> persp = numpy.random.random(3) - 0.5
    >>> P0 = projection_matrix(point, normal)
    >>> P1 = projection_matrix(point, normal, direction=direct)
    >>> P2 = projection_matrix(point, normal, perspective=persp)
    >>> P3 = projection_matrix(point, normal, perspective=persp, pseudo=True)
    >>> is_same_transform(P2, numpy.dot(P0, P3))
    True
    >>> P = projection_matrix((3, 0, 0), (1, 1, 0), (1, 0, 0))
    >>> v0 = (numpy.random.rand(4, 5) - 0.5) * 20.0
    >>> v0[3] = 1.0
    >>> v1 = numpy.dot(P, v0)
    >>> numpy.allclose(v1[1], v0[1])
    True
    >>> numpy.allclose(v1[0], 3.0-v1[1])
    True

    """
    M = numpy.identity(4)
    point = numpy.array(point[:3], dtype=numpy.float64, copy=False)
    normal = unit_vector(normal[:3])
    if perspective is not None:
        # perspective projection from the eye point `perspective`
        eye = numpy.array(perspective[:3], dtype=numpy.float64,
                          copy=False)
        M[0, 0] = M[1, 1] = M[2, 2] = numpy.dot(eye - point, normal)
        M[:3, :3] -= numpy.outer(eye, normal)
        if pseudo:
            # preserve relative depth
            M[:3, :3] -= numpy.outer(normal, normal)
            M[:3, 3] = numpy.dot(point, normal) * (eye + normal)
        else:
            M[:3, 3] = numpy.dot(point, normal) * eye
        M[3, :3] = -normal
        M[3, 3] = numpy.dot(eye, normal)
    elif direction is not None:
        # parallel projection along `direction`
        direction = numpy.array(direction[:3], dtype=numpy.float64, copy=False)
        scale = numpy.dot(direction, normal)
        M[:3, :3] -= numpy.outer(direction, normal) / scale
        M[:3, 3] = direction * (numpy.dot(point, normal) / scale)
    else:
        # orthogonal projection onto the plane
        M[:3, :3] -= numpy.outer(normal, normal)
        M[:3, 3] = numpy.dot(point, normal) * normal
    return M
def projection_from_matrix(matrix, pseudo=False):
    """Return projection plane and perspective point from projection matrix.

    Return values are same as arguments for projection_matrix function:
    point, normal, direction, perspective, and pseudo.

    >>> point = numpy.random.random(3) - 0.5
    >>> normal = numpy.random.random(3) - 0.5
    >>> direct = numpy.random.random(3) - 0.5
    >>> persp = numpy.random.random(3) - 0.5
    >>> P0 = projection_matrix(point, normal)
    >>> result = projection_from_matrix(P0)
    >>> P1 = projection_matrix(*result)
    >>> is_same_transform(P0, P1)
    True
    >>> P0 = projection_matrix(point, normal, direct)
    >>> result = projection_from_matrix(P0)
    >>> P1 = projection_matrix(*result)
    >>> is_same_transform(P0, P1)
    True
    >>> P0 = projection_matrix(point, normal, perspective=persp, pseudo=False)
    >>> result = projection_from_matrix(P0, pseudo=False)
    >>> P1 = projection_matrix(*result)
    >>> is_same_transform(P0, P1)
    True
    >>> P0 = projection_matrix(point, normal, perspective=persp, pseudo=True)
    >>> result = projection_from_matrix(P0, pseudo=True)
    >>> P1 = projection_matrix(*result)
    >>> is_same_transform(P0, P1)
    True

    """
    M = numpy.array(matrix, dtype=numpy.float64, copy=False)
    M33 = M[:3, :3]
    # Orthogonal/parallel projections fix every point on the plane, so the
    # full 4x4 matrix has eigenvalue 1; a perspective matrix generally does
    # not, which is how the branches below are distinguished.
    w, V = numpy.linalg.eig(M)
    i = numpy.where(abs(numpy.real(w) - 1.0) < 1e-8)[0]
    if not pseudo and len(i):
        # point: any eigenvector corresponding to eigenvalue 1
        point = numpy.real(V[:, i[-1]]).squeeze()
        point /= point[3]
        # direction: unit eigenvector corresponding to eigenvalue 0
        # (vectors along the projection direction are collapsed to zero)
        w, V = numpy.linalg.eig(M33)
        i = numpy.where(abs(numpy.real(w)) < 1e-8)[0]
        if not len(i):
            raise ValueError("no eigenvector corresponding to eigenvalue 0")
        direction = numpy.real(V[:, i[0]]).squeeze()
        direction /= vector_norm(direction)
        # normal: unit eigenvector of M33.T corresponding to eigenvalue 0
        w, V = numpy.linalg.eig(M33.T)
        i = numpy.where(abs(numpy.real(w)) < 1e-8)[0]
        if len(i):
            # parallel projection
            normal = numpy.real(V[:, i[0]]).squeeze()
            normal /= vector_norm(normal)
            return point, normal, direction, None, False
        else:
            # orthogonal projection, where normal equals direction vector
            return point, direction, None, None, False
    else:
        # perspective projection
        i = numpy.where(abs(numpy.real(w)) > 1e-8)[0]
        if not len(i):
            raise ValueError(
                "no eigenvector not corresponding to eigenvalue 0")
        point = numpy.real(V[:, i[-1]]).squeeze()
        point /= point[3]
        # the bottom row of a perspective projection matrix carries -normal
        # (see projection_matrix, which sets M[3, :3] = -normal)
        normal = - M[3, :3]
        perspective = M[:3, 3] / numpy.dot(point[:3], normal)
        if pseudo:
            perspective -= normal
        return point, normal, None, perspective, pseudo
def clip_matrix(left, right, bottom, top, near, far, perspective=False):
    """Return matrix to obtain normalized device coordinates from frustrum.

    The frustrum bounds are axis-aligned along x (left, right),
    y (bottom, top) and z (near, far).

    Normalized device coordinates are in range [-1, 1] if coordinates are
    inside the frustrum.

    If perspective is True the frustrum is a truncated pyramid with the
    perspective point at origin and direction along z axis, otherwise an
    orthographic canonical view volume (a box).

    Homogeneous coordinates transformed by the perspective clip matrix
    need to be dehomogenized (divided by w coordinate).

    >>> frustrum = numpy.random.rand(6)
    >>> frustrum[1] += frustrum[0]
    >>> frustrum[3] += frustrum[2]
    >>> frustrum[5] += frustrum[4]
    >>> M = clip_matrix(perspective=False, *frustrum)
    >>> numpy.dot(M, [frustrum[0], frustrum[2], frustrum[4], 1.0])
    array([-1., -1., -1.,  1.])
    >>> numpy.dot(M, [frustrum[1], frustrum[3], frustrum[5], 1.0])
    array([ 1.,  1.,  1.,  1.])
    >>> M = clip_matrix(perspective=True, *frustrum)
    >>> v = numpy.dot(M, [frustrum[0], frustrum[2], frustrum[4], 1.0])
    >>> v / v[3]
    array([-1., -1., -1.,  1.])
    >>> v = numpy.dot(M, [frustrum[1], frustrum[3], frustrum[4], 1.0])
    >>> v / v[3]
    array([ 1.,  1., -1.,  1.])

    """
    if left >= right or bottom >= top or near >= far:
        raise ValueError("invalid frustrum")
    if perspective:
        if near <= _EPS:
            raise ValueError("invalid frustrum: near <= 0")
        t = 2.0 * near
        rows = [[-t / (right - left), 0.0, (right + left) / (right - left), 0.0],
                [0.0, -t / (top - bottom), (top + bottom) / (top - bottom), 0.0],
                [0.0, 0.0, -(far + near) / (far - near), t * far / (far - near)],
                [0.0, 0.0, -1.0, 0.0]]
    else:
        rows = [[2.0 / (right - left), 0.0, 0.0, (right + left) / (left - right)],
                [0.0, 2.0 / (top - bottom), 0.0, (top + bottom) / (bottom - top)],
                [0.0, 0.0, 2.0 / (far - near), (far + near) / (near - far)],
                [0.0, 0.0, 0.0, 1.0]]
    return numpy.array(rows, dtype=numpy.float64)
def shear_matrix(angle, direction, point, normal):
    """Return matrix to shear by angle along direction vector on shear plane.

    The shear plane is defined by a point and normal vector. The direction
    vector must be orthogonal to the plane's normal vector.

    A point P is transformed by the shear matrix into P" such that
    the vector P-P" is parallel to the direction vector and its extent is
    given by the angle of P-P'-P", where P' is the orthogonal projection
    of P onto the shear plane.

    >>> angle = (random.random() - 0.5) * 4*math.pi
    >>> direct = numpy.random.random(3) - 0.5
    >>> point = numpy.random.random(3) - 0.5
    >>> normal = numpy.cross(direct, numpy.random.random(3))
    >>> S = shear_matrix(angle, direct, point, normal)
    >>> numpy.allclose(1.0, numpy.linalg.det(S))
    True

    """
    normal = unit_vector(normal[:3])
    direction = unit_vector(direction[:3])
    if abs(numpy.dot(normal, direction)) > 1e-6:
        raise ValueError("direction and normal vectors are not orthogonal")
    # the shear magnitude is the tangent of the given angle
    tangent = math.tan(angle)
    M = numpy.identity(4)
    M[:3, :3] += tangent * numpy.outer(direction, normal)
    M[:3, 3] = -tangent * numpy.dot(point[:3], normal) * direction
    return M
def shear_from_matrix(matrix):
    """Return shear angle, direction and plane from shear matrix.

    >>> angle = (random.random() - 0.5) * 4*math.pi
    >>> direct = numpy.random.random(3) - 0.5
    >>> point = numpy.random.random(3) - 0.5
    >>> normal = numpy.cross(direct, numpy.random.random(3))
    >>> S0 = shear_matrix(angle, direct, point, normal)
    >>> angle, direct, point, normal = shear_from_matrix(S0)
    >>> S1 = shear_matrix(angle, direct, point, normal)
    >>> is_same_transform(S0, S1)
    True

    """
    M = numpy.array(matrix, dtype=numpy.float64, copy=False)
    M33 = M[:3, :3]
    # normal: cross independent eigenvectors corresponding to the eigenvalue 1
    # (the shear plane is fixed pointwise, so its span has eigenvalue 1; the
    # loose 1e-4 tolerance absorbs numerical noise in the eigensolve)
    w, V = numpy.linalg.eig(M33)
    i = numpy.where(abs(numpy.real(w) - 1.0) < 1e-4)[0]
    if len(i) < 2:
        raise ValueError("no two linear independent eigenvectors found %s" % w)
    V = numpy.real(V[:, i]).squeeze().T
    lenorm = -1.0
    # pick the eigenvector pair whose cross product is longest, i.e. the most
    # linearly independent pair, for a numerically stable plane normal
    # NOTE(review): the index pairs assume three candidate eigenvectors;
    # len(i) == 2 would raise IndexError at V[2] — confirm intended
    for i0, i1 in ((0, 1), (0, 2), (1, 2)):
        n = numpy.cross(V[i0], V[i1])
        w = vector_norm(n)
        if w > lenorm:
            lenorm = w
            normal = n
    normal /= lenorm
    # direction and angle: M - I maps everything onto the shear direction,
    # scaled by tan(angle) times the distance from the plane
    direction = numpy.dot(M33 - numpy.identity(3), normal)
    angle = vector_norm(direction)
    direction /= angle
    angle = math.atan(angle)
    # point: eigenvector corresponding to eigenvalue 1 (a fixed point of the
    # full homogeneous transform), dehomogenized
    w, V = numpy.linalg.eig(M)
    i = numpy.where(abs(numpy.real(w) - 1.0) < 1e-8)[0]
    if not len(i):
        raise ValueError("no eigenvector corresponding to eigenvalue 1")
    point = numpy.real(V[:, i[-1]]).squeeze()
    point /= point[3]
    return angle, direction, point, normal
def decompose_matrix(matrix):
    """Return sequence of transformations from transformation matrix.

    matrix : array_like
        Non-degenerative homogeneous transformation matrix

    Return tuple of:
        scale : vector of 3 scaling factors
        shear : list of shear factors for x-y, x-z, y-z axes
        angles : list of Euler angles about static x, y, z axes
        translate : translation vector along x, y, z axes
        perspective : perspective partition of matrix

    Raise ValueError if matrix is of wrong type or degenerative.

    >>> T0 = translation_matrix((1, 2, 3))
    >>> scale, shear, angles, trans, persp = decompose_matrix(T0)
    >>> T1 = translation_matrix(trans)
    >>> numpy.allclose(T0, T1)
    True
    >>> S = scale_matrix(0.123)
    >>> scale, shear, angles, trans, persp = decompose_matrix(S)
    >>> scale[0]
    0.123
    >>> R0 = euler_matrix(1, 2, 3)
    >>> scale, shear, angles, trans, persp = decompose_matrix(R0)
    >>> R1 = euler_matrix(*angles)
    >>> numpy.allclose(R0, R1)
    True

    """
    # work on a normalized, transposed copy: rows of M are matrix columns
    M = numpy.array(matrix, dtype=numpy.float64, copy=True).T
    if abs(M[3, 3]) < _EPS:
        raise ValueError("M[3, 3] is zero")
    M /= M[3, 3]
    # P is M with the perspective partition zeroed; used to solve for it
    P = M.copy()
    P[:, 3] = 0, 0, 0, 1
    if not numpy.linalg.det(P):
        raise ValueError("matrix is singular")
    scale = numpy.zeros((3, ), dtype=numpy.float64)
    shear = [0, 0, 0]
    angles = [0, 0, 0]
    # perspective partition: only solved when the last column is non-trivial
    if any(abs(M[:3, 3]) > _EPS):
        perspective = numpy.dot(M[:, 3], numpy.linalg.inv(P.T))
        M[:, 3] = 0, 0, 0, 1
    else:
        perspective = numpy.array((0, 0, 0, 1), dtype=numpy.float64)
    translate = M[3, :3].copy()
    M[3, :3] = 0
    # Gram-Schmidt on the rows of the 3x3 part: row lengths become the scale
    # factors, and the projections removed along the way are the shear terms
    row = M[:3, :3].copy()
    scale[0] = vector_norm(row[0])
    row[0] /= scale[0]
    shear[0] = numpy.dot(row[0], row[1])
    row[1] -= row[0] * shear[0]
    scale[1] = vector_norm(row[1])
    row[1] /= scale[1]
    shear[0] /= scale[1]
    shear[1] = numpy.dot(row[0], row[2])
    row[2] -= row[0] * shear[1]
    shear[2] = numpy.dot(row[1], row[2])
    row[2] -= row[1] * shear[2]
    scale[2] = vector_norm(row[2])
    row[2] /= scale[2]
    # shear is a plain list, but scale[2] is a numpy scalar, so this slice
    # division is dispatched to numpy's __rtruediv__ and works element-wise
    shear[1:] /= scale[2]
    # a negative triple product means an improper rotation: flip scale and rows
    if numpy.dot(row[0], numpy.cross(row[1], row[2])) < 0:
        scale *= -1
        row *= -1
    # recover static-frame ('sxyz') Euler angles from the rotation rows;
    # the else-branch handles the gimbal-lock case cos(angles[1]) == 0
    angles[1] = math.asin(-row[0, 2])
    if math.cos(angles[1]):
        angles[0] = math.atan2(row[1, 2], row[2, 2])
        angles[2] = math.atan2(row[0, 1], row[0, 0])
    else:
        #angles[0] = math.atan2(row[1, 0], row[1, 1])
        angles[0] = math.atan2(-row[2, 1], row[1, 1])
        angles[2] = 0.0
    return scale, shear, angles, translate, perspective
def compose_matrix(scale=None, shear=None, angles=None, translate=None,
                   perspective=None):
    """Return transformation matrix from sequence of transformations.

    This is the inverse of the decompose_matrix function.

    Sequence of transformations:
        scale : vector of 3 scaling factors
        shear : list of shear factors for x-y, x-z, y-z axes
        angles : list of Euler angles about static x, y, z axes
        translate : translation vector along x, y, z axes
        perspective : perspective partition of matrix

    >>> scale = numpy.random.random(3) - 0.5
    >>> shear = numpy.random.random(3) - 0.5
    >>> angles = (numpy.random.random(3) - 0.5) * (2*math.pi)
    >>> trans = numpy.random.random(3) - 0.5
    >>> persp = numpy.random.random(4) - 0.5
    >>> M0 = compose_matrix(scale, shear, angles, trans, persp)
    >>> result = decompose_matrix(M0)
    >>> M1 = compose_matrix(*result)
    >>> is_same_transform(M0, M1)
    True

    """
    # multiply the partial transforms in the same order decompose_matrix
    # factors them out: perspective, translation, rotation, shear, scale
    M = numpy.identity(4)
    if perspective is not None:
        P = numpy.identity(4)
        P[3, :] = perspective[:4]
        M = numpy.dot(M, P)
    if translate is not None:
        T = numpy.identity(4)
        T[:3, 3] = translate[:3]
        M = numpy.dot(M, T)
    if angles is not None:
        M = numpy.dot(M, euler_matrix(angles[0], angles[1], angles[2], 'sxyz'))
    if shear is not None:
        Z = numpy.identity(4)
        Z[0, 1], Z[0, 2], Z[1, 2] = shear[0], shear[1], shear[2]
        M = numpy.dot(M, Z)
    if scale is not None:
        M = numpy.dot(M, numpy.diag((scale[0], scale[1], scale[2], 1.0)))
    M /= M[3, 3]
    return M
def orthogonalization_matrix(lengths, angles):
    """Return orthogonalization matrix for crystallographic cell coordinates.
    Angles are expected in degrees.
    The de-orthogonalization matrix is the inverse.
    >>> O = orthogonalization_matrix((10., 10., 10.), (90., 90., 90.))
    >>> numpy.allclose(O[:3, :3], numpy.identity(3, float) * 10)
    True
    >>> O = orthogonalization_matrix([9.8, 12.0, 15.5], [87.2, 80.7, 69.7])
    >>> numpy.allclose(numpy.sum(O), 43.063229)
    True
    """
    # Convert cell angles (alpha, beta, gamma) from degrees to radians and
    # precompute the trigonometric terms used by the standard formula.
    length_a, length_b, length_c = lengths
    alpha, beta, gamma = numpy.radians(angles)
    sin_a, sin_b = math.sin(alpha), math.sin(beta)
    cos_a, cos_b, cos_g = math.cos(alpha), math.cos(beta), math.cos(gamma)
    co = (cos_a * cos_b - cos_g) / (sin_a * sin_b)
    # Build the homogeneous 4x4 matrix entry by entry (lower triangular
    # rotation/scale part plus unit homogeneous coordinate).
    result = numpy.zeros((4, 4), dtype=numpy.float64)
    result[0, 0] = length_a * sin_b * math.sqrt(1.0 - co * co)
    result[1, 0] = -length_a * sin_b * co
    result[1, 1] = length_b * sin_a
    result[2, 0] = length_a * cos_b
    result[2, 1] = length_b * cos_a
    result[2, 2] = length_c
    result[3, 3] = 1.0
    return result
def superimposition_matrix(v0, v1, scaling=False, usesvd=True):
    """Return matrix to transform given vector set into second vector set.
    v0 and v1 are shape (3, \*) or (4, \*) arrays of at least 3 vectors.
    If usesvd is True, the weighted sum of squared deviations (RMSD) is
    minimized according to the algorithm by W. Kabsch [8]. Otherwise the
    quaternion based algorithm by B. Horn [9] is used (slower when using
    this Python implementation).
    The returned matrix performs rotation, translation and uniform scaling
    (if specified).
    >>> v0 = numpy.random.rand(3, 10)
    >>> M = superimposition_matrix(v0, v0)
    >>> numpy.allclose(M, numpy.identity(4))
    True
    >>> R = random_rotation_matrix(numpy.random.random(3))
    >>> v0 = ((1,0,0), (0,1,0), (0,0,1), (1,1,1))
    >>> v1 = numpy.dot(R, v0)
    >>> M = superimposition_matrix(v0, v1)
    >>> numpy.allclose(v1, numpy.dot(M, v0))
    True
    >>> v0 = (numpy.random.rand(4, 100) - 0.5) * 20.0
    >>> v0[3] = 1.0
    >>> v1 = numpy.dot(R, v0)
    >>> M = superimposition_matrix(v0, v1)
    >>> numpy.allclose(v1, numpy.dot(M, v0))
    True
    >>> S = scale_matrix(random.random())
    >>> T = translation_matrix(numpy.random.random(3)-0.5)
    >>> M = concatenate_matrices(T, R, S)
    >>> v1 = numpy.dot(M, v0)
    >>> v0[:3] += numpy.random.normal(0.0, 1e-9, 300).reshape(3, -1)
    >>> M = superimposition_matrix(v0, v1, scaling=True)
    >>> numpy.allclose(v1, numpy.dot(M, v0))
    True
    >>> M = superimposition_matrix(v0, v1, scaling=True, usesvd=False)
    >>> numpy.allclose(v1, numpy.dot(M, v0))
    True
    >>> v = numpy.empty((4, 100, 3), dtype=numpy.float64)
    >>> v[:, :, 0] = v0
    >>> M = superimposition_matrix(v0, v1, scaling=True, usesvd=False)
    >>> numpy.allclose(v1, numpy.dot(M, v[:, :, 0]))
    True
    """
    # Only the first three rows are used; a homogeneous fourth row (if any)
    # is dropped.
    v0 = numpy.array(v0, dtype=numpy.float64, copy=False)[:3]
    v1 = numpy.array(v1, dtype=numpy.float64, copy=False)[:3]
    if v0.shape != v1.shape or v0.shape[1] < 3:
        raise ValueError("vector sets are of wrong shape or type")
    # move centroids to origin
    t0 = numpy.mean(v0, axis=1)
    t1 = numpy.mean(v1, axis=1)
    v0 = v0 - t0.reshape(3, 1)
    v1 = v1 - t1.reshape(3, 1)
    if usesvd:
        # Singular Value Decomposition of covariance matrix
        u, s, vh = numpy.linalg.svd(numpy.dot(v1, v0.T))
        # rotation matrix from SVD orthonormal bases
        R = numpy.dot(u, vh)
        if numpy.linalg.det(R) < 0.0:
            # R does not constitute right handed system
            # (flip the axis of the smallest singular value to fix it)
            R -= numpy.outer(u[:, 2], vh[2, :]*2.0)
            s[-1] *= -1.0
        # homogeneous transformation matrix
        M = numpy.identity(4)
        M[:3, :3] = R
    else:
        # compute symmetric matrix N (Horn's closed-form quaternion method)
        xx, yy, zz = numpy.sum(v0 * v1, axis=1)
        xy, yz, zx = numpy.sum(v0 * numpy.roll(v1, -1, axis=0), axis=1)
        xz, yx, zy = numpy.sum(v0 * numpy.roll(v1, -2, axis=0), axis=1)
        N = ((xx+yy+zz, 0.0, 0.0, 0.0),
             (yz-zy, xx-yy-zz, 0.0, 0.0),
             (zx-xz, xy+yx, -xx+yy-zz, 0.0),
             (xy-yx, zx+xz, yz+zy, -xx-yy+zz))
        # quaternion: eigenvector corresponding to most positive eigenvalue
        w, V = numpy.linalg.eigh(N)
        q = V[:, numpy.argmax(w)]
        q /= vector_norm(q) # unit quaternion
        # homogeneous transformation matrix
        M = quaternion_matrix(q)
    # scale: ratio of rms deviations from centroid
    if scaling:
        # NOTE: this squares v0/v1 in place; they are local copies by now.
        v0 *= v0
        v1 *= v1
        M[:3, :3] *= math.sqrt(numpy.sum(v1) / numpy.sum(v0))
    # translation: move v0's centroid to the origin, then to v1's centroid
    M[:3, 3] = t1
    T = numpy.identity(4)
    T[:3, 3] = -t0
    M = numpy.dot(M, T)
    return M
def euler_matrix(ai, aj, ak, axes='sxyz'):
    """Return homogeneous rotation matrix from Euler angles and axis sequence.
    ai, aj, ak : Euler's roll, pitch and yaw angles
    axes : One of 24 axis sequences as string or encoded tuple
    >>> R = euler_matrix(1, 2, 3, 'syxz')
    >>> numpy.allclose(numpy.sum(R[0]), -1.34786452)
    True
    >>> R = euler_matrix(1, 2, 3, (0, 1, 0, 1))
    >>> numpy.allclose(numpy.sum(R[0]), -0.383436184)
    True
    >>> ai, aj, ak = (4.0*math.pi) * (numpy.random.random(3) - 0.5)
    >>> for axes in _AXES2TUPLE.keys():
    ...    R = euler_matrix(ai, aj, ak, axes)
    >>> for axes in _TUPLE2AXES.keys():
    ...    R = euler_matrix(ai, aj, ak, axes)
    """
    try:
        firstaxis, parity, repetition, frame = _AXES2TUPLE[axes]
    except (AttributeError, KeyError):
        # axes given as an encoded tuple; the lookup validates it
        _TUPLE2AXES[axes]
        firstaxis, parity, repetition, frame = axes
    # Map the (firstaxis, parity) encoding to concrete axis indices i, j, k.
    i = firstaxis
    j = _NEXT_AXIS[i+parity]
    k = _NEXT_AXIS[i-parity+1]
    if frame:
        # rotating frame: swap first and third angles
        ai, ak = ak, ai
    if parity:
        ai, aj, ak = -ai, -aj, -ak
    si, sj, sk = math.sin(ai), math.sin(aj), math.sin(ak)
    ci, cj, ck = math.cos(ai), math.cos(aj), math.cos(ak)
    cc, cs = ci*ck, ci*sk
    sc, ss = si*ck, si*sk
    M = numpy.identity(4)
    if repetition:
        # repeated first/last axis (e.g. 'sxyx')
        M[i, i] = cj
        M[i, j] = sj*si
        M[i, k] = sj*ci
        M[j, i] = sj*sk
        M[j, j] = -cj*ss+cc
        M[j, k] = -cj*cs-sc
        M[k, i] = -sj*ck
        M[k, j] = cj*sc+cs
        M[k, k] = cj*cc-ss
    else:
        M[i, i] = cj*ck
        M[i, j] = sj*sc-cs
        M[i, k] = sj*cc+ss
        M[j, i] = cj*sk
        M[j, j] = sj*ss+cc
        M[j, k] = sj*cs-sc
        M[k, i] = -sj
        M[k, j] = cj*si
        M[k, k] = cj*ci
    return M
def euler_from_matrix(matrix, axes='sxyz'):
    """Return Euler angles from rotation matrix for specified axis sequence.
    axes : One of 24 axis sequences as string or encoded tuple
    Note that many Euler angle triplets can describe one matrix.
    >>> R0 = euler_matrix(1, 2, 3, 'syxz')
    >>> al, be, ga = euler_from_matrix(R0, 'syxz')
    >>> R1 = euler_matrix(al, be, ga, 'syxz')
    >>> numpy.allclose(R0, R1)
    True
    >>> angles = (4.0*math.pi) * (numpy.random.random(3) - 0.5)
    >>> for axes in _AXES2TUPLE.keys():
    ...    R0 = euler_matrix(axes=axes, *angles)
    ...    R1 = euler_matrix(axes=axes, *euler_from_matrix(R0, axes))
    ...    if not numpy.allclose(R0, R1): print(axes, "failed")
    """
    try:
        firstaxis, parity, repetition, frame = _AXES2TUPLE[axes.lower()]
    except (AttributeError, KeyError):
        # axes given as an encoded tuple; the lookup validates it
        _TUPLE2AXES[axes]
        firstaxis, parity, repetition, frame = axes
    i = firstaxis
    j = _NEXT_AXIS[i+parity]
    k = _NEXT_AXIS[i-parity+1]
    M = numpy.array(matrix, dtype=numpy.float64, copy=False)[:3, :3]
    if repetition:
        sy = math.sqrt(M[i, j]*M[i, j] + M[i, k]*M[i, k])
        if sy > _EPS:
            ax = math.atan2( M[i, j], M[i, k])
            ay = math.atan2( sy, M[i, i])
            az = math.atan2( M[j, i], -M[k, i])
        else:
            # gimbal-lock case: third angle is arbitrarily set to zero
            ax = math.atan2(-M[j, k], M[j, j])
            ay = math.atan2( sy, M[i, i])
            az = 0.0
    else:
        cy = math.sqrt(M[i, i]*M[i, i] + M[j, i]*M[j, i])
        if cy > _EPS:
            ax = math.atan2( M[k, j], M[k, k])
            ay = math.atan2(-M[k, i], cy)
            az = math.atan2( M[j, i], M[i, i])
        else:
            # gimbal-lock case: third angle is arbitrarily set to zero
            ax = math.atan2(-M[j, k], M[j, j])
            ay = math.atan2(-M[k, i], cy)
            az = 0.0
    if parity:
        ax, ay, az = -ax, -ay, -az
    if frame:
        ax, az = az, ax
    return ax, ay, az
def euler_from_quaternion(quaternion, axes='sxyz'):
    """Return Euler angles from quaternion for specified axis sequence.
    >>> angles = euler_from_quaternion([0.99810947, 0.06146124, 0, 0])
    >>> numpy.allclose(angles, [0.123, 0, 0])
    True
    """
    # Convert via the rotation matrix; the quaternion is in (w, x, y, z) order.
    return euler_from_matrix(quaternion_matrix(quaternion), axes)
def quaternion_from_euler(ai, aj, ak, axes='sxyz'):
    """Return quaternion from Euler angles and axis sequence.
    ai, aj, ak : Euler's roll, pitch and yaw angles
    axes : One of 24 axis sequences as string or encoded tuple
    >>> q = quaternion_from_euler(1, 2, 3, 'ryxz')
    >>> numpy.allclose(q, [0.435953, 0.310622, -0.718287, 0.444435])
    True
    """
    try:
        firstaxis, parity, repetition, frame = _AXES2TUPLE[axes.lower()]
    except (AttributeError, KeyError):
        # axes given as an encoded tuple; the lookup validates it
        _TUPLE2AXES[axes]
        firstaxis, parity, repetition, frame = axes
    # Indices are shifted by one because element 0 holds the scalar part.
    i = firstaxis + 1
    j = _NEXT_AXIS[i+parity-1] + 1
    k = _NEXT_AXIS[i-parity] + 1
    if frame:
        ai, ak = ak, ai
    if parity:
        aj = -aj
    # Half angles feed the quaternion formula.
    ai /= 2.0
    aj /= 2.0
    ak /= 2.0
    ci = math.cos(ai)
    si = math.sin(ai)
    cj = math.cos(aj)
    sj = math.sin(aj)
    ck = math.cos(ak)
    sk = math.sin(ak)
    cc = ci*ck
    cs = ci*sk
    sc = si*ck
    ss = si*sk
    quaternion = numpy.empty((4, ), dtype=numpy.float64)
    if repetition:
        quaternion[0] = cj*(cc - ss)
        quaternion[i] = cj*(cs + sc)
        quaternion[j] = sj*(cc + ss)
        quaternion[k] = sj*(cs - sc)
    else:
        quaternion[0] = cj*cc + sj*ss
        quaternion[i] = cj*sc - sj*cs
        quaternion[j] = cj*ss + sj*cc
        quaternion[k] = cj*cs - sj*sc
    if parity:
        quaternion[j] *= -1
    return quaternion
def quaternion_about_axis(angle, axis):
    """Return quaternion for rotation about axis.
    The axis need not be normalized; a zero axis yields the unit quaternion
    scaled by cos(angle/2) in the scalar slot.
    >>> q = quaternion_about_axis(0.123, (1, 0, 0))
    >>> numpy.allclose(q, [0.99810947, 0.06146124, 0, 0])
    True
    """
    quaternion = numpy.zeros((4, ), dtype=numpy.float64)
    quaternion[1] = axis[0]
    quaternion[2] = axis[1]
    quaternion[3] = axis[2]
    qlen = vector_norm(quaternion)
    if qlen > _EPS:
        # scale the vector part so its length equals sin(angle/2)
        quaternion *= math.sin(angle/2.0) / qlen
    quaternion[0] = math.cos(angle/2.0)
    return quaternion
def quaternion_matrix(quaternion):
    """Return homogeneous rotation matrix from quaternion.
    The quaternion is given in (w, x, y, z) order and need not be normalized.
    >>> M = quaternion_matrix([0.99810947, 0.06146124, 0, 0])
    >>> numpy.allclose(M, rotation_matrix(0.123, (1, 0, 0)))
    True
    >>> M = quaternion_matrix([1, 0, 0, 0])
    >>> numpy.allclose(M, identity_matrix())
    True
    >>> M = quaternion_matrix([0, 1, 0, 0])
    >>> numpy.allclose(M, numpy.diag([1, -1, -1, 1]))
    True
    """
    q = numpy.array(quaternion[:4], dtype=numpy.float64, copy=True)
    nq = numpy.dot(q, q)
    if nq < _EPS:
        # degenerate (near-zero) quaternion carries no rotation: identity
        return numpy.identity(4)
    q *= math.sqrt(2.0 / nq)
    # outer product of the rescaled quaternion feeds the standard formula
    q = numpy.outer(q, q)
    return numpy.array((
        (1.0-q[2, 2]-q[3, 3], q[1, 2]-q[3, 0], q[1, 3]+q[2, 0], 0.0),
        ( q[1, 2]+q[3, 0], 1.0-q[1, 1]-q[3, 3], q[2, 3]-q[1, 0], 0.0),
        ( q[1, 3]-q[2, 0], q[2, 3]+q[1, 0], 1.0-q[1, 1]-q[2, 2], 0.0),
        ( 0.0, 0.0, 0.0, 1.0)
        ), dtype=numpy.float64)
def quaternion_from_matrix(matrix, isprecise=False):
    """Return quaternion from rotation matrix.
    If isprecise=True, the input matrix is assumed to be a precise rotation
    matrix and a faster algorithm is used.
    >>> q = quaternion_from_matrix(identity_matrix(), True)
    >>> numpy.allclose(q, [1., 0., 0., 0.])
    True
    >>> q = quaternion_from_matrix(numpy.diag([1., -1., -1., 1.]))
    >>> numpy.allclose(q, [0, 1, 0, 0]) or numpy.allclose(q, [0, -1, 0, 0])
    True
    >>> R = rotation_matrix(0.123, (1, 2, 3))
    >>> q = quaternion_from_matrix(R, True)
    >>> numpy.allclose(q, [0.9981095, 0.0164262, 0.0328524, 0.0492786])
    True
    >>> R = [[-0.545, 0.797, 0.260, 0], [0.733, 0.603, -0.313, 0],
    ...      [-0.407, 0.021, -0.913, 0], [0, 0, 0, 1]]
    >>> q = quaternion_from_matrix(R)
    >>> numpy.allclose(q, [0.19069, 0.43736, 0.87485, -0.083611])
    True
    >>> R = [[0.395, 0.362, 0.843, 0], [-0.626, 0.796, -0.056, 0],
    ...      [-0.677, -0.498, 0.529, 0], [0, 0, 0, 1]]
    >>> q = quaternion_from_matrix(R)
    >>> numpy.allclose(q, [0.82336615, -0.13610694, 0.46344705, -0.29792603])
    True
    >>> R = random_rotation_matrix()
    >>> q = quaternion_from_matrix(R)
    >>> is_same_transform(R, quaternion_matrix(q))
    True
    """
    M = numpy.array(matrix, dtype=numpy.float64, copy=False)[:4, :4]
    if isprecise:
        # Shepperd-style branch selection on the trace/diagonal for
        # numerical stability; assumes M is a true rotation matrix.
        q = numpy.empty((4, ), dtype=numpy.float64)
        t = numpy.trace(M)
        if t > M[3, 3]:
            q[0] = t
            q[3] = M[1, 0] - M[0, 1]
            q[2] = M[0, 2] - M[2, 0]
            q[1] = M[2, 1] - M[1, 2]
        else:
            # pick the largest diagonal element to avoid cancellation
            i, j, k = 1, 2, 3
            if M[1, 1] > M[0, 0]:
                i, j, k = 2, 3, 1
            if M[2, 2] > M[i, i]:
                i, j, k = 3, 1, 2
            t = M[i, i] - (M[j, j] + M[k, k]) + M[3, 3]
            q[i] = t
            q[j] = M[i, j] + M[j, i]
            q[k] = M[k, i] + M[i, k]
            q[3] = M[k, j] - M[j, k]
        q *= 0.5 / math.sqrt(t * M[3, 3])
    else:
        m00 = M[0, 0]
        m01 = M[0, 1]
        m02 = M[0, 2]
        m10 = M[1, 0]
        m11 = M[1, 1]
        m12 = M[1, 2]
        m20 = M[2, 0]
        m21 = M[2, 1]
        m22 = M[2, 2]
        # symmetric matrix K
        K = numpy.array(((m00-m11-m22, 0.0, 0.0, 0.0),
                         (m01+m10, m11-m00-m22, 0.0, 0.0),
                         (m02+m20, m12+m21, m22-m00-m11, 0.0),
                         (m21-m12, m02-m20, m10-m01, m00+m11+m22)))
        K /= 3.0
        # quaternion is eigenvector of K that corresponds to largest eigenvalue
        w, V = numpy.linalg.eigh(K)
        q = V[[3, 0, 1, 2], numpy.argmax(w)]
    if q[0] < 0.0:
        # canonicalize sign: keep the scalar part non-negative
        q *= -1.0
    return q
def quaternion_multiply(quaternion1, quaternion0):
    """Return multiplication of two quaternions.
    Quaternions are given in (w, x, y, z) order; the result is the Hamilton
    product quaternion1 * quaternion0.
    >>> q = quaternion_multiply([4, 1, -2, 3], [8, -5, 6, 7])
    >>> numpy.allclose(q, [28, -44, -14, 48])
    True
    """
    a_w, a_x, a_y, a_z = quaternion1
    b_w, b_x, b_y, b_z = quaternion0
    return numpy.array((
        a_w*b_w - a_x*b_x - a_y*b_y - a_z*b_z,
        a_w*b_x + a_x*b_w + a_y*b_z - a_z*b_y,
        a_w*b_y - a_x*b_z + a_y*b_w + a_z*b_x,
        a_w*b_z + a_x*b_y - a_y*b_x + a_z*b_w), dtype=numpy.float64)
def quaternion_conjugate(quaternion):
    """Return conjugate of quaternion.
    The scalar (w) component is kept; the vector part is negated.
    >>> q0 = random_quaternion()
    >>> q1 = quaternion_conjugate(q0)
    >>> q1[0] == q0[0] and all(q1[1:] == -q0[1:])
    True
    """
    w = quaternion[0]
    x, y, z = quaternion[1], quaternion[2], quaternion[3]
    return numpy.array((w, -x, -y, -z), dtype=numpy.float64)
def quaternion_inverse(quaternion):
    """Return inverse of quaternion.
    >>> q0 = random_quaternion()
    >>> q1 = quaternion_inverse(q0)
    >>> numpy.allclose(quaternion_multiply(q0, q1), [1, 0, 0, 0])
    True
    """
    # inverse = conjugate / squared norm (conjugation inlined here)
    conjugate = numpy.array((quaternion[0], -quaternion[1],
                             -quaternion[2], -quaternion[3]),
                            dtype=numpy.float64)
    return conjugate / numpy.dot(quaternion, quaternion)
def quaternion_real(quaternion):
    """Return real part of quaternion.
    >>> quaternion_real([3.0, 0.0, 1.0, 2.0])
    3.0
    """
    # The scalar (w) component is stored first in the (w, x, y, z) layout.
    return quaternion[0]
def quaternion_imag(quaternion):
    """Return imaginary part of quaternion.
    >>> quaternion_imag([3.0, 0.0, 1.0, 2.0])
    [0.0, 1.0, 2.0]
    """
    # Vector part (x, y, z); returned as a slice of the input container.
    return quaternion[1:4]
def quaternion_slerp(quat0, quat1, fraction, spin=0, shortestpath=True):
    """Return spherical linear interpolation between two quaternions.
    fraction : interpolation parameter in [0, 1]
    spin : number of extra full rotations to add to the great-circle arc
    shortestpath : if True, flip quat1 so the shorter arc is taken
    >>> q0 = random_quaternion()
    >>> q1 = random_quaternion()
    >>> q = quaternion_slerp(q0, q1, 0.0)
    >>> numpy.allclose(q, q0)
    True
    >>> q = quaternion_slerp(q0, q1, 1.0, 1)
    >>> numpy.allclose(q, q1)
    True
    >>> q = quaternion_slerp(q0, q1, 0.5)
    >>> angle = math.acos(numpy.dot(q0, q))
    >>> numpy.allclose(2.0, math.acos(numpy.dot(q0, q1)) / angle) or \
        numpy.allclose(2.0, math.acos(-numpy.dot(q0, q1)) / angle)
    True
    """
    q0 = unit_vector(quat0[:4])
    q1 = unit_vector(quat1[:4])
    # trivial endpoints: avoid acos/sin round-off entirely
    if fraction == 0.0:
        return q0
    elif fraction == 1.0:
        return q1
    d = numpy.dot(q0, q1)
    if abs(abs(d) - 1.0) < _EPS:
        # quaternions are (anti)parallel: nothing to interpolate
        return q0
    if shortestpath and d < 0.0:
        # invert rotation
        d = -d
        q1 *= -1.0
    angle = math.acos(d) + spin * math.pi
    if abs(angle) < _EPS:
        return q0
    isin = 1.0 / math.sin(angle)
    # standard slerp weights sin((1-f)a)/sin(a) and sin(fa)/sin(a)
    q0 *= math.sin((1.0 - fraction) * angle) * isin
    q1 *= math.sin(fraction * angle) * isin
    q0 += q1
    return q0
def random_quaternion(rand=None):
    """Return uniform random unit quaternion.
    rand: array like or None
        Three independent random variables that are uniformly distributed
        between 0 and 1.
    >>> q = random_quaternion()
    >>> numpy.allclose(1.0, vector_norm(q))
    True
    >>> q = random_quaternion(numpy.random.random(3))
    >>> len(q.shape), q.shape[0]==4
    (1, True)
    """
    # Draw three uniform variates unless the caller supplied them.
    if rand is None:
        rand = numpy.random.rand(3)
    else:
        assert len(rand) == 3
    u1, u2, u3 = rand[0], rand[1], rand[2]
    # Two points on circles of radius sqrt(1-u1) and sqrt(u1) combine into
    # a uniformly distributed point on the unit 3-sphere.
    radius_a = numpy.sqrt(1.0 - u1)
    radius_b = numpy.sqrt(u1)
    theta_a = math.pi * 2.0 * u2
    theta_b = math.pi * 2.0 * u3
    return numpy.array((numpy.cos(theta_b) * radius_b,
                        numpy.sin(theta_a) * radius_a,
                        numpy.cos(theta_a) * radius_a,
                        numpy.sin(theta_b) * radius_b),
                       dtype=numpy.float64)
def random_rotation_matrix(rand=None):
    """Return uniform random rotation matrix.
    rnd: array like
        Three independent random variables that are uniformly distributed
        between 0 and 1 for each returned quaternion.
    >>> R = random_rotation_matrix()
    >>> numpy.allclose(numpy.dot(R.T, R), numpy.identity(4))
    True
    """
    # Uniform rotations are obtained from uniform unit quaternions.
    return quaternion_matrix(random_quaternion(rand))
class Arcball(object):
    """Virtual Trackball Control.
    Maps 2D mouse drags inside a window onto rotations of a virtual sphere,
    optionally constrained to a fixed set of axes.
    >>> ball = Arcball()
    >>> ball = Arcball(initial=numpy.identity(4))
    >>> ball.place([320, 320], 320)
    >>> ball.down([500, 250])
    >>> ball.drag([475, 275])
    >>> R = ball.matrix()
    >>> numpy.allclose(numpy.sum(R), 3.90583455)
    True
    >>> ball = Arcball(initial=[1, 0, 0, 0])
    >>> ball.place([320, 320], 320)
    >>> ball.setaxes([1,1,0], [-1, 1, 0])
    >>> ball.setconstrain(True)
    >>> ball.down([400, 200])
    >>> ball.drag([200, 400])
    >>> R = ball.matrix()
    >>> numpy.allclose(numpy.sum(R), 0.2055924)
    True
    >>> ball.next()
    """
    def __init__(self, initial=None):
        """Initialize virtual trackball control.
        initial : quaternion or rotation matrix
        """
        self._axis = None        # active constraint axis during a drag
        self._axes = None        # candidate constraint axes
        self._radius = 1.0
        self._center = [0.0, 0.0]
        self._vdown = numpy.array([0, 0, 1], dtype=numpy.float64)
        self._constrain = False
        if initial is None:
            self._qdown = numpy.array([1, 0, 0, 0], dtype=numpy.float64)
        else:
            initial = numpy.array(initial, dtype=numpy.float64)
            if initial.shape == (4, 4):
                self._qdown = quaternion_from_matrix(initial)
            elif initial.shape == (4, ):
                initial /= vector_norm(initial)
                self._qdown = initial
            else:
                raise ValueError("initial not a quaternion or matrix")
        self._qnow = self._qpre = self._qdown
    def place(self, center, radius):
        """Place Arcball, e.g. when window size changes.
        center : sequence[2]
            Window coordinates of trackball center.
        radius : float
            Radius of trackball in window coordinates.
        """
        self._radius = float(radius)
        self._center[0] = center[0]
        self._center[1] = center[1]
    def setaxes(self, *axes):
        """Set axes to constrain rotations."""
        if axes is None:
            self._axes = None
        else:
            self._axes = [unit_vector(axis) for axis in axes]
    def setconstrain(self, constrain):
        """Set state of constrain to axis mode."""
        # coerce truthy input to a plain bool
        self._constrain = constrain == True
    def getconstrain(self):
        """Return state of constrain to axis mode."""
        return self._constrain
    def down(self, point):
        """Set initial cursor window coordinates and pick constrain-axis."""
        self._vdown = arcball_map_to_sphere(point, self._center, self._radius)
        self._qdown = self._qpre = self._qnow
        if self._constrain and self._axes is not None:
            # snap the press point onto the nearest allowed rotation axis
            self._axis = arcball_nearest_axis(self._vdown, self._axes)
            self._vdown = arcball_constrain_to_axis(self._vdown, self._axis)
        else:
            self._axis = None
    def drag(self, point):
        """Update current cursor window coordinates."""
        vnow = arcball_map_to_sphere(point, self._center, self._radius)
        if self._axis is not None:
            vnow = arcball_constrain_to_axis(vnow, self._axis)
        self._qpre = self._qnow
        # rotation axis is the cross product of press and current vectors
        t = numpy.cross(self._vdown, vnow)
        if numpy.dot(t, t) < _EPS:
            # no significant motion: keep the press orientation
            self._qnow = self._qdown
        else:
            q = [numpy.dot(self._vdown, vnow), t[0], t[1], t[2]]
            self._qnow = quaternion_multiply(q, self._qdown)
    def next(self, acceleration=0.0):
        """Continue rotation in direction of last drag."""
        # extrapolate past qnow along the qpre->qnow arc (fraction > 1)
        q = quaternion_slerp(self._qpre, self._qnow, 2.0+acceleration, False)
        self._qpre, self._qnow = self._qnow, q
    def matrix(self):
        """Return homogeneous rotation matrix."""
        return quaternion_matrix(self._qnow)
def arcball_map_to_sphere(point, center, radius):
    """Return unit sphere coordinates from window coordinates."""
    # Normalize window coordinates relative to the trackball center
    # (window y grows downwards, hence the sign flip).
    x = (point[0] - center[0]) / radius
    y = (center[1] - point[1]) / radius
    mapped = numpy.array((x, y, 0.0), dtype=numpy.float64)
    dist_sq = x*x + y*y
    if dist_sq > 1.0:
        # outside the ball: project onto the silhouette circle
        mapped /= math.sqrt(dist_sq)
    else:
        # inside: lift onto the front hemisphere
        mapped[2] = math.sqrt(1.0 - dist_sq)
    return mapped
def arcball_constrain_to_axis(point, axis):
    """Return sphere point perpendicular to axis."""
    v = numpy.array(point, dtype=numpy.float64, copy=True)
    a = numpy.array(axis, dtype=numpy.float64, copy=True)
    v -= a * numpy.dot(a, v) # on plane
    n = vector_norm(v)
    if n > _EPS:
        if v[2] < 0.0:
            # prefer the front-facing (z >= 0) representative
            v *= -1.0
        v /= n
        return v
    # degenerate case: point is (anti)parallel to axis; pick any
    # perpendicular unit vector instead
    if a[2] == 1.0:
        return numpy.array([1, 0, 0], dtype=numpy.float64)
    return unit_vector([-a[1], a[0], 0])
def arcball_nearest_axis(point, axes):
    """Return axis, which arc is nearest to point."""
    point = numpy.array(point, dtype=numpy.float64, copy=False)
    nearest = None
    mx = -1.0
    for axis in axes:
        # the larger the dot product with the constrained point, the
        # closer the axis' arc is to the original point
        t = numpy.dot(arcball_constrain_to_axis(point, axis), point)
        if t > mx:
            nearest = axis
            mx = t
    return nearest
# epsilon for testing whether a number is close to zero
_EPS = numpy.finfo(float).eps * 4.0
# axis sequences for Euler angles
_NEXT_AXIS = [1, 2, 0, 1]
# map axes strings to/from tuples of inner axis, parity, repetition, frame
# (prefix 's' = static/extrinsic frame, 'r' = rotating/intrinsic frame)
_AXES2TUPLE = {
    'sxyz': (0, 0, 0, 0), 'sxyx': (0, 0, 1, 0), 'sxzy': (0, 1, 0, 0),
    'sxzx': (0, 1, 1, 0), 'syzx': (1, 0, 0, 0), 'syzy': (1, 0, 1, 0),
    'syxz': (1, 1, 0, 0), 'syxy': (1, 1, 1, 0), 'szxy': (2, 0, 0, 0),
    'szxz': (2, 0, 1, 0), 'szyx': (2, 1, 0, 0), 'szyz': (2, 1, 1, 0),
    'rzyx': (0, 0, 0, 1), 'rxyx': (0, 0, 1, 1), 'ryzx': (0, 1, 0, 1),
    'rxzx': (0, 1, 1, 1), 'rxzy': (1, 0, 0, 1), 'ryzy': (1, 0, 1, 1),
    'rzxy': (1, 1, 0, 1), 'ryxy': (1, 1, 1, 1), 'ryxz': (2, 0, 0, 1),
    'rzxz': (2, 0, 1, 1), 'rxyz': (2, 1, 0, 1), 'rzyz': (2, 1, 1, 1)}
# reverse lookup: encoded tuple -> axis string
_TUPLE2AXES = dict((v, k) for k, v in _AXES2TUPLE.items())
def vector_norm(data, axis=None, out=None):
    """Return length, i.e. eucledian norm, of ndarray along axis.
    When *out* is given, results are written into it and None is returned.
    >>> v = numpy.random.random(3)
    >>> n = vector_norm(v)
    >>> numpy.allclose(n, numpy.linalg.norm(v))
    True
    >>> v = numpy.random.rand(6, 5, 3)
    >>> n = vector_norm(v, axis=-1)
    >>> numpy.allclose(n, numpy.sqrt(numpy.sum(v*v, axis=2)))
    True
    >>> n = vector_norm(v, axis=1)
    >>> numpy.allclose(n, numpy.sqrt(numpy.sum(v*v, axis=1)))
    True
    >>> v = numpy.random.rand(5, 4, 3)
    >>> n = numpy.empty((5, 3), dtype=numpy.float64)
    >>> vector_norm(v, axis=1, out=n)
    >>> numpy.allclose(n, numpy.sqrt(numpy.sum(v*v, axis=1)))
    True
    >>> vector_norm([])
    0.0
    >>> vector_norm([1.0])
    1.0
    """
    # Work on a float64 copy so squaring in place cannot clobber the input.
    squared = numpy.array(data, dtype=numpy.float64, copy=True)
    if out is None:
        if squared.ndim == 1:
            # fast scalar path for plain vectors
            return math.sqrt(numpy.dot(squared, squared))
        squared *= squared
        result = numpy.atleast_1d(numpy.sum(squared, axis=axis))
        numpy.sqrt(result, result)
        return result
    # caller-provided output buffer: fill it and return None
    squared *= squared
    numpy.sum(squared, axis=axis, out=out)
    numpy.sqrt(out, out)
def unit_vector(data, axis=None, out=None):
    """Return ndarray normalized by length, i.e. eucledian norm, along axis.
    When *out* is given, results are written into it and None is returned.
    >>> v0 = numpy.random.random(3)
    >>> v1 = unit_vector(v0)
    >>> numpy.allclose(v1, v0 / numpy.linalg.norm(v0))
    True
    >>> v0 = numpy.random.rand(5, 4, 3)
    >>> v1 = unit_vector(v0, axis=-1)
    >>> v2 = v0 / numpy.expand_dims(numpy.sqrt(numpy.sum(v0*v0, axis=2)), 2)
    >>> numpy.allclose(v1, v2)
    True
    >>> v1 = unit_vector(v0, axis=1)
    >>> v2 = v0 / numpy.expand_dims(numpy.sqrt(numpy.sum(v0*v0, axis=1)), 1)
    >>> numpy.allclose(v1, v2)
    True
    >>> v1 = numpy.empty((5, 4, 3), dtype=numpy.float64)
    >>> unit_vector(v0, axis=1, out=v1)
    >>> numpy.allclose(v1, v2)
    True
    >>> list(unit_vector([]))
    []
    >>> list(unit_vector([1.0]))
    [1.0]
    """
    if out is None:
        data = numpy.array(data, dtype=numpy.float64, copy=True)
        if data.ndim == 1:
            # fast path for plain vectors
            data /= math.sqrt(numpy.dot(data, data))
            return data
    else:
        if out is not data:
            out[:] = numpy.array(data, copy=False)
        # normalize the output buffer in place
        data = out
    length = numpy.atleast_1d(numpy.sum(data*data, axis))
    numpy.sqrt(length, length)
    if axis is not None:
        # restore a broadcastable shape so the division lines up
        length = numpy.expand_dims(length, axis)
    data /= length
    if out is None:
        return data
def random_vector(size):
    """Return array of random doubles in the half-open interval [0.0, 1.0).
    >>> v = random_vector(10000)
    >>> numpy.all(v >= 0.0) and numpy.all(v < 1.0)
    True
    >>> v0 = random_vector(10)
    >>> v1 = random_vector(10)
    >>> numpy.any(v0 == v1)
    False
    """
    # Thin wrapper over numpy's global RNG, kept for API symmetry.
    return numpy.random.random(size)
def vector_product(v0, v1, axis=0):
    """Return vector perpendicular to vectors.
    >>> v = vector_product([2, 0, 0], [0, 3, 0])
    >>> numpy.allclose(v, [0, 0, 6])
    True
    >>> v0 = [[2, 0, 0, 2], [0, 2, 0, 2], [0, 0, 2, 2]]
    >>> v1 = [[3], [0], [0]]
    >>> v = vector_product(v0, v1)
    >>> numpy.allclose(v, [[0, 0, 0, 0], [0, 0, 6, 6], [0, -6, 0, -6]])
    True
    >>> v0 = [[2, 0, 0], [2, 0, 0], [0, 2, 0], [2, 0, 0]]
    >>> v1 = [[0, 3, 0], [0, 0, 3], [0, 0, 3], [3, 3, 3]]
    >>> v = vector_product(v0, v1, axis=1)
    >>> numpy.allclose(v, [[0, 0, 6], [0, -6, 0], [6, 0, 0], [0, -6, 6]])
    True
    """
    # Delegate to numpy's cross product; axis selects where the 3-vectors
    # live in the input arrays (both axisa and axisb via the shorthand).
    return numpy.cross(v0, v1, axis=axis)
def angle_between_vectors(v0, v1, directed=True, axis=0):
    """Return angle between vectors.
    If directed is False, the input vectors are interpreted as undirected axes,
    i.e. the maximum angle is pi/2.
    >>> a = angle_between_vectors([1, -2, 3], [-1, 2, -3])
    >>> numpy.allclose(a, math.pi)
    True
    >>> a = angle_between_vectors([1, -2, 3], [-1, 2, -3], directed=False)
    >>> numpy.allclose(a, 0)
    True
    >>> v0 = [[2, 0, 0, 2], [0, 2, 0, 2], [0, 0, 2, 2]]
    >>> v1 = [[3], [0], [0]]
    >>> a = angle_between_vectors(v0, v1)
    >>> numpy.allclose(a, [0., 1.5708, 1.5708, 0.95532])
    True
    >>> v0 = [[2, 0, 0], [2, 0, 0], [0, 2, 0], [2, 0, 0]]
    >>> v1 = [[0, 3, 0], [0, 0, 3], [0, 0, 3], [3, 3, 3]]
    >>> a = angle_between_vectors(v0, v1, axis=1)
    >>> numpy.allclose(a, [1.5708, 1.5708, 1.5708, 0.95532])
    True
    """
    v0 = numpy.array(v0, dtype=numpy.float64, copy=False)
    v1 = numpy.array(v1, dtype=numpy.float64, copy=False)
    # cos(angle) = v0.v1 / (|v0| |v1|), computed along the requested axis
    dot = numpy.sum(v0 * v1, axis=axis)
    dot /= vector_norm(v0, axis=axis) * vector_norm(v1, axis=axis)
    # undirected mode folds angles above pi/2 back via |cos|
    return numpy.arccos(dot if directed else numpy.fabs(dot))
def inverse_matrix(matrix):
    """Return inverse of square transformation matrix.
    >>> M0 = random_rotation_matrix()
    >>> M1 = inverse_matrix(M0.T)
    >>> numpy.allclose(M1, numpy.linalg.inv(M0.T))
    True
    >>> for size in xrange(1, 7):
    ...     M0 = numpy.random.rand(size, size)
    ...     M1 = inverse_matrix(M0)
    ...     if not numpy.allclose(M1, numpy.linalg.inv(M0)): print(size)
    """
    # Thin wrapper over numpy; raises LinAlgError for singular input.
    return numpy.linalg.inv(matrix)
def concatenate_matrices(*matrices):
    """Return concatenation of series of transformation matrices.
    With no arguments the 4x4 identity matrix is returned.
    >>> M = numpy.random.rand(16).reshape((4, 4)) - 0.5
    >>> numpy.allclose(M, concatenate_matrices(M))
    True
    >>> numpy.allclose(numpy.dot(M, M.T), concatenate_matrices(M, M.T))
    True
    """
    # Left-to-right product: identity . M0 . M1 . ...
    result = numpy.identity(4)
    for factor in matrices:
        result = numpy.dot(result, factor)
    return result
def is_same_transform(matrix0, matrix1):
    """Return True if two matrices perform same transformation.
    >>> is_same_transform(numpy.identity(4), numpy.identity(4))
    True
    >>> is_same_transform(numpy.identity(4), random_rotation_matrix())
    False
    """
    # Homogeneous matrices that differ only by a global scale factor are
    # equivalent, so normalize both by their [3, 3] entry before comparing.
    m0 = numpy.array(matrix0, dtype=numpy.float64, copy=True)
    m1 = numpy.array(matrix1, dtype=numpy.float64, copy=True)
    m0 /= m0[3, 3]
    m1 /= m1[3, 3]
    return numpy.allclose(m0, m1)
def _import_module(module_name, warn=True, prefix='_py_', ignore='_'):
    """Try import all public attributes from module into global namespace.
    Existing attributes with name clashes are renamed with prefix.
    Attributes starting with underscore are ignored by default.
    Return True on successful import, False otherwise.
    """
    sys.path.append(os.path.dirname(__file__))
    try:
        module = __import__(module_name)
    except ImportError:
        if warn:
            warnings.warn("failed to import module " + module_name)
        # BUG FIX: the original fell through and implicitly returned None
        # although the docstring promises a boolean result.
        return False
    finally:
        # BUG FIX: pop exactly once on every path; previously the path
        # entry leaked if __import__ raised anything but ImportError.
        sys.path.pop()
    for attr in dir(module):
        if ignore and attr.startswith(ignore):
            continue
        if prefix:
            # keep a '_py_'-prefixed backup of any shadowed pure-Python name
            if attr in globals():
                globals()[prefix + attr] = globals()[attr]
            elif warn:
                warnings.warn("no Python implementation of " + attr)
        globals()[attr] = getattr(module, attr)
    return True
# Optionally replace the pure-Python implementations with the accelerated
# C extension (currently disabled):
# _import_module('_transformations')
# Documentation in HTML format can be generated with Epydoc
__docformat__ = "restructuredtext en"
# Running this module as a script executes all docstring examples.
if __name__ == "__main__":
    import doctest
    import random # used in doctests
    numpy.set_printoptions(suppress=True, precision=5)
    doctest.testmod()
| 61,424 | 32.602298 | 79 | py |
espressopp | espressopp-master/doc/ug/conf.py | # -*- coding: utf-8 -*-
#
# ESPResSo++ documentation build configuration file, created by
# sphinx-quickstart on Sat Jan 23 13:11:32 2010.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
import datetime
# Gets version directly from the code
# Detect whether the docs are built on the ESPResSo++ server by inspecting
# $HOME.  BUG FIX: the original compared against the relative path
# 'home/espressopp' (missing leading slash), which can never equal an
# absolute $HOME, so the server branch was unreachable and the version was
# always reported as 'latest'.
on_espp_server = os.environ.get('HOME') == '/home/espressopp'
if on_espp_server:
    try:
        import espressopp
        ver = espressopp.Version()
        ESPP_VERSION = 'Release {}.{}.{}'.format(
            ver.major, ver.minor, ver.patchlevel)
    except ImportError:
        # espressopp is not importable even on the server: fall back to a
        # placeholder release string.
        ESPP_VERSION = 'Release X.X.X'
else:
    ESPP_VERSION = 'latest'
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# -- General configuration -----------------------------------------------------
# If your extensions are in another directory, add it here. If the directory
# is relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
sys.path.append(os.path.abspath('sphinxext'))
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.mathjax',
              'sphinx.ext.autodoc',
              'sphinx.ext.doctest',
              'sphinx.ext.autosummary',
              'ipython_console_highlighting'
              ]
# Not yet: numpydoc
# Add any paths that contain templates here, relative to this directory.
#templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'ESPResSo++'
# Copyright year range is regenerated at build time.
copyright = '2013-{}, Max Planck Institute for Polymer Research'.format(
    datetime.date.today().year)
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = ESPP_VERSION
# The full version, including alpha/beta/rc tags.
release = ESPP_VERSION
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
#html_theme = 'basic'
# Find sphinx_rtd_theme package
html_theme = 'sphinx_rtd_theme'
html_theme_path = ["_themes", ]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = '{} v{}'.format(project, version)
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = '_static/logo_theory_group.png'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = "_static/favicon_blue.ico"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ['_static']
#html_style = 'default.css'
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = { '**': ['globaltoc.html', 'custom_links_sidebar.html', 'searchbox.html'] }
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
# html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = False
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'ESPResSo++_doc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
latex_paper_size = 'a4'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
  ('index_latex', 'ESPResSo++.tex', u'ESPResSo++ Documentation',
   u'Developer team', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
latex_logo = '_static/espp_logo_blue.png'
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}
Dataset-3DPOP | Dataset-3DPOP-main/Examples/SampleTrainingImages.py | # !/usr/bin/env python3
"""Sample images and save annotaitons to json to be read by pytorch dataloader"""
import sys
sys.path.append("./")
from POP3D_Reader import Trial
import os
import cv2
import numpy as np
import math
import pandas as pd
import random
from tqdm import tqdm
import json
random.seed(10)  # fixed seed so the random frame/view sampling is reproducible
def GetInstancePerTrial(MetaDataDir,DatasetDir,TrainNum,ValRatio,TestRatio):
    """Plan how many images to sample from each sequence for each split.

    Given the total number of training images TrainNum, the budget is split
    evenly across the individual-count categories found in the metadata
    (1, 2, 5, 10 birds), then spread over the sequences of each category.
    Val/Test budgets are derived from the Train budget via ValRatio/TestRatio.

    Returns a dict {"Train"/"Val"/"Test": {sequence_id(int): num_images}}.
    """
    Metadf = pd.read_csv(MetaDataDir,dtype=str)
    #Types of individual number:
    IndividualNumType = sorted(list(set(Metadf["IndividualNum"].to_list())))
    NumPerType = math.ceil(TrainNum/len(IndividualNumType))
    print("Number of images per individual number types: %i" %(NumPerType))
    # Target number of sampled images per split for ONE individual-count category
    TotalNumToSampleDict = {"Train":NumPerType,"Val":NumPerType*ValRatio,"Test":NumPerType*TestRatio}
    ##Prepare out dicts
    TrainImgtoSampleDict = {}
    ValImgtoSampleDict = {}
    TestImgtoSampleDict = {}
    ImgtoSampleDicts = {"Train":TrainImgtoSampleDict,"Val":ValImgtoSampleDict,"Test":TestImgtoSampleDict}
    ##ImageSampled:
    TrainImgSampled = {'1':0,'2':0,'5':0,'10':0}
    ValImgSampled ={'1':0,'2':0,'5':0,'10':0}
    TestImgSampled = {'1':0,'2':0,'5':0,'10':0}
    ImgSampledDict = {"Train":TrainImgSampled,"Val":ValImgSampled,"Test":TestImgSampled}
    Types = ["Train", "Val","Test"]
    for IndNum in IndividualNumType:
        #Subset df for this ind number
        IndDF = Metadf.loc[Metadf["IndividualNum"] == IndNum]
        ImgtoSampleTrain = math.ceil(NumPerType/len(IndDF))
        ImgtoSampleVal = math.ceil(ImgtoSampleTrain*ValRatio)
        ImgtoSampleTest = math.ceil(ImgtoSampleTrain*TestRatio)
        #Number of images to sample for each type:
        TypeNums = {"Train":ImgtoSampleTrain,"Val":ImgtoSampleVal,"Test":ImgtoSampleTest}
        TotalNumCounter = {"Train":0, "Val":0, "Test":0} #Counter to keep track of total frames sampled for certain type
        print("Calculating %s Individuals:"%IndNum)
        for index, row in tqdm(IndDF.iterrows()):
            PigeonTrial = Trial.Trial(DatasetDir,row["Sequence"])
            for Type in Types:
                PigeonTrial.load3DPopTrainingSet(Filter = True, Type = Type)
                #Find frames where all camera views no NA
                FramesList = []
                for camObj in PigeonTrial.camObjects:
                    FramesList.append(camObj.Keypoint2D.dropna(axis=0)["frame"].to_list())
                # Frames with complete annotations in all four camera views
                NoNAFrameList = sorted(list(set(FramesList[0]) & set(FramesList[1]) & set(FramesList[2])& set(FramesList[3])))
                # if IndNum == "10":
                #     import ipdb;ipdb.set_trace()
                # Special case: sequence 59 (10 birds) absorbs whatever budget
                # is still missing for this split -- presumably the last/biggest
                # 10-bird sequence; TODO confirm against the metadata.
                if IndNum == "10" and row["Sequence"] == "59": #if 10 just sample all
                    #Sample all:
                    # ImgtoSampleDicts[Type].update({int(row["Sequence"]):len(NoNAFrameList)})
                    # TotalNumCounter[Type] += len(NoNAFrameList)
                    # ImgSampledDict[Type][IndNum] += len(NoNAFrameList)
                    ##Smaple till enough:
                    # import ipdb;ipdb.set_trace()
                    SampleValue = (TotalNumToSampleDict[Type]- TotalNumCounter[Type])
                    ImgtoSampleDicts[Type].update({int(row["Sequence"]):(SampleValue)})
                    TotalNumCounter[Type] +=SampleValue
                    ImgSampledDict[Type][IndNum] +=SampleValue
                    continue
                if len(NoNAFrameList) > TypeNums[Type]: #if have plenty frames to sample from
                    ImgtoSampleDicts[Type].update({int(row["Sequence"]):TypeNums[Type]})
                    TotalNumCounter[Type]+=TypeNums[Type]
                    ImgSampledDict[Type][IndNum] += TypeNums[Type]
                elif TotalNumCounter[Type] + len(NoNAFrameList) > TotalNumToSampleDict[Type]: #if after this trial can have enough, dont sample all, just get enough
                    SampleValue = (TotalNumToSampleDict[Type]- TotalNumCounter[Type])
                    ImgtoSampleDicts[Type].update({int(row["Sequence"]):(SampleValue)})
                    TotalNumCounter[Type] +=SampleValue
                    ImgSampledDict[Type][IndNum] +=SampleValue
                else: #Else just sample all
                    ImgtoSampleDicts[Type].update({int(row["Sequence"]):len(NoNAFrameList)})
                    TotalNumCounter[Type] += len(NoNAFrameList)
                    ImgSampledDict[Type][IndNum] += len(NoNAFrameList)
    for Type in Types:
        print(Type)
        print("Approx Image to sample: %s"%(TotalNumToSampleDict[Type]))
        print("Total Images: %s"%ImgSampledDict[Type])
    # import ipdb;ipdb.set_trace()
    return ImgtoSampleDicts
def SaveImages(PigeonTrial, RandomFrames,OutDir,Keypoints,Type, MasterIndexCounter,DictList2D,DictList3D):
    """Write the sampled frames of one trial to disk and accumulate annotations.

    For every frame index in RandomFrames:
      * the frame of each camera is saved to <OutDir>/<CamName>/<Seq>-F<frame>.jpg,
      * one randomly chosen camera view is also saved to <OutDir>/MixedViews/,
      * bounding boxes, per-camera 2D keypoints and 3D keypoints are appended
        to DictList3D (all views) and DictList2D (the single random view).

    Parameters:
        PigeonTrial -- Trial object with loaded camera objects and subjects
        RandomFrames -- iterable of frame indices to export
        OutDir -- output root; Cam*/MixedViews sub-directories must exist
        Keypoints -- keypoint names to export
        Type -- split name ("Train"/"Val"/"Test") used in the relative paths
        MasterIndexCounter -- running global image id, continued across trials
        DictList2D, DictList3D -- annotation lists, extended in place

    Returns:
        (DictList3D, DictList2D, MasterIndexCounter) with new entries appended.

    Raises:
        RuntimeError -- if the very first frame of the videos cannot be read.
        (The original dropped into an ipdb breakpoint here and then looped
        forever because `continue` never advanced past the failing read.)
    """
    CamObjList = list(PigeonTrial.camObjects)
    CapList = [cv2.VideoCapture(camObj.VideoPath) for camObj in CamObjList]
    SaveDirList = [os.path.join(OutDir, camObj.CamName) for camObj in CamObjList]
    SeqName = PigeonTrial.TrialName
    counter = 0
    while True:
        FrameList = [cap.read() for cap in CapList]
        if not FrameList[0][0]:
            if counter == 0:
                # Bug fix: previously an ipdb breakpoint + endless `continue`.
                raise RuntimeError("Cannot read the first frame of sequence %s" % SeqName)
            break  # later read failure == end of video
        if counter in RandomFrames:
            BBoxDataList = []
            Data2DList = []
            for x in range(len(CamObjList)):
                # Save this camera's frame
                cv2.imwrite(os.path.join(SaveDirList[x],"%s-F%s.jpg"%(SeqName,counter)),FrameList[x][1])
                # Bounding boxes: ((x1,y1),(x2,y2)) flattened to [x1,y1,x2,y2]
                BBoxData = {ID:list(CamObjList[x].GetBBoxData(CamObjList[x].BBox, counter, ID)) for ID in PigeonTrial.Subjects}
                BBoxData = {ID:[val[0][0],val[0][1],val[1][0],val[1][1]] for ID,val in BBoxData.items()}
                BBoxDataList.append(BBoxData)
                # Per-view 2D keypoints
                Data2D = {ID:CamObjList[x].Read2DKeypointData(CamObjList[x].Keypoint2D, counter, ID,Keypoints,StripName=True) for ID in PigeonTrial.Subjects}
                Data2DList.append(Data2D)
            # 3D keypoints. The original read them through the leaked loop
            # variable `x` (i.e. the last camera); made explicit here --
            # presumably identical for every view since they are world
            # coordinates (TODO confirm).
            Data3D = {ID:CamObjList[-1].Read3DKeypointData(CamObjList[-1].Keypoint3D, counter, ID,Keypoints,StripName=True) for ID in PigeonTrial.Subjects}
            CameraDictList = []
            for x in range(len(CamObjList)):
                CameraDict = {}
                CameraDict["CamName"] = CamObjList[x].CamName
                CameraDict["Path"] = os.path.join(Type,CamObjList[x].CamName,"%s-F%s.jpg"%(SeqName,counter))
                CameraDict["BBox"] = BBoxDataList[x]
                CameraDict["Keypoint2D"] = Data2DList[x]
                CameraDictList.append(CameraDict)
            # 3D record covering all camera views
            DictList3D.append({
                "Image-ID" : MasterIndexCounter,
                "BirdID" : PigeonTrial.Subjects,
                "Keypoint3D": Data3D,
                "CameraData": CameraDictList
            })
            # 2D record for one randomly picked view
            RandomCamIndex = random.sample(list(range(len(CamObjList))),1)[0]
            SaveImgPath = os.path.join(OutDir,"MixedViews","%s-%s-F%s.jpg"%(CamObjList[RandomCamIndex].CamName, SeqName,counter))
            cv2.imwrite(SaveImgPath,FrameList[RandomCamIndex][1])
            DictList2D.append({
                "Image-ID" : MasterIndexCounter,
                "BirdID" : PigeonTrial.Subjects,
                "Path" : os.path.join(Type,"MixedViews","%s-%s-F%s.jpg"%(CamObjList[RandomCamIndex].CamName, SeqName,counter)),
                "Keypoint3D": Data3D,
                "Keypoint2D": Data2DList[RandomCamIndex],
                "BBox":BBoxDataList[RandomCamIndex]
            })
            MasterIndexCounter += 1
        counter += 1
    for cap in CapList:
        cap.release()
    return DictList3D, DictList2D, MasterIndexCounter
# Debugging reference: example bindings for SampleImages' parameters,
# kept for interactive testing of the function body.
# DatasetDir = DatasetDir
# OutDir = TrainDir
# ImgDict = TrainImgtoSampleDict
# AnnotationDir = AnnotationDir
# Type = "Train"
def SampleImages(DatasetDir,OutDir,ImgDict, AnnotationDir, Keypoints,Type):
    """
    Sample images for a type (train/val/test) and save annotation as json.
    Extracts both 3D and 2D ground truth:
      * "<Type>-3D.json": 3D keypoints + per-camera paths/boxes/2D keypoints
      * "<Type>-2D.json": a single random camera view per sampled frame
    ImgDict maps sequence id -> number of frames to sample (from
    GetInstancePerTrial). Camera/MixedViews sub-directories are created
    under OutDir on first use.
    """
    if not os.path.exists(os.path.join(OutDir,"Cam1")):
        os.mkdir(os.path.join(OutDir,"Cam1"))
        os.mkdir(os.path.join(OutDir,"Cam2"))
        os.mkdir(os.path.join(OutDir,"Cam3"))
        os.mkdir(os.path.join(OutDir,"Cam4"))
        os.mkdir(os.path.join(OutDir,"MixedViews"))
    else:
        print("Directories already exist! Ensure Folders are cleared!!")
    MasterIndexCounter = 0
    DictList2D = []
    DictList3D = []
    for Seq, NumImg in tqdm(ImgDict.items()):
        if NumImg == 0:
            continue
        PigeonTrial = Trial.Trial(DatasetDir,Seq)
        PigeonTrial.load3DPopTrainingSet(Filter = True, Type = Type)
        #Find frames where all camera views no NA
        FramesList = []
        for camObj in PigeonTrial.camObjects:
            FramesList.append(camObj.Keypoint2D.dropna(axis=0)["frame"].to_list())
        # import ipdb;ipdb.set_trace()
        NoNAFrameList = sorted(list(set(FramesList[0]) & set(FramesList[1]) & set(FramesList[2])& set(FramesList[3])))
        # if len(NoNAFrameList) == 0:
        #     continue
        # NOTE(review): leftover interactive debug breakpoint below -- it fires
        # when a sequence has fewer complete frames than requested; remove or
        # replace with an error before running unattended.
        if (NumImg/len(NoNAFrameList))*100 > 100: #if not enough images, just get all images from that trial
            import ipdb;ipdb.set_trace()
        print(Seq)
        print("Sampling %s %% of frames present in sequence" %((NumImg/len(NoNAFrameList))*100))
        RandomFrames = sorted(random.sample(NoNAFrameList,int(NumImg)))
        DictList3D, DictList2D,MasterIndexCounter = SaveImages(PigeonTrial, RandomFrames,OutDir,Keypoints,Type, MasterIndexCounter,DictList2D,DictList3D)
    # import ipdb;ipdb.set_trace()
    OutputDict3D = {
        "info" : {
        "Description":"Sampled 3D ground truth Data from 3D-POP dataset",
        "Collated by": "Alex Chan",
        "Date":"06/02/2023",
        "Keypoints": Keypoints,
        "TotalImages": sum(list(ImgDict.values()))
        },
        "Annotations":DictList3D}
    with open(os.path.join(AnnotationDir,"%s-3D.json"%Type), "w") as outfile:
        json.dump(OutputDict3D, outfile, indent=4)
    OutputDict2D = {
        "info" : {
        "Description":"Sampled 2D ground truth Data from 3D-POP dataset",
        "Collated by": "Alex Chan",
        "Date":"06/02/2023",
        "Keypoints": Keypoints,
        "TotalImages": sum(list(ImgDict.values()))
        },
        "Annotations":DictList2D}
    with open(os.path.join(AnnotationDir,"%s-2D.json"%Type), "w") as outfile:
        json.dump(OutputDict2D, outfile,indent=4)
###Temp function to move all calibration info to another folder
def CopyCalibrationFiles(DatasetDir,OutputDir,MetaDataDir):
    """Copy intrinsic/extrinsic calibration files of every sequence into OutputDir.

    Bug fix: the original immediately overwrote all three parameters with
    hard-coded local paths, so the arguments were silently ignored; the
    `import shutil` also sat inside the copy loop. The parameters are now
    honored and the import is hoisted to the top of the function.
    """
    import shutil
    Metadf = pd.read_csv(MetaDataDir,dtype=str)
    if not os.path.exists(OutputDir):
        os.makedirs(OutputDir)
    for Seq in Metadf["Sequence"].tolist():
        PigeonSeq = Trial.Trial(DatasetDir, int(Seq))
        FileNames = PigeonSeq.GenerateFileNames()
        # Intrinsics + extrinsics of all cameras for this sequence
        FilesCopy = FileNames["IntrinsicPaths"] + FileNames["ExtrinsicPaths"]
        for file in FilesCopy:
            shutil.copy(file, OutputDir)
def main(DatasetDir,OutputDir,TrainNum,ValRatio,TestRatio,Keypoints):
    """Plan per-trial sampling counts and extract image splits from 3D-POP.

    Robustness fix: each output directory is now created independently with
    os.makedirs(exist_ok=True). Previously Val/Test/Annotation dirs were only
    created when the Train dir happened to be missing, so a partially created
    output tree crashed later.

    NOTE(review): the Train/Val extraction calls were commented out in the
    original (only the Test split is extracted) and are kept that way here.
    """
    MetaDataDir = os.path.join(DatasetDir,"Pop3DMetadata.csv")
    ImgtoSampleDicts= GetInstancePerTrial(MetaDataDir,DatasetDir,TrainNum,ValRatio,TestRatio)
    TrainImgtoSampleDict = ImgtoSampleDicts["Train"]
    ValImgtoSampleDict = ImgtoSampleDicts["Val"]
    TestImgtoSampleDict = ImgtoSampleDicts["Test"]
    TrainDir = os.path.join(OutputDir,"Train")
    ValDir = os.path.join(OutputDir,"Val")
    TestDir = os.path.join(OutputDir,"Test")
    AnnotationDir = os.path.join(OutputDir, "Annotation")
    for directory in (TrainDir, ValDir, TestDir, AnnotationDir):
        os.makedirs(directory, exist_ok=True)
    # SampleImages(DatasetDir,TrainDir,TrainImgtoSampleDict,AnnotationDir,Keypoints,Type = "Train")
    # SampleImages(DatasetDir,ValDir,ValImgtoSampleDict,AnnotationDir,Keypoints,Type = "Val")
    SampleImages(DatasetDir,TestDir,TestImgtoSampleDict,AnnotationDir,Keypoints,Type = "Test")
if __name__ == "__main__":
    # NOTE(review): machine-specific absolute paths -- adjust before running.
    DatasetDir = "/media/alexchan/My Passport/Pop3D-Dataset_Final/"
    OutputDir = "/home/alexchan/Documents/SampleDatasets/ImageTrainingData/N100"
    # Keypoint names exported to the annotation json files
    Keypoints = ["hd_beak","hd_leftEye","hd_rightEye","hd_nose","bp_leftShoulder","bp_rightShoulder","bp_topKeel","bp_bottomKeel","bp_tail"]
    TrainNum = 100 #Number of training images
    ValRatio = 0.2 #ratio for validation and test
    TestRatio = 0.1
    main(DatasetDir,OutputDir,TrainNum,ValRatio,TestRatio,Keypoints)
| 14,223 | 40.228986 | 164 | py |
MadisNet-Inharmonious-Region-Localization | MadisNet-Inharmonious-Region-Localization-master/test.py | import os
from skimage import io, transform
import torch
import torchvision
from torch.autograd import Variable
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms#, utils
# import torch.optim as optim
import numpy as np
from PIL import Image
import glob
import cv2
from dataset import ihd_dataset
from train import Trainer
from options import ArgsParser
from dataset.ihd_dataset import IhdDataset
from dataset.multi_objects_ihd_dataset import MultiObjectsIhdDataset
from evaluation.metrics import MAE, FScore, compute_IoU, normPRED, compute_mAP, AverageMeter
import warnings
warnings.filterwarnings("ignore")
def tensor2np(x, isMask=False):
    """Convert a normalized NCHW tensor batch to uint8 NHWC numpy images.

    Masks (isMask=True) are assumed to lie in [0, 1]: a single channel is
    broadcast to three and values are scaled to [0, 255]. RGB inputs have
    the ImageNet mean/std normalization undone first.
    """
    t = x.detach().cpu()
    if isMask:
        if t.shape[1] == 1:
            t = t.repeat(1, 3, 1, 1)
        t = t * 255
    else:
        # Undo ImageNet normalization channel-wise (assumes 3 channels).
        mean = t.new_tensor([0.485, 0.456, 0.406]).view(1, 3, 1, 1)
        std = t.new_tensor([0.229, 0.224, 0.225]).view(1, 3, 1, 1)
        t = (t * std + mean) * 255
    return t.numpy().transpose(0, 2, 3, 1).astype(np.uint8)
def save_output(preds, gts, save_dir, img_fn, extra_infos=None, verbose=False, alpha=0.5):
    """Save (or show) a side-by-side visualization of one sample.

    Panels, left to right: composite input | GT mask | retouched image |
    predicted mask. When verbose is False the montage is written under
    save_dir/<sub_dataset>/<basename(img_fn)>, where the sub-dataset is
    derived from the first character of the file name (a/f/d/c).

    Bug fix: the predicted-mask panel previously read the module-level
    `mask_main` variable instead of the `preds['mask_main']` argument
    (a NameError if called before the inference loop defines it).
    `extra_infos` and `alpha` are currently unused; kept for API stability.
    """
    comp_img = gts['inharmonious']
    mask_label = gts['mask_gt']
    mask_pred = preds['mask_main']
    retouched_img = preds['retouched_img']
    guide_map = preds['guide_map']
    comp_img = cv2.cvtColor(tensor2np(comp_img)[0], cv2.COLOR_RGB2BGR)
    retouched_img = cv2.cvtColor(tensor2np(retouched_img, isMask=True)[0], cv2.COLOR_RGB2BGR)
    # guide_map lives in [-1, 1]; map it to a displayable 3-channel uint8 image
    guide_map = ((((guide_map * 0.5) + 0.5)*255).cpu().detach().repeat(1,3,1,1).numpy().transpose(0,2,3,1).astype(np.uint8))[0]
    mask_label = tensor2np(mask_label, isMask=True)[0]
    outs = [comp_img, mask_label, retouched_img, tensor2np(mask_pred[0], isMask=True)[0]]
    outimg = np.concatenate(outs, axis=1)
    if verbose:
        print("show")
        cv2.imshow("out",outimg)
        cv2.waitKey(0)
    else:
        sub_dirs = {'a': 'adobe', 'f': 'flickr', 'd': 'day2night', 'c': 'coco'}
        sub_key = os.path.split(img_fn)[1][0]
        save_dir = os.path.join(save_dir, sub_dirs[sub_key])
        if not os.path.exists(save_dir): os.makedirs(save_dir)
        img_fn = os.path.split(img_fn)[1]
        cv2.imwrite(os.path.join(save_dir, "{}".format(img_fn)), outimg)
# --------- 2. dataloader ---------
#1. dataload
opt = ArgsParser()
opt.phase = 'test'
test_inharm_dataset = IhdDataset(opt)
# test_inharm_dataset = MultiObjectsIhdDataset(opt)
test_inharm_dataloader = DataLoader(test_inharm_dataset, batch_size=1,shuffle=False,num_workers=1)
# --------- 3. model define ---------
print("...load MadisNet...")
checkpoints_dir = opt.checkpoints_dir
prediction_dir = os.path.join(opt.checkpoints_dir, "rst")
if not os.path.exists(prediction_dir): os.makedirs(prediction_dir)
opt.is_train = 0
trainer = Trainer(opt)
trainer.resume(opt.resume, preference=['ihdrnet', 'g', 'domain_encoder'])
device = trainer.device
# ------------ Global Evaluation Metrics -------------
total_iters = 0
gmAP_meter = AverageMeter()
gF1_meter = AverageMeter()
gIoU_meter = AverageMeter()
# ------------- Sub-dataset Metrics ----------
sub_dataset = ['HAdobe', 'HCOCO', 'HDay2Night', 'HFlickr']
lmAP_meters = {k:AverageMeter() for k in sub_dataset}
lF1_meters = {k:AverageMeter() for k in sub_dataset}
lIoU_meters = {k:AverageMeter() for k in sub_dataset}
# Set True to also dump per-sample visualizations via save_output()
save_flag = False
trainer.g.eval()
trainer.ihdrnet.eval()
trainer.domain_encoder.eval()
# --------- 4. inference for each image ---------
for i_test, data in enumerate(test_inharm_dataloader):
    inharmonious, mask_gt = data['comp'], data['mask']
    inharmonious = inharmonious.type(torch.FloatTensor).to(device)
    mask_gt = mask_gt.type(torch.FloatTensor).to(device)
    with torch.no_grad():
        rsts = {}
        model = trainer
        mask_main, retouched_img, guide_map = model.forward(inharmonious)
        # First element of mask_main is the final prediction; normalize both
        # prediction and label to [0, 1] before computing metrics.
        inharmonious_pred = mask_main[0]
        inharmonious_pred = normPRED(inharmonious_pred)
        mask_gt = normPRED(mask_gt)
        pred = inharmonious_pred
        label = mask_gt
        F1 = FScore(pred, label)
        mAP = compute_mAP(pred, label)
        IoU = compute_IoU(pred, label)
        gF1_meter.update(F1, n=1)
        gmAP_meter.update(mAP, n=1)
        gIoU_meter.update(IoU, n=1)
        # sub dataset: first char of the file name encodes the iHarmony4 subset
        sub_key = os.path.split(data['img_path'][0])[1][0]
        if sub_key == 'a': key = 'HAdobe'
        if sub_key == 'f': key = 'HFlickr'
        if sub_key == 'd': key = 'HDay2Night'
        if sub_key == 'c': key = 'HCOCO'
        lmAP_meters[key].update(mAP, n=1)
        lF1_meters[key].update(F1, n=1)
        lIoU_meters[key].update(IoU, n=1)
        total_iters += 1
        if total_iters % 100 == 0:
            print("Batch: [{}/{}] | AP:\t{:.4f} | F1:\t{:.4f} | IoU:\t{:.4f}".format(
                total_iters, len(test_inharm_dataloader),
                gmAP_meter.avg, gF1_meter.avg, gIoU_meter.avg,
            ))
        if save_flag:
            save_output({'mask_main':mask_main, 'retouched_img':retouched_img, 'guide_map':guide_map},
                        {'inharmonious':inharmonious, 'mask_gt':mask_gt},
                        prediction_dir,
                        data['img_path'][0],
                        extra_infos={'f1':F1, 'iou':IoU},
                        verbose=False)
print("\nModel:\t{}".format('MadisNet-{}'.format(opt.model)))
print("Average AP:\t{:.4f}".format(gmAP_meter.avg))
print("Average F1 Score:\t{:.4f}".format(gF1_meter.avg))
print("Average IoU:\t{:.4f}".format(gIoU_meter.avg))
for key in sub_dataset:
print("{}:".format(key))
print("AP:\t{:.4f}\tF1:\t{:.4f}\tIoU:\t{:.4f}".format(lmAP_meters[key].avg, lF1_meters[key].avg, lIoU_meters[key].avg)) | 5,710 | 29.704301 | 136 | py |
MadisNet-Inharmonious-Region-Localization | MadisNet-Inharmonious-Region-Localization-master/train.py | import torch
import torchvision
from torch.autograd import Variable
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms, utils
import torch.optim as optim
import torchvision.transforms as standard_transforms
from tensorboardX import SummaryWriter
import numpy as np
import glob
import os
import itertools
import cv2
import multiprocessing as mp
from networks import DIRLNet, UNet, HDRPointwiseNN, DomainEncoder
from evaluation.metrics import FScore, normPRED, compute_mAP, compute_IoU, AverageMeter
# import matplotlib.pyplot as plt
from dataset.ihd_dataset import IhdDataset
from dataset.multi_objects_ihd_dataset import MultiObjectsIhdDataset
from options import ArgsParser
import pytorch_ssim
import pytorch_iou
# ------- 1. define loss function --------
# NOTE(review): `size_average` is deprecated in recent PyTorch
# (use reduction='mean'); kept as-is to preserve the original behavior.
bce_loss = nn.BCELoss(size_average=True)
ssim_loss = pytorch_ssim.SSIM(window_size=11,size_average=True)
iou_loss = pytorch_iou.IOU(size_average=True)
def bce_ssim_loss(pred, target, loss_weights=(1.0, 1.0, 1.0)):
    """Weighted combination of BCE, (1 - SSIM) and IoU losses.

    Uses the module-level `bce_loss`, `ssim_loss` and `iou_loss` criteria.
    `loss_weights` are the (bce, ssim, iou) coefficients for the total;
    the default is now an immutable tuple instead of a mutable list.

    Returns a dict with 'total' plus the unweighted 'bce'/'ssim'/'iou' terms.
    """
    bce_out = bce_loss(pred, target)
    ssim_out = 1 - ssim_loss(pred, target)
    iou_out = iou_loss(pred, target)
    loss = bce_out * loss_weights[0] + ssim_out * loss_weights[1] + iou_out * loss_weights[2]
    return {"total": loss, "bce": bce_out, "ssim": ssim_out, "iou": iou_out}
def multi_bce_loss_fusion(preds, labels_v, side_weights=1, loss_weights=(1.0, 1.0, 1.0)):
    """Sum bce_ssim_loss over multi-scale side predictions.

    `side_weights` weights the 'total' term of each side output; a scalar is
    broadcast to all predictions (generalized: any non-list/tuple scalar is
    accepted, not only `int` as before). The bce/ssim/iou components are
    accumulated unweighted, matching the original behavior.
    The `loss_weights` default is now an immutable tuple.

    Returns a dict with the accumulated 'total'/'bce'/'ssim'/'iou' terms.
    """
    if not isinstance(side_weights, (list, tuple)):
        side_weights = [side_weights] * len(preds)
    total_loss = 0
    bce_out = 0
    ssim_out = 0
    iou_out = 0
    for pred, w in zip(preds, side_weights):
        loss = bce_ssim_loss(pred, labels_v, loss_weights)
        total_loss += loss['total'] * w
        bce_out += loss['bce']
        ssim_out += loss['ssim']
        iou_out += loss['iou']
    return {"total": total_loss, "bce": bce_out, "ssim": ssim_out, "iou": iou_out}
class Trainer(object):
    def __init__(self, opt):
        """Build the MadisNet training/testing harness from parsed options.

        Sets up logging (train only), device placement, the three
        sub-networks (domain encoder, iHDRNet, detector g), the train/val
        dataloaders and the Adam optimizers with MultiStepLR schedulers.
        `opt` is mutated in place (device/gpus/batch settings).
        """
        self.opt = opt
        # Set Loggers
        if opt.is_train:
            log_dir = os.path.join(opt.checkpoints_dir, "logs")
            if not os.path.exists(log_dir): os.makedirs(log_dir)
            self.writer = SummaryWriter(log_dir) # create a visualizer that display/save images and plots
        # Set Device
        self.gpus = opt.gpu_ids.split(',')
        self.gpus = [int(id) for id in self.gpus]
        self.device = torch.device('cuda:{}'.format(self.gpus[0])) if self.gpus[0]>-1 else torch.device('cpu') # get device name: CPU or GPU
        print(self.device)
        self.opt.device = self.device
        self.opt.gpus = self.gpus
        self.best_acc = 0
        # ------- 3. define model --------
        self.domain_encoder = DomainEncoder(style_dim=16)
        self.ihdrnet = HDRPointwiseNN(opt)
        if self.opt.model == 'dirl':
            print("DIRL is used for MadisNet !")
            self.g = DIRLNet(opt,3)
        elif self.opt.model == 'unet':
            print("UNet is used for MadisNet !")
            self.g = UNet(in_ch=3, n_downs=5)
        else:
            raise ValueError("Unknown model:\t{}".format(self.opt.model))
        # Report parameter counts in millions
        g_size = sum(p.numel() for p in self.g.parameters())/1e6
        ihdrnet_size = sum(p.numel() for p in self.ihdrnet.parameters())/1e6
        e_dom_size = sum(p.numel() for p in self.domain_encoder.parameters())/1e6
        print('--- G params: %.2fM' % (g_size))
        print('--- iHDRNet params: %.2fM' % (ihdrnet_size))
        print('--- E_dom params: %.2fM' % (e_dom_size))
        print('--- Total params: %.2fM' % (g_size + ihdrnet_size + e_dom_size))
        if len(self.gpus) > 1:
            self.dataparallel_func = nn.DataParallel
        else:
            self.dataparallel_func = None
        if opt.is_train == 1:
            if self.dataparallel_func is not None:
                self.domain_encoder = self.dataparallel_func(self.domain_encoder.to(self.device), self.gpus)
                self.g = self.dataparallel_func(self.g.to(self.device), self.gpus)
                self.ihdrnet = self.dataparallel_func(self.ihdrnet.to(self.device), self.gpus)
            else:
                self.domain_encoder.to(self.device)
                self.g.to(self.device)
                self.ihdrnet.to(self.device)
        # Test
        else:
            self.domain_encoder.to(self.device)
            self.g.to(self.device)
            self.ihdrnet.to(self.device)
            self.domain_encoder.eval()
            self.g.eval()
            self.ihdrnet.eval()
        # ------- 2. set the directory of training dataset --------
        self.data_mean = opt.mean.split(",")
        self.data_mean = [float(m.strip()) for m in self.data_mean]
        self.data_std = opt.std.split(",")
        self.data_std = [float(m.strip()) for m in self.data_std]
        dataset_loader = IhdDataset
        inharm_dataset = dataset_loader(opt)
        if opt.is_train == 0:
            opt.batch_size = 1
            opt.num_threads = 1
            opt.serial_batches = True
        # Training Set
        self.inharm_dataloader = torch.utils.data.DataLoader(
            inharm_dataset,
            batch_size=opt.batch_size,
            shuffle=not opt.serial_batches,
            num_workers=int(opt.num_threads),
            drop_last=True)
        # Validation Set (flags temporarily flipped so the dataset loads in
        # val mode with deterministic resize / no flip)
        opt.is_train = 0
        opt.is_val = 1
        opt.preprocess = 'resize'
        opt.no_flip = True
        self.val_dataloader = torch.utils.data.DataLoader(
            dataset_loader(opt),
            batch_size=1,
            shuffle=False,
            num_workers=1)
        # Reset training state
        # NOTE(review): this sets is_train to True unconditionally, so the
        # optimizer block below always runs, even when the Trainer was
        # constructed with opt.is_train == 0 -- confirm this is intended.
        opt.is_train = True
        # ------- 4. define optimizer --------
        if opt.is_train :
            print("---define optimizer...")
            self.image_display = None
            self.domain_encoder_opt = optim.Adam(self.domain_encoder.parameters(), lr=opt.lr, betas=(0.9,0.999), weight_decay=opt.weight_decay)
            self.g_opt = optim.Adam(self.g.parameters(), lr=opt.lr, betas=(0.9, 0.999), weight_decay=opt.weight_decay)
            self.ihdrnet_opt = optim.Adam(self.ihdrnet.parameters(), lr=opt.lr, betas=(0.9,0.999), weight_decay=opt.weight_decay)
            self.domain_encoder_schedular = optim.lr_scheduler.MultiStepLR(self.domain_encoder_opt, milestones=[30, 40, 50, 55], gamma=0.5)
            self.g_schedular = optim.lr_scheduler.MultiStepLR(self.g_opt, milestones=[30, 40, 50, 55], gamma=0.5)
            self.ihdrnet_schedular = optim.lr_scheduler.MultiStepLR(self.ihdrnet_opt, milestones=[30, 40, 50, 55], gamma=0.5)
def adjust_learning_rate(self):
self.domain_encoder_schedular.step()
self.g_schedular.step()
self.ihdrnet_schedular.step()
    def write_display(self, total_it, model, batch_size):
        """Log scalar losses and the current image grid to TensorBoard.

        Every non-callable attribute of `model` whose name starts with
        'loss' is written as a scalar; `model.image_display` (if it is a
        tensor) is denormalized and written as an image grid.
        """
        # write loss
        members = [attr for attr in dir(model) if not callable(getattr(model, attr)) and not attr.startswith("__") and attr.startswith('loss')]
        for m in members:
            self.writer.add_scalar(m, getattr(model, m), total_it)
        # write img
        if isinstance(model.image_display, torch.Tensor):
            image_dis = torchvision.utils.make_grid(model.image_display, nrow=batch_size)
            # Undo ImageNet normalization (CHW layout after make_grid)
            mean = torch.zeros_like(image_dis)
            mean[0,:,:] = .485
            mean[1,:,:] = .456
            mean[2,:,:] = .406
            std = torch.zeros_like(image_dis)
            std[0,:,:] = 0.229
            std[1,:,:] = 0.224
            std[2,:,:] = 0.225
            image_dis = image_dis*std + mean
            self.writer.add_image('Image', image_dis, total_it)
def load_dict(self, net, name, resume_epoch, strict=True, checkpoints_dir=''):
if checkpoints_dir == '':
checkpoints_dir = self.opt.checkpoints_dir
ckpt_name = "{}_epoch{}.pth".format(name, resume_epoch)
if not os.path.exists(os.path.join(checkpoints_dir, ckpt_name)):
ckpt_name = "{}_epoch{}.pth".format(name, "best")
if not os.path.exists(os.path.join(checkpoints_dir, ckpt_name)):
ckpt_name = "{}_epoch{}.pth".format(name, "latest")
print("Loading model weights from {}...".format(ckpt_name))
# restore lr
sch = getattr(self, '{}_schedular'.format(name))
sch.last_epoch = resume_epoch if resume_epoch > 0 else 0
decay_coef = 0
for ms in sch.milestones.keys():
if sch.last_epoch <= ms: decay_coef+=1
for group in sch.optimizer.param_groups:
group['lr'] = group['lr'] * sch.gamma ** decay_coef
ckpt_dict = torch.load(os.path.join(checkpoints_dir,ckpt_name), map_location=self.device)
if 'best_acc' in ckpt_dict.keys():
new_state_dict = ckpt_dict['state_dict']
save_epoch = ckpt_dict['epoch']
self.best_acc = ckpt_dict['best_acc']
print("The model from epoch {} reaches acc at {:.4f} !".format(save_epoch, self.best_acc))
else:
new_state_dict = ckpt_dict
current_state_dict = net.state_dict()
new_keys = tuple(new_state_dict.keys())
for k in new_keys:
if k.startswith('module'):
v = new_state_dict.pop(k)
nk = k.split('module.')[-1]
new_state_dict[nk] = v
if len(self.gpus) > 1:
net.module.load_state_dict(new_state_dict, strict=strict)
else:
net.load_state_dict(new_state_dict, strict=True) # strict
def resume(self, resume_epoch, strict=True, is_pretrain=False, preference=[], checkpoints_dir=''):
if preference != []:
for net_name in preference:
net = getattr(self, net_name)
self.load_dict(net, net_name, resume_epoch, strict=strict, checkpoints_dir=checkpoints_dir)
return
def save(self, epoch, is_pretrain=False, preference=[]):
if preference != []:
for net_name in preference:
model_name = "{}_epoch{}.pth".format(net_name, epoch)
net = getattr(self, net_name)
save_dict = {
'epoch':epoch,
'best_acc':self.best_acc,
'state_dict':net.state_dict(),
'opt':getattr(self, '{}_schedular'.format(net_name)).state_dict()
}
torch.save(save_dict, os.path.join(self.opt.checkpoints_dir, model_name))
return
def denormalize(self, x, isMask=False):
if isMask:
mean = 0
std=1
else:
mean = torch.zeros_like(x)
mean[:,0,:,:] = .485
mean[:,1,:,:] = .456
mean[:,2,:,:] = .406
std = torch.zeros_like(x)
std[:,0,:,:] = 0.229
std[:,1,:,:] = 0.224
std[:,2,:,:] = 0.225
x = (x*std + mean)*255
x = x.cpu().detach().numpy().transpose(0,2,3,1).astype(np.uint8)
if isMask:
if x.shape[3] == 1:
x = x.repeat(3, axis=3)
return x
def norm(self, x):
mean = torch.zeros_like(x)
mean[:,0,:,:] = .485
mean[:,1,:,:] = .456
mean[:,2,:,:] = .406
std = torch.zeros_like(x)
std[:,0,:,:] = 0.229
std[:,1,:,:] = 0.224
std[:,2,:,:] = 0.225
x = (x - mean) / std #*255
return x
def set_requires_grad(self, nets, requires_grad=False):
"""Set requies_grad=Fasle for all the networks to avoid unnecessary computations
Parameters:
nets (network list) -- a list of networks
requires_grad (bool) -- whether the networks require gradients or not
"""
if not isinstance(nets, list):
nets = [nets]
for net in nets:
if net is not None:
for param in net.parameters():
param.requires_grad = requires_grad
def forward(self, img, mask=None):
retouched_img, guide_map = self.ihdrnet(img, img)
delta_img = retouched_img
mask_main = self.g(delta_img)['mask']
# domain codes
if mask is not None:
z_b = self.domain_encoder(img, 1-mask)
z_f = self.domain_encoder(img, mask)
z_mb = self.domain_encoder(retouched_img, 1-mask)
z_mf = self.domain_encoder(retouched_img, mask)
return mask_main, retouched_img,guide_map, z_b,z_f,z_mb,z_mf
else:
return mask_main, retouched_img, guide_map
    def val(self, epoch=0, is_test=False):
        """Evaluate on the validation loader (mAP / F1 / IoU).

        With is_test=False, a new best mean-IoU triggers a "best" checkpoint
        of g/ihdrnet/domain_encoder; with is_test=True only the final
        metrics are printed. Networks are switched to eval() for the loop
        and back to train() afterwards. `epoch` is currently unused.
        """
        print("---start validation---")
        total_iters = 0
        mAPMeter = AverageMeter()
        F1Meter = AverageMeter()
        FbMeter = AverageMeter()
        IoUMeter = AverageMeter()
        self.g.eval()
        self.ihdrnet.eval()
        self.domain_encoder.eval()
        for i_test, data in enumerate(self.val_dataloader):
            inharmonious, mask_gt = data['comp'], data['mask']
            inharmonious = inharmonious.type(torch.FloatTensor).to(self.device)
            mask_gt = mask_gt.type(torch.FloatTensor).to(self.device)
            with torch.no_grad():
                masks, _, guide_map = self.forward(inharmonious)
                # first element is the final mask prediction
                inharmonious_pred = masks[0]
                inharmonious_pred = normPRED(inharmonious_pred)
                mask_gt = normPRED(mask_gt)
                pred = inharmonious_pred
                label = mask_gt
                F1 = FScore(pred, label)
                mAP = compute_mAP(pred, label)
                IoUMeter.update(compute_IoU(pred, label), label.size(0))
                mAPMeter.update(mAP, inharmonious_pred.size(0))
                F1Meter.update(F1, inharmonious_pred.size(0))
                total_iters += 1
                if total_iters % 100 == 0:
                    print("Batch: [{}/{}],\tmAP:\t{:.4f}\tF1:\t{:.4f}\t\tIoU:\t{:.4f}".format((i_test+1) , len(self.val_dataloader), \
                        mAPMeter.avg, F1Meter.avg, IoUMeter.avg))
        if is_test:
            name = self.opt.checkpoints_dir.split('/')[-1]
            print("Model\t{}:\nmAP:\t{:.4f}\nF1:\t{:.4f}\nIoU:\t{:.4f}".format(name,\
                mAPMeter.avg, F1Meter.avg, IoUMeter.avg))
        else:
            val_mIoU = IoUMeter.avg
            if self.best_acc < val_mIoU:
                self.best_acc = val_mIoU
                self.save("best", preference=['g','ihdrnet','domain_encoder'])
                print("New Best score!\nmAP:\t{:.4f},\tF1:\t{:.4f},\tIoU:\t{:.4f}".format(mAPMeter.avg, F1Meter.avg, val_mIoU))
        self.g.train()
        self.ihdrnet.train()
        self.domain_encoder.train()
    def train_epoch(self, epoch, total_epoch=100):
        """Train all three sub-networks (g, ihdrnet, domain_encoder) for one epoch.

        Total loss = detection loss (+ attention loss for the 'dirl' model)
        + a triplet/direction regularizer on domain codes extracted from the
        foreground/background of the input and the retouched image.
        """
        # ------- 5. training process --------
        total_iters = epoch * len(self.inharm_dataloader)
        running_loss = 0.0
        running_tar_loss = 0.0
        # Set meters
        loss_total_meter = AverageMeter()
        loss_det_meter = AverageMeter()
        loss_reg_meter = AverageMeter()
        loss_triplet_meter = AverageMeter()
        F1Meter = AverageMeter()
        self.ihdrnet.train()
        self.g.train()
        self.domain_encoder.train()
        for i, data in enumerate(self.inharm_dataloader):
            total_iters = total_iters + 1
            inharmonious, mask_gt = data['comp'], data['mask']
            inharmonious = inharmonious.type(torch.FloatTensor).to(self.device)
            mask_gt = mask_gt.type(torch.FloatTensor).to(self.device)
            # update the main generator and lut branch
            self.ihdrnet_opt.zero_grad()
            self.g_opt.zero_grad()
            self.domain_encoder_opt.zero_grad()
            # With a GT mask, forward() also returns the four domain codes:
            # background/foreground of the input (z_b/z_f) and of the retouched image (z_mb/z_mf).
            masks, retouched_img, guide_map, z_b, z_f, z_mb,z_mf = self.forward(inharmonious, mask_gt)
            inharmonious_pred = masks
            if self.opt.model == 'dirl':
                # masks[0] is the final prediction; masks[1:] are auxiliary attention maps.
                loss_inharmonious = multi_bce_loss_fusion([inharmonious_pred[0]], mask_gt, loss_weights=[1,self.opt.lambda_ssim, self.opt.lambda_iou])
                self.loss_attention = multi_bce_loss_fusion(inharmonious_pred[1:], mask_gt, loss_weights=[1,self.opt.lambda_ssim, self.opt.lambda_iou])['total']
            else:
                loss_inharmonious = multi_bce_loss_fusion([inharmonious_pred[0]], mask_gt, loss_weights=[1,self.opt.lambda_ssim, self.opt.lambda_iou])
            self.loss_detection_ssim = loss_inharmonious['ssim']
            self.loss_detection_bce = loss_inharmonious['bce']
            self.loss_detection = loss_inharmonious['total']
            self.loss_total = self.loss_detection * self.opt.lambda_detection
            if self.opt.model == 'dirl':
                self.loss_total = self.loss_total + self.loss_attention * self.opt.lambda_attention
            ## triplet loss
            eps = 1e-6
            # Foreground-background code differences before/after retouching.
            z_fb = z_f - z_b
            z_mfmb = z_mf - z_mb
            input_distance = (z_fb**2).sum(dim=1,keepdim=True)
            magnify_distance = (z_mfmb**2).sum(dim=1,keepdim=True)
            # Cosine between the two difference directions; loss_reg pulls them parallel.
            dir_cos = (z_fb*z_mfmb).sum(dim=1,keepdim=True) / (torch.norm(z_fb, dim=1, keepdim=True)*torch.norm(z_mfmb, dim=1, keepdim=True)+eps)
            loss_reg = (1-dir_cos).mean()
            # Margin loss: the retouched-image gap should exceed the input gap by margin m.
            loss_ddm = nn.ReLU()(input_distance-magnify_distance+self.opt.m).mean()
            self.loss_triplet = loss_ddm * self.opt.lambda_tri + loss_reg * self.opt.lambda_reg
            self.loss_total = self.loss_total + self.loss_triplet
            self.loss_total.backward()
            self.g_opt.step()
            self.domain_encoder_opt.step()
            self.ihdrnet_opt.step()
            loss_total_meter.update(self.loss_total.item(), n=inharmonious.shape[0])
            loss_det_meter.update(self.loss_detection.item(), n=inharmonious.shape[0])
            loss_triplet_meter.update(self.loss_triplet.item(), n=inharmonious.shape[0])
            F1Meter.update(FScore(inharmonious_pred[0], mask_gt), n=inharmonious.shape[0])
            if total_iters % self.opt.print_freq == 0:
                print("Epoch: [%d/%d], Batch: [%d/%d], train loss: %.3f, det loss: %.3f, tri loss: %.3f, F1 score: %.4f" % (
                    epoch + 1, self.opt.nepochs, (i + 1) , len(self.inharm_dataloader),
                    loss_total_meter.avg,
                    loss_det_meter.avg,
                    loss_triplet_meter.avg,
                    F1Meter.avg
                ))
            if total_iters % self.opt.display_freq== 0: #
                # Assemble an image grid (input / GT / retouched / prediction) for the visualizer.
                show_size = 5 if inharmonious.shape[0] > 5 else inharmonious.shape[0]
                self.image_display = torch.cat([
                    inharmonious[0:show_size].detach().cpu(),             # input image
                    mask_gt[0:show_size].detach().cpu().repeat(1,3,1,1),  # ground truth
                    retouched_img[0:show_size].detach().cpu(),
                    inharmonious_pred[0][0:show_size].detach().cpu().repeat(1,3,1,1),
                    ],dim=0)
                self.write_display(total_iters, self, show_size)
            # del temporary outputs and loss
            del inharmonious_pred
def train(self, start_epoch=0):
# ------- 5. training process --------
print("---start training...")
for epoch in range(start_epoch, self.opt.nepochs):
self.train_epoch(epoch, total_epoch=self.opt.nepochs)
if (epoch+1) % self.opt.save_epoch_freq == 0:
self.save("{}".format(epoch), preference=['ihdrnet', 'g','domain_encoder'])
self.adjust_learning_rate()
if (epoch+1) < 30:
if (epoch+1) % self.opt.save_epoch_freq == 0:
self.val(epoch)
else:
if (epoch+1) % 3 == 0:
self.val(epoch)
print('-------------Congratulations, No Errors!!!-------------')
if __name__ == '__main__':
    opt = ArgsParser()
    # Fix every RNG seed (numpy / torch CPU / torch CUDA) for reproducibility.
    opt.seed = 42
    np.random.seed(opt.seed)
    torch.manual_seed(opt.seed)
    if torch.cuda.is_available(): torch.cuda.manual_seed_all(opt.seed)
    print(opt.checkpoints_dir.split('/')[-1])
    trainer = Trainer(opt)
    start_epoch = 0
    # opt.resume > -1 means "resume from the checkpoint of that epoch index".
    if opt.resume > -1:
        trainer.resume(opt.resume, preference=['ihdrnet','g', 'domain_encoder'], checkpoints_dir=opt.pretrain_path)
        start_epoch = opt.resume
    trainer.train(start_epoch=start_epoch)
| 20,692 | 40.05754 | 160 | py |
MadisNet-Inharmonious-Region-Localization | MadisNet-Inharmonious-Region-Localization-master/evaluation/metrics.py | import numpy as np
import torch
from sklearn.metrics import average_precision_score
class AverageMeter(object):
    """Tracks the latest value and running statistics of a scalar metric.

    Attributes: ``val`` (last value), ``sum`` (weighted sum), ``count``
    (total weight) and ``avg`` (running mean).
    """
    def __init__(self):
        self.reset()
    def reset(self):
        """Clear all accumulated statistics."""
        self.val = self.avg = self.sum = self.count = 0
    def update(self, val, n=1):
        """Record ``val`` observed ``n`` times and refresh the running mean."""
        self.val = val
        self.count += n
        self.sum += val * n
        self.avg = self.sum / self.count
def normPRED(d, eps=1e-2):
    """Min-max normalize ``d`` (torch.Tensor or np.ndarray) to [0, 1].

    If the value range is nearly degenerate (max - min < eps) only the
    minimum is subtracted, avoiding division by a tiny number.
    """
    if isinstance(d, torch.Tensor):
        lo, hi = torch.min(d), torch.max(d)
    elif isinstance(d, np.ndarray):
        lo, hi = np.min(d), np.max(d)
    if hi - lo < eps:
        return d - lo
    return (d - lo) / (hi - lo)
def compute_mAP(outputs, labels):
    """Mean of the per-sample average precision over a batch of score maps.

    Both tensors are flattened per sample; a constant, near-zero prediction
    row is nudged by 1e-4 so average precision stays defined.
    """
    bs = labels.size(0)
    gt_rows = labels.cpu().detach().view(bs, -1).numpy()
    pred_rows = outputs.cpu().detach().view(bs, -1).numpy()
    scores = []
    for gt_row, pred_row in zip(gt_rows, pred_rows):
        uniques = np.unique(pred_row)
        if len(uniques) == 1 and uniques[0] < 1e-4:
            pred_row += 1e-4
        scores.append(average_precision_score(gt_row, pred_row))
    return np.mean(scores)
def compute_IoU(pred, gt, threshold=0.5, eps=1e-6):
    """Intersection-over-Union between prediction and ground truth.

    torch path: only ``pred`` is binarized, IoU is computed per sample over
    dims (1,2,3) and averaged. numpy path: both arrays are binarized and a
    single global IoU is returned.
    """
    if isinstance(pred, torch.Tensor):
        pred_bin = (pred > threshold).float().to(pred.device)
        inter = (pred_bin * gt).sum(dim=(1, 2, 3))
        union = pred_bin.sum(dim=(1, 2, 3)) + gt.sum(dim=(1, 2, 3)) - inter
        return (inter / (union + eps)).mean().item()
    elif isinstance(pred, np.ndarray):
        pred_bin = (pred > threshold).astype(np.float64)
        gt_bin = (gt > threshold).astype(np.float64)
        inter = (pred_bin * gt_bin).sum()
        union = pred_bin.sum() + gt_bin.sum() - inter
        return inter / (union + eps)
def MAE(pred, gt):
    """Mean absolute error between prediction and ground truth
    (torch.Tensor or np.ndarray)."""
    if isinstance(pred, torch.Tensor):
        return (pred - gt).abs().mean()
    elif isinstance(pred, np.ndarray):
        return np.abs(pred - gt).mean()
def FScore(pred, gt, beta2=1.0, threshold=0.5, eps=1e-6, reduce_dims=[1,2,3]):
    """F-measure with beta^2 = ``beta2`` after binarizing both maps.

    ``threshold == -1`` selects an adaptive threshold of twice the mean
    prediction. torch path: per-sample scores over ``reduce_dims``, then
    averaged (returns a 0-dim tensor). numpy path: one global scalar.
    """
    if isinstance(pred, torch.Tensor):
        if threshold == -1:
            threshold = pred.mean().item() * 2
        ones = torch.ones_like(pred).to(pred.device)
        zeros = torch.zeros_like(pred).to(pred.device)
        pred_bin = torch.where(pred > threshold, ones, zeros)
        gt_bin = torch.where(gt > threshold, ones, zeros)
        tp = (pred_bin * gt_bin).sum(dim=reduce_dims)
        precision = tp / (pred_bin.sum(dim=reduce_dims) + eps)
        recall = tp / (gt_bin.sum(dim=reduce_dims) + eps)
        score = (1 + beta2) * (precision * recall) / (beta2 * precision + recall + eps)
        return score.mean()
    elif isinstance(pred, np.ndarray):
        if threshold == -1:
            threshold = pred.mean() * 2
        pred_bin = np.where(pred > threshold, 1.0, 0.0)
        gt_bin = np.where(gt > threshold, 1.0, 0.0)
        tp = (pred_bin * gt_bin).sum()
        precision = tp / (pred_bin.sum() + eps)
        recall = tp / (gt_bin.sum() + eps)
        return (1 + beta2) * (precision * recall) / (beta2 * precision + recall + eps)
if __name__ == "__main__":
    # Quick manual sanity check: two 3x3 masks differing in a few pixels.
    gt = torch.ones((1,1,3,3))
    gt[0][0][1][1] = 0
    pred = torch.ones((1,1,3,3))
    pred[0][0][1][2] = 0
    pred[0][0][1][0] = 0
    print(compute_IoU(pred, gt)) | 3,678 | 31.557522 | 107 | py |
MadisNet-Inharmonious-Region-Localization | MadisNet-Inharmonious-Region-Localization-master/networks/DIRL.py | import torch
import torch.nn as nn
from torchvision import models
import torch.nn.functional as F
import scipy.stats as st
import numpy as np
from torch.nn.parameter import Parameter
from networks.blocks import Conv2dBlock, BasicBlock, BasicConv
import cv2
import copy
import os
## ---------------------Bi-directional Feature Integration -----------------
class BidirectionFeatureIntegration(nn.Module):
    """Bi-directional Feature Integration (BFI) across adjacent pyramid scales.

    Fuses 2 or 3 neighboring feature maps with a top-down (high->low) and a
    bottom-up (low->high) pathway, concatenates both directions per scale,
    and sums the projections into a single ``out_ch`` feature at the scale
    selected by ``fusion_mode``:
      * 'h2l' / 'l2h': two inputs [low, high]; output at the low / high scale.
      * 'hl2m' / 'lh2m': three inputs [low, mid, high]; output at the mid scale.
    """
    def __init__(self, in_ch_list, out_ch=64, fusion_mode='h2l'):
        super(BidirectionFeatureIntegration, self).__init__()
        self.n_input = len(in_ch_list)
        assert self.n_input > 0
        self.fusion_mode = fusion_mode
        self.downsample = nn.AvgPool2d(3,2,1)
        self.upsample = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
        self.relu = nn.ReLU(True)
        if self.fusion_mode == 'h2l' or self.fusion_mode == 'l2h':
            l_in_ch = in_ch_list[0]
            h_in_ch = in_ch_list[1]
            # top_down maps high->low channels (stride 1, used after upsampling);
            # bottom_up maps low->high channels with stride 2 (spatial downsample).
            self.top_down = Conv2dBlock(h_in_ch, l_in_ch, 3,1,1, norm='bn', activation='relu', activation_first=True)
            self.bottom_up = Conv2dBlock(l_in_ch, h_in_ch, 3,2,1, norm='bn', activation='relu', activation_first=True)
            if self.fusion_mode == 'h2l':
                # Each scale is a concat of [top-down, bottom-up] halves, hence ratio 2.
                in_ch_ratio = 2
                self.h_concat = Conv2dBlock(h_in_ch * in_ch_ratio, out_ch, 3,1,1, norm='bn', activation='relu', activation_first=True)
                self.l_concat = Conv2dBlock(l_in_ch * in_ch_ratio, out_ch, 3,1,1, norm='bn', activation='relu', activation_first=True)
            elif self.fusion_mode == 'l2h':
                in_ch_ratio = 2
                # Stride-2 l_concat brings the low scale down to the high scale.
                self.l_concat = Conv2dBlock(l_in_ch*in_ch_ratio, out_ch, 3,2,1, norm='bn', activation='relu', activation_first=True)
                self.h_concat = Conv2dBlock(h_in_ch*in_ch_ratio, out_ch, 3,1,1, norm='bn', activation='relu', activation_first=True)
        elif self.fusion_mode == 'hl2m' or self.fusion_mode == 'lh2m':
            l_in_ch = in_ch_list[0]
            m_in_ch = in_ch_list[1]
            h_in_ch = in_ch_list[2]
            # Chained top-down (h->m->l) and bottom-up (l->m->h) adapters.
            self.top_down_h2m = Conv2dBlock(h_in_ch, m_in_ch, 3,1,1, norm='bn', activation='relu', activation_first=True)
            self.top_down_m2l = Conv2dBlock(m_in_ch, l_in_ch, 3,1,1, norm='bn', activation='relu', activation_first=True)
            self.bottom_up_m2h = Conv2dBlock(m_in_ch, h_in_ch, 3,2,1, norm='bn', activation='relu', activation_first=True)
            self.bottom_up_l2m = Conv2dBlock(l_in_ch, m_in_ch, 3,2,1, norm='bn', activation='relu', activation_first=True)
            in_ch_ratio = 2
            # l is downsampled (stride 2) and h is upsampled in forward(), so all
            # three projections land on the mid scale before summation.
            self.l_concat = Conv2dBlock(l_in_ch * in_ch_ratio, out_ch, 3,2,1, norm='bn', activation='relu', activation_first=True)
            self.m_concat = Conv2dBlock(m_in_ch * in_ch_ratio, out_ch, 3,1,1, norm='bn', activation='relu', activation_first=True)
            self.h_concat = Conv2dBlock(h_in_ch * in_ch_ratio, out_ch, 3,1,1, norm='bn', activation='relu', activation_first=True)
        else:
            raise NameError("Unknown mode:\t{}".format(fusion_mode))
    def forward(self, xl, xm=None, xh=None):
        """Fuse the given scales; ``xm`` is only used in the 3-input modes."""
        if self.fusion_mode == 'h2l' or self.fusion_mode == 'l2h':
            # Bottom  xl ----> xh     Up
            #          |        \
            # Down     \ xl <---- xh  Top
            #           \  /      \  /
            #            C -> + <- C
            #                 v
            #                out
            top_down_results = [xh]
            # Upsample high feature and add it onto the low scale.
            xh2l = self.top_down(F.interpolate(xh, scale_factor=2))
            top_down_results.insert(0, xl + xh2l)
            bottom_up_results = [xl]
            xl2h = self.bottom_up(xl)
            bottom_up_results.append(xh+xl2h)
            # Concat the two pathway results per scale.
            xl_cat = torch.cat([top_down_results[0],bottom_up_results[0]], dim=1)
            xh_cat = torch.cat([top_down_results[1],bottom_up_results[1]], dim=1)
            if self.fusion_mode == 'h2l':
                xh_cat = self.h_concat(F.interpolate(xh_cat, scale_factor=2))
                xl_cat = self.l_concat(xl_cat)
            elif self.fusion_mode == 'l2h':
                xh_cat = self.h_concat(xh_cat)
                xl_cat = self.l_concat(xl_cat)
            xout = xh_cat + xl_cat
        elif self.fusion_mode == 'hl2m' or self.fusion_mode== 'lh2m':
            # Bottom  xl ----> xm ----> xh   Up
            #          \        \        \
            # Down      \ xl <---- xm <---- xh  Top
            #            \  /     \  /     \  /
            #             C ----> C <---- C
            #                     v
            #                    out
            top_down_results = [xh]
            # Top-down chain: h -> m -> l (each step upsampled, then residual add).
            xh2m = self.top_down_h2m(F.interpolate(xh, scale_factor=2)) + xm
            top_down_results.insert(0, xh2m)
            xm2l = self.top_down_m2l(F.interpolate(xh2m, scale_factor=2)) + xl
            top_down_results.insert(0, xm2l)
            bottom_up_results = [xl]
            # Bottom-up chain: l -> m -> h (strided convs downsample).
            xl2m = self.bottom_up_l2m(xl) + xm
            bottom_up_results.append(xl2m)
            xm2h = self.bottom_up_m2h(xl2m) + xh
            bottom_up_results.append(xm2h)
            xl_cat = torch.cat([top_down_results[0],bottom_up_results[0]], dim=1)
            xm_cat = torch.cat([top_down_results[1],bottom_up_results[1]], dim=1)
            xh_cat = torch.cat([top_down_results[2],bottom_up_results[2]], dim=1)
            # Project every scale to the mid resolution and sum.
            xl_cat = self.l_concat(xl_cat)
            xm_cat = self.m_concat(xm_cat)
            xh_cat = self.h_concat(F.interpolate(xh_cat, scale_factor=2))
            xout = xl_cat + xm_cat + xh_cat
        return xout
class Transition(nn.Module):
    """Applies one BidirectionFeatureIntegration per pyramid level.

    Border levels (0 and 4) fuse two adjacent scales; interior levels fuse
    three. ``forward`` maps a 5-element feature list to a 5-element list of
    fused features.
    """
    def __init__(self, in_ch_list, out_ch_list):
        super(Transition, self).__init__()
        c0, c1, c2, c3, c4 = in_ch_list
        o0, o1, o2, o3, o4 = out_ch_list
        self.im0 = BidirectionFeatureIntegration([c0, c1], o0, fusion_mode='h2l')
        self.im1 = BidirectionFeatureIntegration([c0, c1, c2], o1, fusion_mode='hl2m')
        self.im2 = BidirectionFeatureIntegration([c1, c2, c3], o2, fusion_mode='hl2m')
        self.im3 = BidirectionFeatureIntegration([c2, c3, c4], o3, fusion_mode='hl2m')
        self.im4 = BidirectionFeatureIntegration([c3, c4], o4, fusion_mode='l2h')
    def forward(self, xs, gc=None):
        # gc is accepted for interface compatibility but unused.
        return [
            self.im0(xl=xs[0], xh=xs[1]),
            self.im1(xl=xs[0], xm=xs[1], xh=xs[2]),
            self.im2(xl=xs[1], xm=xs[2], xh=xs[3]),
            self.im3(xl=xs[2], xm=xs[3], xh=xs[4]),
            self.im4(xl=xs[3], xh=xs[4]),
        ]
class VanillaTransport(nn.Module):
    """Plain per-scale transition: one independent 3x3 conv per pyramid level
    (baseline alternative to the bidirectional ``Transition``).

    Args:
        in_ch_list: input channel count per level.
        out_ch_list: output channel count per level (same length).
    """
    def __init__(self, in_ch_list, out_ch_list):
        super(VanillaTransport, self).__init__()
        self.model = nn.ModuleDict()
        # Original zipped the lists but ignored the loop variables and indexed
        # by position instead; use the zipped pair directly.
        for idx, (in_ch, out_ch) in enumerate(zip(in_ch_list, out_ch_list)):
            self.model[f'conv_{idx}'] = nn.Conv2d(in_ch, out_ch, 3, 1, 1)
    def forward(self, xs):
        """Map each feature level through its own conv.

        Generalized to any number of levels (the original hardcoded five);
        behavior is identical for the 5-level case.
        """
        return [self.model[f'conv_{i}'](x) for i, x in enumerate(xs)]
## -----------------Mask-guided Dual Attention ---------------------
class ECABlock(nn.Module):
    """Efficient Channel Attention (ECA) module.

    Global-average-pools the spatial dimensions, runs a 1-D convolution over
    the channel axis to model local cross-channel interaction, squashes the
    result with a sigmoid, and rescales the input channel-wise.

    Args:
        channel: number of input channels (kept for API parity; the 1-D conv
            itself is channel-count agnostic).
        k_size: kernel size of the channel-interaction convolution.
    """
    def __init__(self, channel, k_size=3):
        super(ECABlock, self).__init__()
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        self.conv = nn.Conv1d(1, 1, kernel_size=k_size, padding=(k_size - 1) // 2, bias=False)
        self.sigmoid = nn.Sigmoid()
    def forward(self, x):
        # x: (B, C, H, W) -> squeeze spatial info to (B, C, 1, 1).
        squeezed = self.avg_pool(x)
        # View channels as a length-C sequence for the 1-D conv, then restore shape.
        weights = self.conv(squeezed.squeeze(-1).transpose(-1, -2)).transpose(-1, -2).unsqueeze(-1)
        weights = self.sigmoid(weights)
        return x * weights.expand_as(x)
## SA
def _get_kernel(kernlen=16, nsig=3):
interval = (2*nsig+1.)/kernlen
x = np.linspace(-nsig-interval/2., nsig+interval/2., kernlen+1)
kern1d = np.diff(st.norm.cdf(x))
kernel_raw = np.sqrt(np.outer(kern1d, kern1d))
kernel = kernel_raw/kernel_raw.sum()
return kernel
def min_max_norm(in_):
    """Per-map min-max normalization over the spatial dimensions.

    Each (batch, channel) map of a 4-D tensor is rescaled by its own spatial
    min/max; the 1e-8 term guards against division by zero on constant maps.
    """
    hi = torch.amax(in_, dim=(2, 3), keepdim=True)
    lo = torch.amin(in_, dim=(2, 3), keepdim=True)
    # Broadcasting replaces the explicit expand_as of the original.
    return (in_ - lo) / (hi - lo + 1e-8)
##
class SpatialGate(nn.Module):
    """Learned spatial attention gate.

    Predicts a single-channel attention map from the input features and
    multiplies it back onto them.  When the last '_'-separated component of
    ``mask_mode`` contains 'gb', the hard attention is additionally blurred
    with a fixed 31x31 Gaussian kernel and the features are re-weighted by
    the element-wise max of the soft and hard attention.
    Returns ``(gated_features, attention_map)``.
    """
    def __init__(self, in_dim=2, mask_mode='mask'):
        super(SpatialGate, self).__init__()
        kernel_size = 7
        self.mask_mode = mask_mode
        # 3x3 conv then a large-kernel conv projecting to a 1-channel map.
        self.spatial = nn.Sequential(*[
            BasicConv(in_dim, in_dim, 3, 1, 1),
            BasicConv(in_dim, 1, kernel_size, stride=1, padding=(kernel_size-1) // 2, relu=False)
        ])
        if 'gb' in mask_mode.split('_')[-1]:
            print("Using Gaussian Filter in mda!")
            # Registered as a Parameter, so the "fixed" kernel is technically
            # trainable unless excluded from the optimizer.
            gaussian_kernel = np.float32(_get_kernel(31, 4))
            gaussian_kernel = gaussian_kernel[np.newaxis, np.newaxis, ...]
            self.gaussian_kernel = Parameter(torch.from_numpy(gaussian_kernel))
    def forward(self, x):
        x_compress = x
        x_out = self.spatial(x_compress)
        # NOTE(review): F.sigmoid is deprecated in favor of torch.sigmoid.
        attention = F.sigmoid(x_out) # broadcasting
        x = x * attention
        if 'gb' in self.mask_mode:
            # padding=15 keeps spatial size for the 31x31 kernel.
            soft_attention = F.conv2d(attention, self.gaussian_kernel, padding=15)
            soft_attention = min_max_norm(soft_attention) # normalization
            x = torch.mul(x, soft_attention.max(attention)) # x * max(soft, hard)
        return x, attention#x_out#
class MaskguidedDualAttention(nn.Module):
    """Channel attention (ECA) followed by mask-guided spatial attention.

    Returns the sum of the spatially-gated and channel-attended features,
    plus the learned spatial attention mask.
    """
    def __init__(self, gate_channels, mask_mode='mask'):
        super(MaskguidedDualAttention, self).__init__()
        self.ChannelGate = ECABlock(gate_channels)
        self.SpatialGate = SpatialGate(gate_channels, mask_mode=mask_mode)
        self.mask_mode = mask_mode
    def forward(self, x):
        channel_attended = self.ChannelGate(x)
        spatial_attended, mask = self.SpatialGate(channel_attended)
        # Residual-style combination of the two attention streams.
        return spatial_attended + channel_attended, mask
## -----------------Global-context Guided Decoder ---------------------
class GGDBlock(nn.Module):
    """One stage of the Global-context Guided Decoder.

    Combines the current-scale feature ``x`` with an upsampled "up" stream
    (multiplicative guidance) and an upsampled "bottom" stream (concatenated
    context).  The outermost block additionally upsamples once more and
    projects to a single-channel prediction.  Returns a dict with keys
    ``'xup'`` and ``'xbottom'`` feeding the next (shallower) stage.
    """
    def __init__(self, channel=32, is_outmost=False):
        super(GGDBlock, self).__init__()
        self.relu = nn.ReLU(True)
        self.upsample = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
        self.conv_inup = Conv2dBlock(channel, channel, 3, 1, padding=1, norm='bn', activation='none', use_bias=False)
        self.conv_inbottom = Conv2dBlock(channel, channel, 3, 1, padding=1, norm='bn', activation='none', use_bias=False)
        self.conv_cat = Conv2dBlock(channel*2, channel, 3, 1, padding=1, norm='bn', activation='none', use_bias=False)
        self.outmost = is_outmost
        if self.outmost:
            # conv4 is created but unused in forward (its call is commented out).
            self.conv4 = Conv2dBlock(channel, channel, 3, 1, padding=1, norm='bn', activation='none', use_bias=False)
            self.conv5 = nn.Conv2d(channel, 1, 1)
    def forward(self, x, up,bottom):
        # x
        # |
        # <-[C]-- * ---- Up
        # <--^---------- Bottom
        x_up = self.conv_inup(self.upsample(up)) * x # multiplicative guidance
        x_bottom = self.conv_inbottom(self.upsample(bottom)) # additive context stream
        x_cat = torch.cat((x_up, x_bottom), 1)
        x_out = self.conv_cat(x_cat)
        xup_out = x_out
        xbottom_out = x_bottom
        if self.outmost:
            x_out = self.upsample(x_out)
            # x = self.conv4(x_out)
            x = self.conv5(x_out)
            # 'xup' holds the 1-channel prediction at full resolution.
            return {'xup':x, 'xbottom':x_out}
        else:
            return {'xup':xup_out, 'xbottom':xbottom_out}
class GGD(nn.Module):
    """Global-context Guided Decoder: a chain of GGDBlocks applied from the
    deepest feature to the shallowest; only stage d0 (the outermost block)
    produces the final one-channel prediction."""
    def __init__(self, channel=32, nstage=4):
        super(GGD, self).__init__()
        self.nstage = nstage - 1
        blocks = {}
        for i in range(self.nstage):
            # Stage 0 is the outermost block and emits the 1-channel output.
            blocks['d{}'.format(i)] = GGDBlock(channel=channel, is_outmost=(i == 0))
        self.decoder = nn.ModuleDict(blocks)
    def forward(self, xs):
        # Seed both the "up" and "bottom" streams with the deepest feature.
        xup = xdown = xs[-1]
        # Walk the intermediate features deepest-first (xs[0] and xs[-1] excluded).
        for step, feat in enumerate(reversed(xs[1:-1])):
            stage = self.nstage - step - 1
            out = self.decoder['d{}'.format(stage)](feat, xup, xdown)
            xup, xdown = out['xup'], out['xbottom']
        return xup
## ----------------DIRL --------------------------------
class InharmoniousEncoder(nn.Module):
    """ResNet-backboned encoder producing a 5-level feature pyramid.

    Replaces the backbone's stem with a stride-1 3x3 conv (so stage 1 keeps
    the input resolution) and appends a fifth stage of three BasicBlocks
    after a max-pool.  ``forward`` returns ``{"skips": [h1..h5]}``.
    """
    def __init__(self, opt, n_channels=3):
        super(InharmoniousEncoder, self).__init__()
        if opt.backbone == 'resnet34':
            resnet = models.resnet34(pretrained=True)
            self.in_dims = [64, 128, 256, 512, 512]
        elif opt.backbone == 'resnet50':
            resnet = models.resnet50(pretrained=True)
            self.in_dims = [64, 256, 512, 1024, 2048]
        ## -------------Encoder--------------
        # Custom stem: stride-1 conv keeps full resolution (no 7x7/stride-2 stem).
        self.inconv = nn.Conv2d(n_channels,64,3,1,padding=1)
        self.inbn = nn.BatchNorm2d(64)
        self.inrelu = nn.ReLU(inplace=True) #224,64
        self.maxpool = nn.MaxPool2d(3,2,1)
        #stage 1
        self.encoder1 = resnet.layer1 #112,64*4
        #stage 2
        self.encoder2 = resnet.layer2 #56,128*4
        #stage 3
        self.encoder3 = resnet.layer3 #28,256*4
        #stage 4
        self.encoder4 = resnet.layer4 #14,512*4
        # Extra stage 5: three BasicBlocks at 512 channels.
        self.encoder5 = nn.Sequential(*[
            BasicBlock(resnet.inplanes, 512),
            BasicBlock(512, 512),
            BasicBlock(512, 512),
        ])
        self.inplanes = resnet.inplanes
    def forward(self, x, backbone_features=None):
        # backbone_features is accepted for interface compatibility but unused.
        hx = x
        hx = self.inconv(hx)
        hx = self.inbn(hx)
        hx = self.inrelu(hx)
        h1 = self.encoder1(hx) # 224
        h2 = self.encoder2(h1) # 112
        h3 = self.encoder3(h2) # 56
        h4 = self.encoder4(h3) # 28
        hx = self.maxpool(h4)
        h5 = self.encoder5(hx) # 14
        return {"skips":[h1,h2,h3,h4,h5]}
class InharmoniousDecoder(nn.Module):
    """Decoder: multi-scale Transition fusion, mask-guided dual attention per
    level, then the Global-context Guided Decoder (GGD).

    ``forward`` takes the encoder output dict and returns
    ``{"mask": [final_pred, mda_mask_deepest, ..., mda_mask_shallowest]}``
    (only ``[final_pred]`` in 'vanilla' mda mode).
    """
    def __init__(self,opt, n_channels=3):
        super(InharmoniousDecoder,self).__init__()
        ## -------------Dimention--------------
        self.opt = opt
        if opt.backbone == 'resnet34':
            self.dims = [512,512,256,128,64,64]
        elif opt.backbone == 'resnet50':
            self.dims = [2048, 1024, 512, 256, 64,64]
        self.n_layers = len(self.dims)-1
        ## ------------Transition Layer------
        # Transition takes shallow-to-deep channel counts, hence the reversal.
        self.trans_in_list = self.dims[:-1][::-1]
        self.trans_out_list = [opt.ggd_ch] * 5
        self.trans = Transition(
            in_ch_list=self.trans_in_list,
            out_ch_list=self.trans_out_list,
            )
        ## ------------Attention Layer-----------
        self.attention_layers= nn.ModuleDict()
        for i in range(self.n_layers):
            if self.opt.mda_mode == 'vanilla':
                print("Using vanilla mda!")
            elif 'mask' in self.opt.mda_mode:
                print("Using learnable mask mda!")
                self.attention_layers['mda_{}'.format(i)] = MaskguidedDualAttention(opt.ggd_ch, mask_mode=self.opt.mda_mode)
        # ------------ Decoder Layer-----------
        self.decoder_layers = nn.ModuleDict()
        self.decoder_layers['deconv'] = GGD(opt.ggd_ch)
    def forward(self,z):
        x = z['skips']
        mda_masks = []
        ## -------------Layer Fusion-------
        x = self.trans(x)
        ## -------------Attention ------
        # NOTE(review): in 'vanilla' mode attention_layers is empty, so this loop
        # would raise KeyError — presumably 'vanilla' is never used; confirm.
        for i in range(self.n_layers-1, -1, -1):
            fused_layer = x[i]
            fused_layer, m = self.attention_layers['mda_{}'.format(i)](fused_layer)
            # Upsample every attention mask to the shallowest level's resolution.
            dst_shape = tuple(x[0].shape[2:])
            m = F.interpolate(m, size=dst_shape, mode='bilinear', align_corners=True)
            mda_masks.append(m)
            x[i] = fused_layer
        ## ------------Decoding --------
        x = self.decoder_layers['deconv'](x).sigmoid()
        if self.opt.mda_mode != 'vanilla':
            return {"mask":[x]+mda_masks}
        else:
            return {"mask":[x]}
class DIRLNet(nn.Module):
    """DIRL inharmonious-region localization network: ResNet-based encoder
    plus attention/GGD decoder.

    ``forward`` returns the decoder's mask dict augmented with ``'lut_z'``,
    an intermediate encoder feature used as conditioning by the LUT branch.
    """
    def __init__(self, opt, input_nc=3):
        super(DIRLNet, self).__init__()
        self.encoder = InharmoniousEncoder(opt, input_nc)
        self.decoder = InharmoniousDecoder(opt, input_nc)
        self.opt = opt
        self.inplanes = self.encoder.inplanes
    def forward(self, x):
        z = self.encoder(x)
        out =self.decoder(z)
        # Expose the 4th encoder skip for the LUT branch.
        extra_info = {'lut_z':z['skips'][3]}
        out.update(extra_info)
        return out
    def load_dict(self, net, load_path, strict=True):
        """Load a checkpoint into ``net``.

        Accepts either a raw state dict or a wrapped dict containing
        'state_dict'/'epoch'/'best_acc', and strips any DataParallel
        'module.' key prefixes before loading.
        """
        ckpt_dict = torch.load(load_path, map_location=self.opt.device)
        if 'best_acc' in ckpt_dict.keys():
            new_state_dict = ckpt_dict['state_dict']
            save_epoch = ckpt_dict['epoch']
            self.best_acc = ckpt_dict['best_acc']
            print("The model from epoch {} reaches acc at {:.4f} !".format(save_epoch, self.best_acc))
        else:
            new_state_dict = ckpt_dict
        # Strip DataParallel 'module.' prefixes in place.
        new_keys = tuple(new_state_dict.keys())
        for k in new_keys:
            if k.startswith('module'):
                v = new_state_dict.pop(k)
                nk = k.split('module.')[-1]
                new_state_dict[nk] = v
        if len(self.opt.gpus) > 1:
            net.module.load_state_dict(new_state_dict, strict=strict)
        else:
            # Bug fix: honor the caller-supplied `strict` flag (was hardcoded True).
            net.load_state_dict(new_state_dict, strict=strict)
    def load_pretrain_params(self, load_path):
        """Load the fixed epoch-60 encoder/decoder checkpoints from ``load_path``."""
        encoder_path = os.path.join(load_path, 'encoder_epoch60.pth')
        decoder_path = os.path.join(load_path, 'decoder_epoch60.pth')
        self.load_dict(self.encoder, encoder_path, strict=True)
        self.load_dict(self.decoder, decoder_path, strict=True)
        return
| 18,647 | 40.44 | 134 | py |
MadisNet-Inharmonious-Region-Localization | MadisNet-Inharmonious-Region-Localization-master/networks/iHDRNet.py | import math
import torch
import torch.nn.functional as F
from torch import nn
from torch.nn import Parameter
from networks.blocks import Conv2d_cd, ResNetBlock, Conv2dBlock
import numpy as np
class SelfAttention(nn.Module):
    """Scaled dot-product attention over spatial positions.

    In 'self' mode, query/key/value all come from ``x`` and the result is a
    learnable-gamma residual.  In any other mode, key/value come from ``y``
    (cross-attention) and the result is concatenated with ``x`` along
    channels (doubling the channel count).
    """
    def __init__(self,in_dim, mode='self'):
        super(SelfAttention,self).__init__()
        self.chanel_in = in_dim
        self.mode = mode
        # Query/key are projected to in_dim//reduction channels; value keeps in_dim.
        reduction = 8
        self.query_conv = nn.Conv2d(in_channels = in_dim , out_channels = in_dim//reduction , kernel_size= 1)
        self.key_conv = nn.Conv2d(in_channels = in_dim , out_channels = in_dim//reduction , kernel_size= 1)
        self.value_conv = nn.Conv2d(in_channels = in_dim , out_channels = in_dim , kernel_size= 1)
        # Residual gate, initialized at 0 so the block starts as identity.
        self.gamma = nn.Parameter(torch.zeros(1))
        # sqrt(d_k) scaling factor for the attention logits.
        self.D = np.sqrt(in_dim//reduction)
        self.softmax  = nn.Softmax(dim=-1) #
    def forward(self,x, y=None):
        """
        inputs :
            x : input feature maps( B X C X W X H)
            y : key/value source for cross-attention (ignored in 'self' mode)
        returns :
            out : self attention value + input feature
            attention: B X N X N (N is Width*Height)
        """
        m_batchsize,C,width ,height = x.size()
        proj_query = self.query_conv(x).view(m_batchsize,-1,width*height).permute(0,2,1) # B X CX(N)
        if self.mode == 'self':
            proj_key = self.key_conv(x).view(m_batchsize,-1,width*height) # B X C x (*W*H)
            energy = torch.bmm(proj_query,proj_key) # transpose check
            attention = self.softmax(energy / self.D) # BX (N) X (N)
            proj_value = self.value_conv(x).view(m_batchsize,-1,width*height) # B X C X N
        else:
            proj_key = self.key_conv(y).view(m_batchsize, -1, width*height)
            energy = torch.bmm(proj_query,proj_key) # transpose check
            attention = self.softmax(energy / self.D) # BX (N) X (N)
            proj_value = self.value_conv(y).view(m_batchsize,-1,width*height) # B X C X N
        # Weighted sum of values, reshaped back to the spatial layout.
        out = torch.bmm(proj_value,attention.permute(0,2,1))
        out = out.view(m_batchsize,C,width,height)
        if self.mode == 'self':
            out = self.gamma*out + x
        else:
            # Cross mode: concatenate attended features with the query input.
            out = torch.cat([out, x],dim=1)
        return out
class Slice(nn.Module):
    """Slices a bilateral grid with a guidance map (HDRNet-style).

    Builds a normalized (h, w, guide) sampling grid in [-1, 1] and uses
    ``grid_sample`` on the 5-D bilateral grid to fetch per-pixel affine
    coefficients.  Input grid: N x 12 x luma x gh x gw; output: N x 12 x H x W.
    """
    def __init__(self):
        super(Slice, self).__init__()
    def forward(self, bilateral_grid, guidemap, mode='hdr'):
        # Nx12x8x16x16
        # get_device() returns -1 on CPU, so the .to(device) below is skipped there.
        device = bilateral_grid.get_device()
        N, _, H, W = guidemap.shape
        hg, wg = torch.meshgrid([torch.arange(0, H), torch.arange(0, W)]) # [0,511] HxW
        if device >= 0:
            hg = hg.to(device)
            wg = wg.to(device)
        # Normalize pixel coordinates to [-1, 1] as required by grid_sample.
        hg = hg.float().repeat(N, 1, 1).unsqueeze(3) / (H-1) * 2 - 1 # norm to [-1,1] NxHxWx1
        wg = wg.float().repeat(N, 1, 1).unsqueeze(3) / (W-1) * 2 - 1 # norm to [-1,1] NxHxWx1
        guidemap = guidemap.permute(0,2,3,1).contiguous()
        # The guide value selects the luma bin (3rd sampling coordinate).
        guidemap_guide = torch.cat([hg, wg, guidemap], dim=3).unsqueeze(1) # Nx1xHxWx3
        coeff = F.grid_sample(bilateral_grid, guidemap_guide, 'bilinear', align_corners=True) # Nx12xHxW
        return coeff.squeeze(2)
class ApplyCoeffs(nn.Module):
    """Applies per-pixel 3x4 affine color coefficients to a full-res image.

    The input is first de-normalized from ImageNet statistics back to [0, 1],
    then each output channel is an affine combination of the RGB input.  If
    ``use_norm`` is set, the result is re-normalized before returning.
    """
    def __init__(self, use_norm=False):
        super(ApplyCoeffs, self).__init__()
        self.use_norm = use_norm
    def denormalize(self, x, isMask=False):
        """Undo ImageNet normalization (identity transform for masks)."""
        if isMask:
            mean = 0
            std=1
        else:
            mean = torch.zeros_like(x)
            mean[:,0,:,:] = .485
            mean[:,1,:,:] = .456
            mean[:,2,:,:] = .406
            std = torch.zeros_like(x)
            std[:,0,:,:] = 0.229
            std[:,1,:,:] = 0.224
            std[:,2,:,:] = 0.225
        x = (x*std + mean) #*255
        return x # change the range into [0,1]
    def norm(self, x):
        """Re-apply ImageNet per-channel normalization."""
        mean = torch.zeros_like(x)
        mean[:,0,:,:] = .485
        mean[:,1,:,:] = .456
        mean[:,2,:,:] = .406
        std = torch.zeros_like(x)
        std[:,0,:,:] = 0.229
        std[:,1,:,:] = 0.224
        std[:,2,:,:] = 0.225
        x = (x - mean) / std #*255
        return x
    def forward(self, coeff, full_res_input):
        '''
        Apply the sliced per-pixel affine transform:
            r = a11*r + a12*g + a13*b + a14
            g = a21*r + a22*g + a23*b + a24
            ...
        coeff channels 0-3 / 4-7 / 8-11 hold the R / G / B rows.
        '''
        full_res_input = self.denormalize(full_res_input)
        # coeff[:,:,:20] = coeff[:,:,50:70]
        R = torch.sum(full_res_input * coeff[:, 0:3, :, :], dim=1, keepdim=True) + coeff[:, 3:4, :, :]
        G = torch.sum(full_res_input * coeff[:, 4:7, :, :], dim=1, keepdim=True) + coeff[:, 7:8, :, :]
        B = torch.sum(full_res_input * coeff[:, 8:11, :, :], dim=1, keepdim=True) + coeff[:, 11:12, :, :]
        # return self.norm(torch.cat([R, G, B], dim=1))
        if self.use_norm:
            return self.norm(torch.cat([R, G, B], dim=1))
        else:
            return torch.cat([R, G, B], dim=1)
class GuideNN(nn.Module):
    """Predicts the single-channel guidance map used to slice the bilateral
    grid: a 3->16 1x1 conv with BN, followed by a 16->1 1x1 conv with tanh."""
    def __init__(self, params=None):
        super(GuideNN, self).__init__()
        self.params = params
        self.conv1 = Conv2dBlock(3, 16, ks=1, st=1, padding=0, norm='bn')
        self.conv2 = Conv2dBlock(16, 1, ks=1, st=1, padding=0, norm='none', activation='tanh') #nn.Tanh, nn.Sigmoid
    def forward(self, x):
        hidden = self.conv1(x)
        return self.conv2(hidden)
class Coeffs(nn.Module):
    """Predicts the bilateral grid of affine color coefficients from a
    low-resolution input (HDRNet-style coefficient head).

    Pipeline: strided splat convs (log2(net_input_size/spatial_bin) stages)
    -> 3 ResNet blocks -> self-attention -> 1x1 projection, reshaped to
    (B, nin*nout, luma_bins, spatial, spatial).
    """
    def __init__(self, nin=4, nout=3, params=None):
        super(Coeffs, self).__init__()
        self.params = params
        self.nin = nin
        self.nout = nout
        lb = params['luma_bins']
        cm = params['channel_multiplier']
        sb = params['spatial_bin']
        bn = params['batch_norm']
        theta = params['theta']
        nsize = params['net_input_size']
        self.relu = nn.ReLU()
        # splat features
        n_layers_splat = int(np.log2(nsize/sb))
        self.splat_features = nn.ModuleList()
        self.lp_features = nn.ModuleList()
        prev_ch = 3 #3
        # Downsample
        for i in range(n_layers_splat):
            # No BN on the very first conv.
            use_bn = bn if i > 0 else False
            self.splat_features.append(Conv2d_cd(prev_ch, cm*(2**i)*lb, 3, 1, 1, use_bn=use_bn, actv='relu', theta=theta))
            self.splat_features.append(nn.MaxPool2d(2,2,0))
            prev_ch = splat_ch = cm*(2**i)*lb
        # ResNet Blocks
        self.res_blks = nn.ModuleList()
        for i in range(3):
            self.res_blks.append(ResNetBlock(prev_ch, prev_ch))
        #Self-attention
        self.sa = SelfAttention(prev_ch)
        # NOTE(review): this Sequential is immediately overwritten by the
        # single Conv2dBlock below — presumably leftover dead code; confirm.
        self.conv_out = nn.Sequential(*[
            Conv2dBlock(prev_ch, 8*cm*lb, ks=3, st=1, padding=1, norm='bn'),
            Conv2dBlock(8*cm*lb, lb*nin*nout, ks=1, st=1, padding=0, norm='none', activation='none')
        ])
        # predicton
        self.conv_out = Conv2dBlock(8*cm*lb, lb*nout*nin, ks=1, st=1, padding=0, norm='none', activation='none')
    def forward(self, lowres_input):
        params = self.params
        bs = lowres_input.shape[0]
        lb = params['luma_bins']
        cm = params['channel_multiplier']
        sb = params['spatial_bin']
        x = lowres_input
        for layer in self.splat_features:
            x = layer(x)
        for layer in self.res_blks:
            x = layer(x)
        x = self.sa(x)
        x = self.conv_out(x) # 1,96,16,16
        s = x.shape
        # Split the channel dim into (coefficients, luma bins).
        y = torch.stack(torch.split(x, self.nin*self.nout, 1),2) # B x Coefs x Luma x Spatial x Spatial -> (B, 12,8,16,16)
        return y
class HDRPointwiseNN(nn.Module):
    """HDRNet-style image retoucher.

    Predicts a bilateral coefficient grid from the low-res input, a guidance
    map from the full-res input, slices per-pixel affine coefficients, applies
    them, squashes with sigmoid, and clips each channel to the valid
    ImageNet-normalized range.  Returns (retouched_image, guide_map).
    """
    def __init__(self, opt):
        super(HDRPointwiseNN, self).__init__()
        params = {'luma_bins':opt.luma_bins, 'channel_multiplier':opt.channel_multiplier, 'spatial_bin':opt.spatial_bin,
                 'batch_norm':opt.batch_norm, 'net_input_size':opt.net_input_size, 'theta':opt.theta}
        self.coeffs = Coeffs(params=params)
        self.guide = GuideNN(params=params)
        self.slice = Slice()
        self.apply_coeffs = ApplyCoeffs()
        # Per-channel bounds of [0,1] pixels after ImageNet normalization.
        self.mean = [.485, .456, .406]
        self.std = [.229, .224, .225]
        self.max_val = [(1-m)/s for m,s in zip(self.mean, self.std)]
        self.min_val = [(0-m)/s for m,s in zip(self.mean, self.std)]
    def clip(self, x):
        """Clamp each RGB channel to its valid normalized range."""
        y = x.new(x.size())
        for i in range(3):
            y[:,i,:,:] = torch.clamp(x[:,i,:,:], min=self.min_val[i], max=self.max_val[i])
        return y
    def norm(self, x):
        """Apply ImageNet per-channel normalization."""
        mean = torch.zeros_like(x)
        mean[:,0,:,:] = .485
        mean[:,1,:,:] = .456
        mean[:,2,:,:] = .406
        std = torch.zeros_like(x)
        std[:,0,:,:] = 0.229
        std[:,1,:,:] = 0.224
        std[:,2,:,:] = 0.225
        x = (x - mean) / std #*255
        return x
    def forward(self, lowres, fullres):
        coeffs = self.coeffs(lowres)
        guide = self.guide(fullres)
        slice_coeffs = self.slice(coeffs, guide)
        illu_out = self.apply_coeffs(slice_coeffs, fullres).sigmoid()
        out = self.clip(illu_out)
        return out, guide | 9,047 | 36.38843 | 122 | py |
MadisNet-Inharmonious-Region-Localization | MadisNet-Inharmonious-Region-Localization-master/networks/UNet.py | import torch
import torchvision.models as models
import torch.nn.functional as F
import torch.nn as nn
from networks.blocks import BasicBlock, Bottleneck
class DoubleConv(nn.Module):
    """Two consecutive 3x3 conv -> BatchNorm -> ReLU stages
    (spatial size preserved by padding=1)."""
    def __init__(self, in_channels, out_channels):
        super().__init__()
        stages = [
            nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(inplace=True),
            nn.Conv2d(out_channels, out_channels, kernel_size=3, padding=1),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(inplace=True),
        ]
        self.double_conv = nn.Sequential(*stages)
    def forward(self, x):
        return self.double_conv(x)
class Down(nn.Module):
    """Encoder step: halve the spatial resolution with a 2x2 max-pool,
    then apply a DoubleConv stage."""
    def __init__(self, in_channels, out_channels):
        super().__init__()
        pool_then_conv = (nn.MaxPool2d(2), DoubleConv(in_channels, out_channels))
        self.maxpool_conv = nn.Sequential(*pool_then_conv)
    def forward(self, x):
        return self.maxpool_conv(x)
class Up(nn.Module):
    """Decoder step: upscale ``x1`` to the skip-feature resolution and
    channel count, concatenate with the skip feature ``x2``, then DoubleConv.

    With ``upsample=False`` the bilinear upsampling is skipped and only the
    channel-adapting conv is applied.
    """
    def __init__(self, in_channels, skip_channels, out_channels, upsample=True):
        super().__init__()
        if upsample:
            self.up = nn.Sequential(*[
                nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True),
                nn.Conv2d(in_channels, skip_channels, 3, 1, 1)
            ])
        else:
            self.up = nn.Conv2d(in_channels, skip_channels, 3, 1, 1)
        self.conv = DoubleConv(skip_channels * 2, out_channels)
    def forward(self, x1, x2):
        upsampled = self.up(x1)
        return self.conv(torch.cat((upsampled, x2), dim=1))
class UNet(nn.Module):
    """U-Net segmentation network with an optional torchvision ResNet encoder.

    Args:
        in_ch: number of input image channels.
        n_class: number of output mask channels.
        nf: base feature width (output width of the stem).
        n_downs: number of encoder/decoder stages; stages beyond the 4 ResNet
            layers are built from BasicBlocks at ``extra_dim`` channels.
        backbone: 'none' for a plain conv encoder, or 'resnet34'/'resnet50'
            to reuse pretrained residual stages as the encoder.

    forward() returns ``{"mask": [logits]}`` where ``logits`` is a sigmoid map
    at the input resolution.
    """

    def __init__(self, in_ch=3, n_class=1, nf=64, n_downs=4, backbone='resnet34'):
        super(UNet, self).__init__()
        # Stem: lift the image to `nf` channels at full resolution.
        if backbone == 'none':
            self.in_conv = DoubleConv(in_ch, nf)
        elif 'resnet' in backbone:
            self.in_conv = nn.Sequential(
                nn.Conv2d(in_ch, nf, 3, 1, 1, bias=False),
                nn.BatchNorm2d(nf),
                nn.ReLU(True),
            )
        self.downs = nn.ModuleDict()
        self.ups = nn.ModuleDict()
        self.n_downs = n_downs
        extra_dim = 512  # width of stages added beyond the 4 ResNet layers
        # dims[i] is the channel count entering stage i; dims[i+1] leaves it.
        if 'resnet' in backbone:
            if backbone == 'resnet34':
                resnet = models.resnet34(True)
                dims = [64, 64, 128, 256, 512]
            elif backbone == 'resnet50':
                resnet = models.resnet50(True)
                dims = [64, 64 * 4, 128 * 4, 256 * 4, 512 * 4]
            if n_downs - 4 > 0:
                dims += [extra_dim] * (n_downs - 4)
        elif backbone == 'none':
            dims = [nf * (2 ** i) for i in range(min(4 + 1, n_downs + 1))]
            if n_downs - 4 > 0:
                dims += [extra_dim] * (n_downs - 4)
        # Build encoder
        for i in range(n_downs):
            if backbone == 'none':
                self.downs['d{}'.format(i)] = Down(dims[i], dims[i + 1])
            elif 'resnet' in backbone:
                if i < 4:
                    # Reuse the pretrained residual stage.
                    self.downs['d{}'.format(i)] = getattr(resnet, 'layer{}'.format(i + 1))
                elif i == 4:
                    self.downs['d{}'.format(i)] = nn.Sequential(
                        nn.MaxPool2d(3, 2, 1),
                        BasicBlock(dims[-1], extra_dim),
                        BasicBlock(extra_dim, extra_dim),
                        BasicBlock(extra_dim, extra_dim),
                    )
                else:
                    self.downs['d{}'.format(i)] = nn.Sequential(
                        nn.MaxPool2d(3, 2, 1),
                        BasicBlock(extra_dim, extra_dim),
                        BasicBlock(extra_dim, extra_dim),
                        BasicBlock(extra_dim, extra_dim),
                    )
        # Build Decoder (deepest stage upsamples with a plain conv, others bilinear)
        for i in range(n_downs):
            if i == n_downs - 1:
                self.ups['u{}'.format(i)] = Up(dims[i + 1], dims[i + 1], dims[i], False)
            else:
                self.ups['u{}'.format(i)] = Up(dims[i + 1], dims[i + 1], dims[i])
        self.bottleneck = DoubleConv(dims[-1], dims[-1])
        self.out_conv = nn.Sequential(
            nn.Conv2d(nf, n_class, 1, 1, 0)
        )

    def forward(self, x):
        hx = self.in_conv(x)
        enc_xs = []
        for i in range(self.n_downs):
            hx = self.downs['d{}'.format(i)](hx)
            enc_xs.append(hx)
        hx = self.bottleneck(hx)
        for i in range(self.n_downs):
            idx = self.n_downs - i - 1
            hx = self.ups['u{}'.format(idx)](hx, enc_xs[idx])
        logits = self.out_conv(hx)
        # BUG FIX: F.interpolate's `size` is (H, W); the previous
        # `x.shape[2:][::-1]` passed (W, H), which is wrong for
        # non-square inputs (a cv2-style (w, h) convention leaked in).
        logits = F.interpolate(logits, size=x.shape[2:], mode='bilinear', align_corners=True)
        logits = logits.sigmoid()
        return {"mask": [logits]}
| 5,067 | 34.943262 | 96 | py |
MadisNet-Inharmonious-Region-Localization | MadisNet-Inharmonious-Region-Localization-master/networks/E_dom.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from collections import OrderedDict
import torchvision.models as models
from networks.blocks import PartialConv2d
class DomainEncoder(nn.Module):
    """Encode the masked region of an image into a `style_dim`-d domain code.

    Uses the first three VGG16 conv stages (frozen, pretrained) wrapped in
    partial convolutions so that only pixels under `mask` contribute, then
    fuses per-stage global features through trainable 1x1 adaptors with a
    learned softmax-like weighting.
    """

    def __init__(self, style_dim):
        super(DomainEncoder, self).__init__()
        vgg16 = models.vgg16(pretrained=True)
        self.actv = nn.ReLU(True)
        self.maxpooling = nn.MaxPool2d(2, 2, 0, ceil_mode=False)
        # encoder 1 (VGG16 conv1_1, conv1_2 wrapped as partial convolutions)
        self.conv0 = PartialConv2d(vgg16.features[0])
        self.conv1 = PartialConv2d(vgg16.features[2])
        # encoder 2 (conv2_1, conv2_2)
        self.conv2 = PartialConv2d(vgg16.features[5])
        self.conv3 = PartialConv2d(vgg16.features[7])
        # encoder 3 (conv3_1..conv3_3)
        self.conv4 = PartialConv2d(vgg16.features[10])
        self.conv5 = PartialConv2d(vgg16.features[12])
        self.conv6 = PartialConv2d(vgg16.features[14])
        # fix the encoder: the pretrained VGG convs are never updated
        for i in range(7):
            for param in getattr(self, 'conv{:d}'.format(i)).parameters():
                param.requires_grad = False
        # adaptor: trainable 1x1 partial convs projecting each stage to style_dim
        self.adaptor1 = nn.Conv2d(self.conv1.out_channels, style_dim, kernel_size=1, stride=1, bias=False)
        self.adaptor1 = PartialConv2d(self.adaptor1)
        self.adaptor2 = nn.Conv2d(self.conv3.out_channels, style_dim, kernel_size=1, stride=1, bias=False)
        self.adaptor2 = PartialConv2d(self.adaptor2)
        self.adaptor3 = nn.Conv2d(self.conv6.out_channels, style_dim, kernel_size=1, stride=1, bias=False)
        self.adaptor3 = PartialConv2d(self.adaptor3)
        # Learnable per-stage fusion weights (normalized in forward()).
        self.weight = nn.Parameter(torch.ones((3,), dtype=torch.float32), requires_grad=True)
        self.avg_pooling = nn.AdaptiveAvgPool2d(1)

    def train(self, mode=True):
        # NOTE(review): deliberately does NOT call super().train() so the
        # frozen VGG convs are untouched; only the adaptors are switched.
        # It also ignores `mode`, so .eval() (train(False)) still puts the
        # adaptors in train mode — benign here since they contain no
        # BN/dropout, but confirm before adding such layers.
        self.adaptor1.train()
        self.adaptor2.train()
        self.adaptor3.train()
        self.weight.requires_grad = True

    def forward(self, input, mask, eps=1e-8):
        """Standard forward.

        input: (B, 3, H, W) image; mask: (B, 1, H, W) region-of-interest mask.
        Returns the fused domain code of shape (B, style_dim, 1, 1).
        """
        xb = input
        mb = mask
        # Encoder: each partial conv consumes and returns an updated validity mask.
        xb, mb = self.conv0(xb, mb)
        xb = self.actv(xb)
        xb, mb = self.conv1(xb, mb)
        xb = self.actv(xb)
        x1b = self.maxpooling(xb)
        m1b = self.maxpooling(mb)
        xb, mb = self.conv2(x1b, m1b)
        xb = self.actv(xb)
        xb, mb = self.conv3(xb, mb)
        xb = self.actv(xb)
        x2b = self.maxpooling(xb)
        m2b = self.maxpooling(mb)
        xb, mb = self.conv4(x2b, m2b)
        xb = self.actv(xb)
        xb, mb = self.conv5(xb, mb)
        xb = self.actv(xb)
        xb, mb = self.conv6(xb, mb)
        xb = self.actv(xb)
        x3b = self.maxpooling(xb)
        m3b = self.maxpooling(mb)
        # Domain code: sigmoid-then-normalize turns self.weight into a convex
        # combination over the three stages.
        w = self.weight.sigmoid()
        w = w / (w.sum() + eps)
        x1b, _ = self.adaptor1(x1b, m1b)
        x1b = self.avg_pooling(x1b)
        x2b, _ = self.adaptor2(x2b, m2b)
        x2b = self.avg_pooling(x2b)
        x3b, _ = self.adaptor3(x3b, m3b)
        x3b = self.avg_pooling(x3b)
        s = w[0] * x1b + w[1] * x2b + w[2] * x3b
        return s
| 3,091 | 30.876289 | 106 | py |
MadisNet-Inharmonious-Region-Localization | MadisNet-Inharmonious-Region-Localization-master/networks/blocks.py | """
Copyright (C) 2019 NVIDIA Corporation. All rights reserved.
Licensed under the CC BY-NC-SA 4.0 license
(https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode).
"""
import torch
import torch.nn.functional as F
from torch import nn
import math
def conv3x3(in_planes, out_planes, stride=1):
    """Return a bias-free 3x3 convolution with padding=1 (resolution-preserving at stride 1)."""
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=1,
        bias=False,
    )
class BasicBlock(nn.Module):
    """ResNet basic residual block: two 3x3 convs plus an identity/projection skip.

    When `downsample` is None but the channel counts differ, a 1x1 stride-1
    projection is created automatically (note: a stride mismatch alone is not
    auto-handled; pass an explicit `downsample` in that case).
    """
    expansion = 1

    def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
                 base_width=64, dilation=1, norm_layer=nn.BatchNorm2d):
        super(BasicBlock, self).__init__()
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = norm_layer(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = norm_layer(planes)
        # Auto-create a channel projection for the skip path if needed.
        if downsample is None and inplanes != planes:
            self.downsample = nn.Conv2d(inplanes, planes, 1, 1, 0)
        else:
            self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        shortcut = x
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        if self.downsample is not None:
            shortcut = self.downsample(x)
        out = out + shortcut
        return self.relu(out)
class Bottleneck(nn.Module):
    # Bottleneck in torchvision places the stride for downsampling at 3x3 convolution(self.conv2)
    # while original implementation places the stride at the first 1x1 convolution(self.conv1)
    # according to "Deep residual learning for image recognition"https://arxiv.org/abs/1512.03385.
    # This variant is also known as ResNet V1.5 and improves accuracy according to
    # https://ngc.nvidia.com/catalog/model-scripts/nvidia:resnet_50_v1_5_for_pytorch.
    """ResNet bottleneck block: 1x1 reduce -> 3x3 (carries stride/groups/dilation) -> 1x1 expand.

    BUG FIX: the original body called `conv1x1`, which does not exist in this
    module (NameError on construction), and called the local `conv3x3` helper
    with 5 arguments although it accepts only 3 (no groups/dilation support).
    The convolutions are now built directly with nn.Conv2d, matching the
    torchvision definitions (bias-free; 3x3 padding == dilation).
    """
    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
                 base_width=64, dilation=1, norm_layer=None):
        super(Bottleneck, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        width = int(planes * (base_width / 64.)) * groups
        # Both self.conv2 and self.downsample layers downsample the input when stride != 1
        self.conv1 = nn.Conv2d(inplanes, width, kernel_size=1, stride=1, bias=False)
        self.bn1 = norm_layer(width)
        self.conv2 = nn.Conv2d(width, width, kernel_size=3, stride=stride,
                               padding=dilation, groups=groups, dilation=dilation,
                               bias=False)
        self.bn2 = norm_layer(width)
        self.conv3 = nn.Conv2d(width, planes * self.expansion, kernel_size=1,
                               stride=1, bias=False)
        self.bn3 = norm_layer(planes * self.expansion)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        identity = x
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)
        out = self.conv2(out)
        out = self.bn2(out)
        out = self.relu(out)
        out = self.conv3(out)
        out = self.bn3(out)
        if self.downsample is not None:
            identity = self.downsample(x)
        out += identity
        out = self.relu(out)
        return out
# Used for spatial attention
class BasicConv(nn.Module):
    """Conv2d with optional BatchNorm and ReLU, each toggled by a flag.

    Used for spatial attention; `bn`/`relu` default on, bias defaults off.
    """

    def __init__(self, in_planes, out_planes, kernel_size, stride=1, padding=0,
                 dilation=1, groups=1, relu=True, bn=True, bias=False):
        super(BasicConv, self).__init__()
        self.out_channels = out_planes
        self.conv = nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size,
                              stride=stride, padding=padding, dilation=dilation,
                              groups=groups, bias=bias)
        self.bn = nn.BatchNorm2d(out_planes, eps=1e-5, momentum=0.01, affine=True) if bn else None
        self.relu = nn.ReLU() if relu else None

    def forward(self, x):
        out = self.conv(x)
        if self.bn is not None:
            out = self.bn(out)
        if self.relu is not None:
            out = self.relu(out)
        return out
# Central Difference Convolutional Network
class Conv2d_cd(nn.Module):
    """Central Difference Convolution (CDC) followed by BatchNorm and an activation.

    Output is `conv(x) - theta * (sum-pooled-kernel conv)(x)`; with theta == 0
    it degenerates to a vanilla convolution.
    """

    def __init__(self, in_channels, out_channels, kernel_size=3, stride=1,
                 padding=1, dilation=1, groups=1, bias=False, theta=0.7,
                 use_bn=True, actv='relu'):
        super(Conv2d_cd, self).__init__()
        self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=kernel_size,
                              stride=stride, padding=padding, dilation=dilation,
                              groups=groups, bias=bias)
        self.theta = theta
        self.bn = nn.BatchNorm2d(out_channels)
        # Activation chosen by name; anything unrecognized becomes a no-op.
        factories = {
            'relu': lambda: nn.ReLU(True),
            'lrelu': lambda: nn.LeakyReLU(0.2),
            'tanh': nn.Tanh,
            'sigmoid': nn.Sigmoid,
            'elu': nn.ELU,
        }
        self.actv = factories.get(actv, nn.Identity)()

    def forward(self, x):
        out_normal = self.conv(x)
        if math.fabs(self.theta - 0.0) < 1e-6:
            # theta == 0: plain convolution, no central-difference term.
            diff = out_normal
        else:
            # Collapsing each kernel to its spatial sum gives a 1x1 conv whose
            # output equals convolving with a constant-per-channel kernel.
            kernel_diff = self.conv.weight.sum(2).sum(2)[:, :, None, None]
            out_diff = F.conv2d(input=x, weight=kernel_diff, bias=self.conv.bias,
                                stride=self.conv.stride, padding=0,
                                groups=self.conv.groups)
            diff = out_normal - self.theta * out_diff
        return self.actv(self.bn(diff))
class PartialConv2d(nn.Module):
    """Partial convolution wrapper (Liu et al., "Image Inpainting for Irregular Holes").

    Convolves an existing ``nn.Conv2d`` over the input, renormalizes each
    output window by the fraction of valid (mask == 1) pixels inside it, and
    propagates an updated validity mask.
    """

    def __init__(self, conv_module):
        super(PartialConv2d, self).__init__()
        # whether the mask is multi-channel or not
        self.multi_channel = False
        self.return_mask = True
        # Mirror the wrapped conv's geometry so the mask convolution matches it.
        self.kernel_size = conv_module.kernel_size
        self.stride = conv_module.stride
        self.padding = conv_module.padding
        self.dilation = conv_module.dilation
        self.bias = conv_module.bias
        self.in_channels = conv_module.in_channels
        self.out_channels = conv_module.out_channels
        self.conv = conv_module
        # All-ones kernel: convolving the mask with it counts valid pixels per window.
        self.weight_maskUpdater = torch.ones(1, 1, self.kernel_size[0], self.kernel_size[1])
        # Number of pixels in one (single-channel) sliding window.
        self.slide_winsize = self.weight_maskUpdater.shape[1] * self.weight_maskUpdater.shape[2] * \
                             self.weight_maskUpdater.shape[3]
        # Cache key: mask statistics are recomputed only when a new mask is given
        # or the input resolution changes.
        self.last_size = (None, None, None, None)
        self.update_mask = None
        self.mask_ratio = None

    def forward(self, input, mask_in=None):
        """Return the renormalized convolution output (and the updated mask
        when ``self.return_mask`` is True).

        input: (B, C, H, W); mask_in: (B, 1, H, W) validity mask or None
        (None means all pixels valid).
        """
        assert len(input.shape) == 4
        if mask_in is not None or self.last_size != tuple(input.shape):
            self.last_size = tuple(input.shape)
            with torch.no_grad():
                if self.weight_maskUpdater.type() != input.type():
                    self.weight_maskUpdater = self.weight_maskUpdater.to(input)
                if mask_in is None:
                    # if mask is not provided, create a mask
                    mask = torch.ones(1, 1, input.data.shape[2], input.data.shape[3]).to(input)
                else:
                    mask = mask_in
                # Count valid pixels per output window.
                self.update_mask = F.conv2d(mask, self.weight_maskUpdater, bias=None, stride=self.stride,
                                            padding=self.padding, dilation=self.dilation, groups=1)
                # Renormalization factor: window size / valid-pixel count.
                self.mask_ratio = self.slide_winsize / (self.update_mask + 1e-8)
                # Output positions with at least one valid pixel stay valid.
                self.update_mask = torch.clamp(self.update_mask, 0, 1)
                self.mask_ratio = torch.mul(self.mask_ratio, self.update_mask)
        # Zero out invalid pixels before convolving (only when a mask was given).
        raw_out = self.conv.forward(torch.mul(input, mask) if mask_in is not None else input)
        if self.bias is not None:
            # Renormalize only the weight contribution; the bias is re-added
            # afterwards so it is not scaled by the mask ratio.
            bias_view = self.bias.view(1, self.out_channels, 1, 1)
            output = torch.mul(raw_out - bias_view, self.mask_ratio) + bias_view
            output = torch.mul(output, self.update_mask)
        else:
            output = torch.mul(raw_out, self.mask_ratio)
        if self.return_mask:
            return output, self.update_mask
        else:
            return output
class ResNetBlock(nn.Module):
    """Residual block: conv(3x3, C -> 1.5C) -> BN -> actv -> conv(3x3, 1.5C -> C_out) -> BN,
    plus a skip connection (1x1 projection when in/out channels differ).

    BUG FIX: the skip branch previously ran `res = self.adapt(x)` AFTER `x`
    had been overwritten by the conv stack, so (a) the residual connection to
    the input was lost entirely and (b) it crashed whenever
    in_channels != out_channels (adapt expects in_channels but received the
    transformed out_channels tensor). The projection is now applied to the
    block input. Also adds an Identity fallback so an unknown `actv` name no
    longer raises AttributeError in forward().
    """

    def __init__(self, in_channels, out_channels, stride=1, bias=False, actv='relu'):
        super(ResNetBlock, self).__init__()
        self.bn1 = nn.BatchNorm2d(int(in_channels * (1.5)))
        self.bn2 = nn.BatchNorm2d(out_channels)
        self.conv1 = nn.Conv2d(in_channels, int(in_channels * (1.5)), 3, 1, 1, bias=False)
        self.conv2 = nn.Conv2d(int(in_channels * (1.5)), out_channels, 3, 1, 1, bias=False)
        # 1x1 projection for the skip path when the channel counts differ.
        self.adapt = nn.Conv2d(in_channels, out_channels, 1, 1, 0, bias=False) if in_channels != out_channels else nn.Identity()
        if actv == 'relu':
            self.actv = nn.ReLU(True)
        elif actv == 'lrelu':
            self.actv = nn.LeakyReLU(0.2)
        elif actv == 'tanh':
            self.actv = nn.Tanh()
        elif actv == 'sigmoid':
            self.actv = nn.Sigmoid()
        elif actv == 'elu':
            self.actv = nn.ELU()
        else:
            # Fallback: unknown names disable the activation instead of crashing.
            self.actv = nn.Identity()

    def forward(self, x):
        # Project the INPUT onto the output channel count for the skip path.
        residual = self.adapt(x)
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.actv(out)
        out = self.conv2(out)
        out = self.bn2(out)
        out = out + residual
        return self.actv(out)
class Conv2dBlock(nn.Module):
    """Configurable pad -> conv -> norm -> activation block.

    Args:
        in_dim, out_dim: channel counts.
        ks, st: kernel size and stride of the convolution.
        padding: amount of explicit padding applied by the pad layer (the
            conv itself uses no padding).
        norm: 'bn' | 'in' | 'adain' | 'none'.
        activation: 'relu' | 'lrelu' | 'tanh' | 'none'.
        pad_type: 'reflect' | 'replicate' | 'zero' | 'none'.
        activation_first: if True, run activation BEFORE pad/conv/norm
            (pre-activation ordering).
    """

    def __init__(self, in_dim, out_dim, ks, st, padding=0, dilation=1,
                 norm='none', activation='relu', pad_type='zero',
                 use_bias=True, activation_first=False):
        super(Conv2dBlock, self).__init__()
        self.use_bias = use_bias
        self.activation_first = activation_first
        # initialize padding
        if pad_type == 'reflect':
            self.pad = nn.ReflectionPad2d(padding)
        elif pad_type == 'replicate':
            self.pad = nn.ReplicationPad2d(padding)
        elif pad_type == 'zero':
            self.pad = nn.ZeroPad2d(padding)
        elif pad_type == 'none':
            self.pad = None
        else:
            assert 0, "Unsupported padding type: {}".format(pad_type)
        # initialize normalization
        norm_dim = out_dim
        if norm == 'bn':
            self.norm = nn.BatchNorm2d(norm_dim)
        elif norm == 'in':
            self.norm = nn.InstanceNorm2d(norm_dim)
        elif norm == 'adain':
            # AdaIN: weight/bias must be assigned externally before forward().
            self.norm = AdaptiveInstanceNorm2d(norm_dim)
        elif norm == 'none':
            self.norm = None
        else:
            assert 0, "Unsupported normalization: {}".format(norm)
        # initialize activation
        if activation == 'relu':
            # Pre-activation must not be in-place: the activated tensor may
            # still be needed by the caller.
            if activation_first == True:
                self.activation = nn.ReLU(inplace=False)
            else:
                self.activation = nn.ReLU(inplace=True)
        elif activation == 'lrelu':
            self.activation = nn.LeakyReLU(0.2, inplace=True)
        elif activation == 'tanh':
            self.activation = nn.Tanh()
        elif activation == 'none':
            self.activation = None
        else:
            assert 0, "Unsupported activation: {}".format(activation)
        self.conv = nn.Conv2d(in_dim, out_dim, ks, st, dilation=dilation, bias=self.use_bias)

    def forward(self, x):
        if self.activation_first:
            # activation -> pad -> conv -> norm
            if self.activation:
                x = self.activation(x)
            if self.pad is not None:
                x = self.conv(self.pad(x))
            else:
                x = self.conv(x)
            if self.norm:
                x = self.norm(x)
        else:
            # pad -> conv -> norm -> activation
            if self.pad is not None:
                x = self.conv(self.pad(x))
            else:
                x = self.conv(x)
            if self.norm:
                x = self.norm(x)
            if self.activation:
                x = self.activation(x)
        return x
class Swish(nn.Module):
    """Swish activation: f(x) = x * sigmoid(x)."""

    def forward(self, x):
        return torch.sigmoid(x) * x
class SwishImplementation(torch.autograd.Function):
    """Memory-efficient Swish: saves only the input and recomputes sigmoid in
    backward instead of storing the forward activation."""

    @staticmethod
    def forward(ctx, i):
        """Return i * sigmoid(i), saving the input for the backward pass."""
        result = i * torch.sigmoid(i)
        ctx.save_for_backward(i)
        return result

    @staticmethod
    def backward(ctx, grad_output):
        """d/di [i * sigmoid(i)] = sigmoid(i) * (1 + i * (1 - sigmoid(i))).

        BUG FIX: used the long-removed ``ctx.saved_variables`` alias;
        modern autograd exposes saved inputs via ``ctx.saved_tensors``.
        """
        (i,) = ctx.saved_tensors
        sigmoid_i = torch.sigmoid(i)
        return grad_output * (sigmoid_i * (1 + i * (1 - sigmoid_i)))
class MemoryEfficientSwish(nn.Module):
    """Module wrapper around SwishImplementation (x * sigmoid(x) with a
    custom backward that recomputes sigmoid instead of caching activations)."""

    def forward(self, x):
        return SwishImplementation.apply(x)
class AdaptiveInstanceNorm2d(nn.Module):
    """Adaptive Instance Normalization (AdaIN).

    ``self.weight`` and ``self.bias`` start as None and must be assigned
    externally (e.g. from a style MLP, with length batch * num_features)
    before forward() is called.
    """

    def __init__(self, num_features, eps=1e-5, momentum=0.1):
        super(AdaptiveInstanceNorm2d, self).__init__()
        self.num_features = num_features
        self.eps = eps
        self.momentum = momentum
        # Affine parameters are injected by the caller, not learned here.
        self.weight = None
        self.bias = None
        self.register_buffer('running_mean', torch.zeros(num_features))
        self.register_buffer('running_var', torch.ones(num_features))

    def forward(self, x):
        assert self.weight is not None and \
               self.bias is not None, "Please assign AdaIN weight first"
        b, c = x.size(0), x.size(1)
        # Repeat running stats per sample so instance norm can be expressed as
        # batch norm over a (1, b*c, H, W) view: each (sample, channel) pair
        # becomes its own "channel" and is normalized independently.
        running_mean = self.running_mean.repeat(b)
        running_var = self.running_var.repeat(b)
        x_reshaped = x.contiguous().view(1, b * c, *x.size()[2:])
        out = F.batch_norm(
            x_reshaped, running_mean, running_var, self.weight, self.bias,
            True, self.momentum, self.eps)
        return out.view(b, c, *x.size()[2:])

    def __repr__(self):
        return self.__class__.__name__ + '(' + str(self.num_features) + ')'
| 13,469 | 35.016043 | 158 | py |
MadisNet-Inharmonious-Region-Localization | MadisNet-Inharmonious-Region-Localization-master/dataset/base_dataset.py | """This module implements an abstract base class (ABC) 'BaseDataset' for datasets.
It also includes common transformation functions (e.g., get_transform, __scale_width), which can be later used in subclasses.
"""
import random
import numpy as np
import torch.utils.data as data
#from PIL import Image
import cv2
#import torchvision.transforms as transforms
from abc import ABC, abstractmethod
from albumentations import HorizontalFlip, RandomResizedCrop, Compose, DualTransform
import albumentations.augmentations.transforms as transforms
class BaseDataset(data.Dataset, ABC):
    """This class is an abstract base class (ABC) for datasets.

    To create a subclass, you need to implement the following four functions:
    -- <__init__>:                      initialize the class, first call BaseDataset.__init__(self, opt).
    -- <__len__>:                       return the size of dataset.
    -- <__getitem__>:                   get a data point.
    -- <modify_commandline_options>:    (optionally) add dataset-specific options and set default options.
    """

    def __init__(self, opt):
        """Initialize the class; save the options in the class

        Parameters:
            opt (Option class)-- stores all the experiment flags; needs to be a subclass of BaseOptions
        """
        self.opt = opt
        # Dataset root directory, taken from the experiment options.
        self.root = opt.dataset_root  # mia

    @staticmethod
    def modify_commandline_options(parser, is_train):
        """Add new dataset-specific options, and rewrite default values for existing options.

        Parameters:
            parser          -- original option parser
            is_train (bool) -- whether training phase or test phase. You can use this flag to add training-specific or test-specific options.

        Returns:
            the modified parser.
        """
        return parser

    @abstractmethod
    def __len__(self):
        """Return the total number of images in the dataset."""
        return 0

    @abstractmethod
    def __getitem__(self, index):
        """Return a data point and its metadata information.

        Parameters:
            index - - a random integer for data indexing

        Returns:
            a dictionary of data with their names. It ususally contains the data itself and its metadata information.
        """
        pass
class HCompose(Compose):
    """albumentations Compose preconfigured for (composite, real, mask) samples.

    By default the 'real' image is transformed like the main image and 'mask'
    like a mask; when ``no_nearest_for_masks`` is True, dual transforms are
    told to treat the mask as an image so geometric ops use image-style
    (non-nearest) interpolation.
    """

    def __init__(self, transforms, *args, additional_targets=None, no_nearest_for_masks=True, **kwargs):
        if additional_targets is None:
            additional_targets = {
                'real': 'image',
                'mask': 'mask'
            }
        self.additional_targets = additional_targets
        super().__init__(transforms, *args, additional_targets=additional_targets, **kwargs)
        if no_nearest_for_masks:
            for t in transforms:
                if isinstance(t, DualTransform):
                    # NOTE(review): pokes albumentations' private
                    # `_additional_targets`; verify against the pinned
                    # albumentations version before upgrading.
                    t._additional_targets['mask'] = 'image'
                    # t._additional_targets['edge'] = 'image'
def get_params(opt, size):
    """Sample random augmentation parameters for an image of the given size.

    Args:
        opt: options object with `preprocess`, `load_size`, `crop_size`.
        size: (width, height) of the source image.

    Returns:
        dict with 'crop_pos' (x, y) and a boolean 'flip' flag.
    """
    w, h = size
    # Resolution after the resize step but before cropping.
    new_w, new_h = w, h
    if opt.preprocess == 'resize_and_crop':
        new_h = new_w = opt.load_size
    elif opt.preprocess == 'scale_width_and_crop':
        new_w = opt.load_size
        new_h = opt.load_size * h // w
    # Random top-left corner for a crop_size crop (clamped to stay in-bounds).
    x = random.randint(0, np.maximum(0, new_w - opt.crop_size))
    y = random.randint(0, np.maximum(0, new_h - opt.crop_size))
    flip = random.random() > 0.5
    return {'crop_pos': (x, y), 'flip': flip}
def get_transform(opt, params=None, grayscale=False, convert=True):
    """Build the albumentations pipeline selected by ``opt.preprocess``.

    'resize_and_crop' -> RandomResizedCrop to crop_size; 'resize' -> plain
    Resize; 'none' -> identity pipeline. A HorizontalFlip is appended unless
    ``opt.no_flip`` is set.

    NOTE(review): `params` only gates whether random ops are added (fixed
    params are never forwarded) and `convert` is unused here — confirm
    against callers before relying on either.
    """
    transform_list = []
    if grayscale:
        transform_list.append(transforms.ToGray())
    if opt.preprocess == 'resize_and_crop':
        if params is None:
            transform_list.append(RandomResizedCrop(opt.crop_size, opt.crop_size, scale=(0.9, 1.0)))  # 0.5,1.0
    elif opt.preprocess == 'resize':
        transform_list.append(transforms.Resize(opt.crop_size, opt.crop_size))
    elif opt.preprocess == 'none':
        # No augmentation: wrap an empty list so callers still get an HCompose.
        return HCompose(transform_list)
    if not opt.no_flip:
        if params is None:
            # print("flip")
            transform_list.append(HorizontalFlip())
    return HCompose(transform_list)
def __make_power_2(img, base):
    """Resize `img` (an H x W [x C] numpy/OpenCV array) so both spatial sides
    are multiples of `base`; return the input unchanged when they already are.

    BUG FIX: the original did `ow, oh = img.size`, a leftover from the PIL
    version of this file. For the cv2/numpy arrays used here, `ndarray.size`
    is the total element count (an int), so the unpack always raised
    TypeError. The spatial dimensions are now read from `shape`.
    """
    oh, ow = img.shape[:2]
    h = int(round(oh / base) * base)
    w = int(round(ow / base) * base)
    if (h == oh) and (w == ow):
        return img
    __print_size_warning(ow, oh, w, h)
    # cv2.resize takes the target size as (width, height).
    return cv2.resize(img, (w, h), interpolation=cv2.INTER_LINEAR)
def __print_size_warning(ow, oh, w, h):
    """Print warning information about image size(only print once)"""
    # A function attribute acts as a module-lifetime "already warned" latch.
    if not hasattr(__print_size_warning, 'has_printed'):
        message = ("The image size needs to be a multiple of 4. "
                   "The loaded image size was (%d, %d), so it was adjusted to "
                   "(%d, %d). This adjustment will be done to all images "
                   "whose sizes are not multiples of 4" % (ow, oh, w, h))
        print(message)
        __print_size_warning.has_printed = True
| 4,950 | 35.138686 | 141 | py |
MadisNet-Inharmonious-Region-Localization | MadisNet-Inharmonious-Region-Localization-master/dataset/multi_objects_ihd_dataset.py | import os.path
import os
import torch
import torchvision.transforms.functional as tf
from dataset.base_dataset import BaseDataset, get_transform
#from PIL import Image
import cv2
import numpy as np
import torchvision.transforms as transforms
import random
import torch.nn.functional as F
import copy
class MultiObjectsIhdDataset(BaseDataset):
    """Multi-object inharmonious-region dataset.

    Each item pairs a composite image with its real counterpart and the
    binary mask of the composited regions. File name lists are read from
    ``released_train_le50.txt`` / ``released_test_le50.txt`` under
    ``opt.dataset_root``.
    """

    @staticmethod
    def modify_commandline_options(parser, is_train):
        """Add new dataset-specific options, and rewrite default values for existing options.

        Parameters:
            parser          -- original option parser
            is_train (bool) -- whether training phase or test phase. You can use this flag to add training-specific or test-specific options.

        Returns:
            the modified parser.
        """
        parser.add_argument('--is_train', type=bool, default=True, help='whether in the training phase')
        parser.set_defaults(max_dataset_size=float("inf"), new_dataset_option=2.0)  # specify dataset-specific default values
        return parser

    def __init__(self, opt):
        """Read the split file list and build transforms.

        Parameters:
            opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions.
                Uses opt.phase ('train' | 'val' | 'test') and opt.dataset_root.
        """
        # save the option and dataset root
        BaseDataset.__init__(self, opt)
        self.image_paths = []
        self.opt = copy.copy(opt)
        self.phase = opt.phase
        if opt.phase=='train':
            # print('loading training file: ')
            self.trainfile = os.path.join(opt.dataset_root,'released_train_le50.txt')
            # Probability of keeping an augmented crop whose mask is (almost) empty.
            self.keep_background_prob = 0.05 # 0.05
            with open(self.trainfile,'r') as f:
                for line in f.readlines():
                    self.image_paths.append(line.rstrip())
        elif opt.phase == 'val' or opt.phase == 'test':
            print('loading {} file'.format(opt.phase))
            self.keep_background_prob = -1  # never resample: keep every crop
            # NOTE(review): 'val' intentionally(?) reuses the test split file.
            self.trainfile = os.path.join(opt.dataset_root,'released_{}_le50.txt'.format('test'))
            with open(self.trainfile,'r') as f:
                for line in f.readlines():
                    self.image_paths.append(line.rstrip())
        self.transform = get_transform(opt)
        # ImageNet normalization applied to both composite and real images.
        self.input_transform = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize(
                (0.485, 0.456, 0.406),
                (0.229, 0.224, 0.225)
            )
        ])
        # avoid the interlock problem of the opencv and dataloader
        cv2.setNumThreads(0)
        cv2.ocl.setUseOpenCL(False)

    def __getitem__(self, index):
        """Return {'comp', 'mask', 'real', 'img_path'} for one sample."""
        sample = self.get_sample(index)
        self.check_sample_types(sample)
        sample = self.augment_sample(sample)
        # 'image' is the augmented composite: augment_sample() calls the
        # albumentations pipeline with image=sample['comp'], whose output is
        # keyed 'image'. This requires self.transform to be non-None.
        comp = self.input_transform(sample['image'])
        real = self.input_transform(sample['real'])
        mask = sample['mask'][np.newaxis, ...].astype(np.float32)
        # Binarize the (possibly interpolated) mask.
        mask = np.where(mask > 0.5, 1, 0).astype(np.uint8)
        output = {
            'comp': comp,
            'mask': mask,
            'real': real,
            'img_path':sample['img_path']
        }
        return output

    def check_sample_types(self, sample):
        # Albumentations expects uint8 images.
        assert sample['comp'].dtype == 'uint8'
        if 'real' in sample:
            assert sample['real'].dtype == 'uint8'

    def augment_sample(self, sample):
        """Apply the joint augmentation pipeline, resampling crops that lose the mask."""
        if self.transform is None:
            return sample
        #print(self.transform.additional_targets.keys())
        additional_targets = {target_name: sample[target_name]
                              for target_name in self.transform.additional_targets.keys()}
        valid_augmentation = False
        while not valid_augmentation:
            aug_output = self.transform(image=sample['comp'], **additional_targets)
            valid_augmentation = self.check_augmented_sample(sample, aug_output)
        for target_name, transformed_target in aug_output.items():
            #print(target_name,transformed_target.shape)
            sample[target_name] = transformed_target
        return sample

    def check_augmented_sample(self, sample, aug_output):
        # With probability keep_background_prob, accept a crop regardless of
        # mask content; otherwise require a non-trivial foreground.
        if self.keep_background_prob < 0.0 or random.random() < self.keep_background_prob:
            return True
        return aug_output['mask'].sum() > 10

    def get_sample(self, index):
        """Load one (composite, mask, real) triplet from disk as RGB/float arrays."""
        fn = self.image_paths[index].split('.')[0]
        composite_path = os.path.join(self.opt.dataset_root, 'composite_images', fn+'.jpg')
        mask_path = os.path.join(self.opt.dataset_root, 'masks', fn+'.png')
        # The real image name is the composite name's first '_'-separated field.
        target_path = os.path.join(self.opt.dataset_root, 'real_images', fn.split('_')[0]+'.jpg')
        comp = cv2.imread(composite_path)
        comp = cv2.cvtColor(comp, cv2.COLOR_BGR2RGB)
        real = cv2.imread(target_path)
        real = cv2.cvtColor(real, cv2.COLOR_BGR2RGB)
        mask = cv2.imread(mask_path)
        mask = mask[:, :, 0].astype(np.float32) / 255.
        return {'comp': comp, 'mask': mask, 'real': real,'img_path':composite_path}

    def __len__(self):
        """Return the total number of images."""
        return len(self.image_paths)
| 5,493 | 37.964539 | 141 | py |
MadisNet-Inharmonious-Region-Localization | MadisNet-Inharmonious-Region-Localization-master/dataset/__init__.py | """This package includes all the modules related to data loading and preprocessing
To add a custom dataset class called 'dummy', you need to add a file called 'dummy_dataset.py' and define a subclass 'DummyDataset' inherited from BaseDataset.
You need to implement four functions:
-- <__init__>: initialize the class, first call BaseDataset.__init__(self, opt).
-- <__len__>: return the size of dataset.
-- <__getitem__>: get a data point from data loader.
-- <modify_commandline_options>: (optionally) add dataset-specific options and set default options.
Now you can use the dataset class by specifying flag '--dataset_mode dummy'.
See our template dataset class 'template_dataset.py' for more details.
"""
import importlib
import torch.utils.data
from dataset.base_dataset import BaseDataset
def find_dataset_using_name(dataset_name):
    """Import "dataset/[dataset_name]_dataset.py" and return the dataset class.

    The module must define a BaseDataset subclass whose name, lowercased and
    with underscores stripped from ``dataset_name``, equals
    "<dataset_name>dataset" (case-insensitive).

    Raises:
        NotImplementedError: when no matching class is found in the module.
    """
    # BUG FIX: the *_dataset modules live in this package, `dataset`
    # (see `from dataset.base_dataset import BaseDataset` above) — the old
    # "data." prefix could never be imported from this layout.
    dataset_filename = "dataset." + dataset_name + "_dataset"
    datasetlib = importlib.import_module(dataset_filename)

    dataset = None
    target_dataset_name = dataset_name.replace('_', '') + 'dataset'
    for name, cls in datasetlib.__dict__.items():
        if name.lower() == target_dataset_name.lower() \
           and issubclass(cls, BaseDataset):
            dataset = cls

    if dataset is None:
        raise NotImplementedError("In %s.py, there should be a subclass of BaseDataset with class name that matches %s in lowercase." % (dataset_filename, target_dataset_name))

    return dataset
def get_option_setter(dataset_name):
    """Return the static <modify_commandline_options> hook of the named dataset class."""
    return find_dataset_using_name(dataset_name).modify_commandline_options
def create_dataset(opt):
    """Create a dataset (wrapped in a multi-threaded loader) from the options.

    This is the main entry point between this package and train/test scripts:
    it instantiates CustomDatasetDataLoader for ``opt.dataset_mode`` and
    returns the iterable loader.
    """
    loader = CustomDatasetDataLoader(opt)
    return loader.load_data()
class CustomDatasetDataLoader():
    """Wrapper class of Dataset class that performs multi-threaded data loading"""

    def __init__(self, opt):
        """Initialize this class

        Step 1: create a dataset instance given the name [dataset_mode]
        Step 2: create a multi-threaded data loader.
        """
        self.opt = opt
        dataset_class = find_dataset_using_name(opt.dataset_mode)
        self.dataset = dataset_class(opt)
        print("dataset [%s] was created" % type(self.dataset).__name__)
        self.dataloader = torch.utils.data.DataLoader(
            self.dataset,
            batch_size=opt.batch_size,
            shuffle=not opt.serial_batches,
            num_workers=int(opt.num_threads))

    def load_data(self):
        # Returns self: this wrapper is itself the iterable the callers use.
        return self

    def __len__(self):
        """Return the number of data in the dataset"""
        # Capped by opt.max_dataset_size (which may be float('inf')).
        return min(len(self.dataset), self.opt.max_dataset_size)

    def __iter__(self):
        """Return a batch of data"""
        for i, data in enumerate(self.dataloader):
            # Stop early once max_dataset_size samples have been yielded.
            if i * self.opt.batch_size >= self.opt.max_dataset_size:
                break
            yield data
| 3,557 | 36.851064 | 176 | py |
MadisNet-Inharmonious-Region-Localization | MadisNet-Inharmonious-Region-Localization-master/dataset/ihd_dataset.py | import os.path
import os
import torch
import torchvision.transforms.functional as tf
from dataset.base_dataset import BaseDataset, get_transform
#from PIL import Image
import cv2
import numpy as np
import torchvision.transforms as transforms
import random
import torch.nn.functional as F
import copy
class IhdDataset(BaseDataset):
    """iHarmony4-style inharmonious-region dataset.

    Each item pairs a composite image with its real counterpart and the
    binary mask of the composited (inharmonious) region. File lists are read
    from ``le50_train.txt`` / ``le50_val.txt`` / ``le50_test.txt`` under
    ``opt.dataset_root``.
    """

    @staticmethod
    def modify_commandline_options(parser, is_train):
        """Add new dataset-specific options, and rewrite default values for existing options.

        Parameters:
            parser          -- original option parser
            is_train (bool) -- whether training phase or test phase.

        Returns:
            the modified parser.
        """
        parser.add_argument('--is_train', type=bool, default=True, help='whether in the training phase')
        parser.set_defaults(max_dataset_size=float("inf"), new_dataset_option=2.0)  # dataset-specific defaults
        return parser

    def __init__(self, opt):
        """Read the split file list and build the input transforms.

        Parameters:
            opt (Option class) -- stores all the experiment flags; needs to be a
                subclass of BaseOptions. Uses opt.is_train, opt.is_val and
                opt.dataset_root.
        """
        # save the option and dataset root
        BaseDataset.__init__(self, opt)
        self.image_paths = []
        self.opt = copy.copy(opt)
        self.isTrain = opt.is_train
        if opt.is_train:
            # print('loading training file: ')
            self.trainfile = os.path.join(opt.dataset_root, 'le50_train.txt')
            # Probability of keeping an augmented crop whose mask is (almost) empty.
            self.keep_background_prob = 0.05  # 0.05
            with open(self.trainfile, 'r') as f:
                for line in f.readlines():
                    self.image_paths.append(os.path.join(opt.dataset_root, line.rstrip()))
        else:
            print('loading test file')
            self.keep_background_prob = -1  # never resample: keep every crop
            # BUG FIX: this condition was `if opt.is_val = True:` — an
            # assignment inside an `if`, i.e. a SyntaxError that prevented
            # this module from being imported at all.
            if opt.is_val:
                self.trainfile = os.path.join(opt.dataset_root, 'le50_val.txt')
            else:
                self.trainfile = os.path.join(opt.dataset_root, 'le50_test.txt')
            with open(self.trainfile, 'r') as f:
                for line in f.readlines():
                    self.image_paths.append(os.path.join(opt.dataset_root, line.rstrip()))
        self.transform = get_transform(opt)
        # ImageNet normalization applied to both composite and real images.
        self.input_transform = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize(
                (0.485, 0.456, 0.406),
                (0.229, 0.224, 0.225)
            )
        ])
        # avoid the interlock problem of the opencv and dataloader
        cv2.setNumThreads(0)
        cv2.ocl.setUseOpenCL(False)

    def __getitem__(self, index):
        """Return {'comp', 'mask', 'real', 'img_path'} for one sample."""
        sample = self.get_sample(index)
        self.check_sample_types(sample)
        sample = self.augment_sample(sample)
        # 'image' is the augmented composite: augment_sample() invokes the
        # albumentations pipeline with image=sample['comp'], whose output is
        # keyed 'image'. This requires self.transform to be non-None.
        comp = self.input_transform(sample['image'])
        real = self.input_transform(sample['real'])
        mask = sample['mask'][np.newaxis, ...].astype(np.float32)
        # Binarize the (possibly interpolated) mask.
        mask = np.where(mask > 0.5, 1, 0).astype(np.uint8)
        output = {
            'comp': comp,
            'mask': mask,
            'real': real,
            'img_path': sample['img_path']
        }
        return output

    def check_sample_types(self, sample):
        # Albumentations expects uint8 images.
        assert sample['comp'].dtype == 'uint8'
        if 'real' in sample:
            assert sample['real'].dtype == 'uint8'

    def augment_sample(self, sample):
        """Apply the joint augmentation pipeline, resampling crops that lose the mask."""
        if self.transform is None:
            return sample
        additional_targets = {target_name: sample[target_name]
                              for target_name in self.transform.additional_targets.keys()}
        valid_augmentation = False
        while not valid_augmentation:
            aug_output = self.transform(image=sample['comp'], **additional_targets)
            valid_augmentation = self.check_augmented_sample(sample, aug_output)
        for target_name, transformed_target in aug_output.items():
            sample[target_name] = transformed_target
        return sample

    def check_augmented_sample(self, sample, aug_output):
        # With probability keep_background_prob, accept a crop regardless of
        # mask content; otherwise require a non-trivial foreground.
        if self.keep_background_prob < 0.0 or random.random() < self.keep_background_prob:
            return True
        return aug_output['mask'].sum() > 10

    def get_sample(self, index):
        """Load one (composite, mask, real) triplet; paths are derived from the
        composite file name (name_maskid_variantid.jpg convention)."""
        path = self.image_paths[index]
        name_parts = path.split('_')
        mask_path = self.image_paths[index].replace('composite_images', 'masks')
        mask_path = mask_path.replace(('_' + name_parts[-1]), '.png')
        target_path = self.image_paths[index].replace('composite_images', 'real_images')
        target_path = target_path.replace(('_' + name_parts[-2] + '_' + name_parts[-1]), '.jpg')
        comp = cv2.imread(path)
        comp = cv2.cvtColor(comp, cv2.COLOR_BGR2RGB)
        real = cv2.imread(target_path)
        real = cv2.cvtColor(real, cv2.COLOR_BGR2RGB)
        mask = cv2.imread(mask_path)
        mask = mask[:, :, 0].astype(np.float32) / 255.
        return {'comp': comp, 'mask': mask, 'real': real, 'img_path': path}

    def __len__(self):
        """Return the total number of images."""
        return len(self.image_paths)
| 5,665 | 38.347222 | 141 | py |
MadisNet-Inharmonious-Region-Localization | MadisNet-Inharmonious-Region-Localization-master/pytorch_iou/__init__.py | import torch
import torch.nn.functional as F
from torch.autograd import Variable
import numpy as np
def _iou(pred, target, size_average=True):
    """Soft-IoU loss, averaged over the batch.

    Args:
        pred: predicted maps, shape (B, C, H, W), values in [0, 1].
        target: ground-truth maps, same shape as pred.
        size_average: accepted for API symmetry with SSIM; the original
            implementation always batch-averages, and that behavior is kept.

    Returns:
        Scalar tensor: mean over the batch of (1 - IoU_i).
    """
    # Vectorized over the batch instead of the original Python loop:
    # per-sample intersection and union reduce over the (C, H, W) axes.
    inter = (target * pred).sum(dim=(1, 2, 3))
    union = target.sum(dim=(1, 2, 3)) + pred.sum(dim=(1, 2, 3)) - inter
    # IoU loss is (1 - IoU); mean over batch == sum(1 - IoU_i) / B.
    return (1 - inter / union).mean()
class IOU(torch.nn.Module):
    """nn.Module wrapper around the soft-IoU loss ``_iou``."""

    def __init__(self, size_average=True):
        super().__init__()
        self.size_average = size_average

    def forward(self, pred, target):
        return _iou(pred, target, self.size_average)
| 730 | 24.206897 | 74 | py |
MadisNet-Inharmonious-Region-Localization | MadisNet-Inharmonious-Region-Localization-master/pytorch_ssim/__init__.py | # https://github.com/Po-Hsun-Su/pytorch-ssim/blob/master/pytorch_ssim/__init__.py
import torch
import torch.nn.functional as F
from torch.autograd import Variable
import numpy as np
from math import exp
def gaussian(window_size, sigma):
    """Return a 1-D Gaussian kernel of length ``window_size``, normalized to sum to 1."""
    center = window_size // 2
    denom = float(2 * sigma ** 2)
    weights = [exp(-(i - center) ** 2 / denom) for i in range(window_size)]
    kernel = torch.Tensor(weights)
    return kernel / kernel.sum()
def create_window(window_size, channel):
    """Build a (channel, 1, window_size, window_size) Gaussian window
    suitable for depthwise conv2d (one identical 2-D kernel per channel)."""
    kernel_1d = gaussian(window_size, 1.5).unsqueeze(1)
    kernel_2d = kernel_1d.mm(kernel_1d.t()).float()
    kernel_4d = kernel_2d.unsqueeze(0).unsqueeze(0)
    # Variable is a no-op on modern PyTorch; kept for behavioral parity.
    return Variable(kernel_4d.expand(channel, 1, window_size, window_size).contiguous())
def _ssim(img1, img2, window, window_size, channel, size_average = True):
    """Core SSIM computation between two image batches.

    Local means/variances/covariance are estimated with a depthwise Gaussian
    conv (groups=channel). Returns the global mean when size_average is True,
    otherwise a per-image mean (reduced over channel, height, width).
    """
    mu1 = F.conv2d(img1, window, padding = window_size//2, groups = channel)
    mu2 = F.conv2d(img2, window, padding = window_size//2, groups = channel)
    mu1_sq = mu1.pow(2)
    mu2_sq = mu2.pow(2)
    mu1_mu2 = mu1*mu2
    # Var[X] = E[X^2] - E[X]^2; Cov[X,Y] = E[XY] - E[X]E[Y] (windowed).
    sigma1_sq = F.conv2d(img1*img1, window, padding = window_size//2, groups = channel) - mu1_sq
    sigma2_sq = F.conv2d(img2*img2, window, padding = window_size//2, groups = channel) - mu2_sq
    sigma12 = F.conv2d(img1*img2, window, padding = window_size//2, groups = channel) - mu1_mu2
    # Stabilizers from the SSIM paper: C1=(K1*L)^2, C2=(K2*L)^2 with L=1.
    C1 = 0.01**2
    C2 = 0.03**2
    ssim_map = ((2*mu1_mu2 + C1)*(2*sigma12 + C2))/((mu1_sq + mu2_sq + C1)*(sigma1_sq + sigma2_sq + C2))
    if size_average:
        return ssim_map.mean()
    else:
        return ssim_map.mean(1).mean(1).mean(1)
class SSIM(torch.nn.Module):
    """Structural-similarity metric with a lazily cached Gaussian window.

    The window is rebuilt (and re-cached) whenever the input's channel
    count or tensor type changes.
    """

    def __init__(self, window_size=11, size_average=True):
        super().__init__()
        self.window_size = window_size
        self.size_average = size_average
        self.channel = 1
        self.window = create_window(window_size, self.channel)

    def forward(self, img1, img2):
        channel = img1.size(1)
        cache_ok = (channel == self.channel
                    and self.window.data.type() == img1.data.type())
        if not cache_ok:
            window = create_window(self.window_size, channel)
            if img1.is_cuda:
                window = window.cuda(img1.get_device())
            window = window.type_as(img1)
            # Cache for subsequent calls with the same channel count / dtype.
            self.window = window
            self.channel = channel
        return _ssim(img1, img2, self.window, self.window_size, channel, self.size_average)
def _logssim(img1, img2, window, window_size, channel, size_average = True):
    """Negative-log variant of SSIM.

    Same windowed statistics as ``_ssim``, but the SSIM map is min-max
    normalized to [0, 1] and mapped through -log(x + 1e-8), so higher
    similarity yields a larger (less negative-log) penalty contrast.
    """
    mu1 = F.conv2d(img1, window, padding = window_size//2, groups = channel)
    mu2 = F.conv2d(img2, window, padding = window_size//2, groups = channel)
    mu1_sq = mu1.pow(2)
    mu2_sq = mu2.pow(2)
    mu1_mu2 = mu1*mu2
    # Windowed variances and covariance (E[X^2]-E[X]^2 form).
    sigma1_sq = F.conv2d(img1*img1, window, padding = window_size//2, groups = channel) - mu1_sq
    sigma2_sq = F.conv2d(img2*img2, window, padding = window_size//2, groups = channel) - mu2_sq
    sigma12 = F.conv2d(img1*img2, window, padding = window_size//2, groups = channel) - mu1_mu2
    C1 = 0.01**2
    C2 = 0.03**2
    ssim_map = ((2*mu1_mu2 + C1)*(2*sigma12 + C2))/((mu1_sq + mu2_sq + C1)*(sigma1_sq + sigma2_sq + C2))
    # Min-max normalize, then -log with a small epsilon to avoid log(0).
    ssim_map = (ssim_map - torch.min(ssim_map))/(torch.max(ssim_map)-torch.min(ssim_map))
    ssim_map = -torch.log(ssim_map + 1e-8)
    if size_average:
        return ssim_map.mean()
    else:
        return ssim_map.mean(1).mean(1).mean(1)
class LOGSSIM(torch.nn.Module):
    """Negative-log SSIM metric with the same window-caching scheme as SSIM."""

    def __init__(self, window_size=11, size_average=True):
        super().__init__()
        self.window_size = window_size
        self.size_average = size_average
        self.channel = 1
        self.window = create_window(window_size, self.channel)

    def forward(self, img1, img2):
        channel = img1.size(1)
        cache_ok = (channel == self.channel
                    and self.window.data.type() == img1.data.type())
        if not cache_ok:
            window = create_window(self.window_size, channel)
            if img1.is_cuda:
                window = window.cuda(img1.get_device())
            window = window.type_as(img1)
            # Cache for subsequent calls with the same channel count / dtype.
            self.window = window
            self.channel = channel
        return _logssim(img1, img2, self.window, self.window_size, channel, self.size_average)
def ssim(img1, img2, window_size=11, size_average=True):
    """Functional SSIM entry point; builds a fresh Gaussian window per call."""
    channel = img1.size(1)
    window = create_window(window_size, channel)
    if img1.is_cuda:
        window = window.cuda(img1.get_device())
    return _ssim(img1, img2, window.type_as(img1), window_size, channel, size_average)
| 4,529 | 34.952381 | 104 | py |
thundergbm | thundergbm-master/python/benchmarks/experiments.py | import utils.data_utils as du
from model.catboost_model import CatboostModel
from model.lightgbm_model import LightGBMModel
from model.xgboost_model import XGboostModel
from model.thundergbm_model import ThunderGBMModel
from model.datasets import Dataset
import utils.file_utils as fu
import pandas as pd
import math
import sys
class Experiment:
    """Bundle a dataset with its task type and evaluation metric name."""
    def __init__(self, data_func, name, task, metric):
        # NOTE(review): here Dataset is built positionally from data_func(),
        # while elsewhere in this file it is built as
        # Dataset(name=..., get_func=...) — confirm the Dataset signature;
        # this class appears unused by the entry points below.
        self.data = Dataset(data_func())
        self.name = name
        self.task = task
        self.metric = metric
# Datasets benchmarked by do_exps(); commented entries are kept as a menu of
# previously used configurations — uncomment to re-enable.
exp_datasets = [
    # Dataset(name='SUSY', task='Regression', metric='RMSE', get_func=du.get_susy),
    # Dataset(name='covtype', task='Regression', metric='RMSE', get_func=du.get_covtype),
    # Dataset(name='real-sim', task='Regression', metric='RMSE', get_func=du.get_realsim),
    # Dataset(name='cifar', task='Regression', metric='RMSE', get_func=du.get_cifar),
    # Dataset(name='higgs', task='Regression', metric='RMSE', get_func=du.get_higgs),
    # Dataset(name='higgs', task='Regression', metric='RMSE', get_func=du.get_higgs),
    # Dataset(name='log1p', task='Regression', metric='RMSE', get_func=du.get_log1p)
    Dataset(name='cifar10', task='Multiclass classification', metric='Accuracy', get_func=du.get_cifar10),
    # Dataset(name='news20', task='Multiclass classification', metric='Accuracy', get_func=du.get_news20),
    # Dataset(name='yahoo', task='Ranking', metric='NDCG', get_func=du.get_yahoo)
]
def higgs():
    """Factory for the HIGGS regression dataset descriptor (RMSE metric)."""
    return Dataset(name='higgs', task='Regression', metric='RMSE', get_func=du.get_higgs)
def log1p():
    """Factory for the log1p regression dataset descriptor (RMSE metric)."""
    return Dataset(name='log1p', task='Regression', metric='RMSE', get_func=du.get_log1p)
def cifar():
    """Factory for the CIFAR-10 multiclass dataset descriptor (Accuracy metric).

    NOTE(review): CLI name is 'cifar' but the Dataset is named 'cifar10'.
    """
    return Dataset(name='cifar10', task='Multiclass classification', metric='Accuracy', get_func=du.get_cifar10)
def news20():
    """Factory for the news20 multiclass dataset descriptor (Accuracy metric)."""
    return Dataset(name='news20', task='Multiclass classification', metric='Accuracy', get_func=du.get_news20)
def r_model(data, currentModel, df):
    """Run ``currentModel`` on ``data``, record the result row into ``df``.

    Returns:
        (elapsed, metric): wall time and metric value from run_model.
    """
    elapsed, metric = currentModel.run_model(data=data)
    # Shared device suffix; ThunderGBM rows are keyed by tree method instead.
    device = 'gpu' if getattr(currentModel, 'use_gpu', False) else 'cpu'
    if isinstance(currentModel, ThunderGBMModel):
        name = 'ThunderGBM(depth=' + str(currentModel.max_depth) + ')-' + currentModel.tree_method
    elif isinstance(currentModel, XGboostModel):
        name = 'XGBoost(depth=' + str(currentModel.max_depth) + ')-' + device
    elif isinstance(currentModel, CatboostModel):
        name = 'CatBoost(depth=' + str(currentModel.max_depth) + ')-' + device
    elif isinstance(currentModel, LightGBMModel):
        name = 'LightGBM(num_rounds=' + str(currentModel.num_rounds) + ')-' + device
    else:
        # Fix: previously `name` stayed None for unknown model types and a
        # None key was written into the results table.
        name = type(currentModel).__name__
    fu.add_data(df, name, data, elapsed, metric)
    return elapsed, metric
def do_exps():
    """Benchmark the selected model on every dataset in ``exp_datasets``.

    Each dataset is run 5 times; per-run wall times and metric values are
    appended to '<model_name>.result' and the cumulative table to 'result.csv'.
    """
    df = pd.DataFrame()
    # Swap in XGboostModel() / CatboostModel() / ThunderGBMModel() here to
    # benchmark a different library.
    model = LightGBMModel()
    # Fix: use `with` so the result file is closed even if a run raises
    # (previously the handle leaked on exceptions).
    with open(model.model_name() + '.result', 'a') as result_file:
        for exp in exp_datasets:
            used_times = []
            metrics = []
            result_str = "## Model: " + model.model_name() + " ##\n## Dataset: " + exp.name + " ##\n"
            for _ in range(5):
                ut, mc = r_model(exp, model, df)
                fu.write_results(df, 'result.csv', 'csv')
                used_times.append(ut)
                metrics.append(mc)
            result_str += ", ".join(str(x) for x in used_times) + "\n"
            result_str += ", ".join(str(x) for x in metrics) + "\n\n\n"
            print(result_str)
            result_file.write(result_str)
# # cbModel_gpu = CatboostModel()
# # cbModel_cpu = CatboostModel()
# # lgbModel_gpu = LightGBMModel()
# # lgbModel_cpu = LightGBMModel()
# # xgbModel_gpu = XGboostModel()
# # xgbModel_cpu = XGboostModel()
# # tgbModel_hist = ThunderGBMModel()
# tgbModel_exact = ThunderGBMModel()
# tgbModel_exact.tree_method = 'auto'
# # tgbModel_hist.tree_method = 'hist'
# # xgbModel_cpu.use_gpu = False
# # xgbModel_gpu.use_exact = False
# # cbModel_cpu.use_gpu = False
# # lgbModel_cpu.use_gpu = False
#
# df = pd.DataFrame()
# # for i in [10 * int(math.pow(2, x)) for x in range(2, 3)]:
# # tgbModel.num_rounds = i
# # xgbModel_gpu.num_rounds = i
# # xgbModel_cpu.num_rounds = i
# # cbModel_cpu.num_rounds = i
# # cbModel_gpu.num_rounds = i
# # lgbModel_gpu.num_rounds = i
# #
# # for exp in exp_datasets:
# # # r_model(exp, cbModel_gpu, df)
# # # r_model(exp, cbModel_cpu, df)
# # # r_model(exp, lgbModel_gpu, df)
# # # r_model(exp, lgbModel_cpu, df)
# # # r_model(exp, xgbModel_gpu, df)
# # # r_model(exp, xgbModel_cpu, df)
# # r_model(exp, tgbModel, df)
# # fu.write_results(df, 'result3.csv', 'csv')
#
#
# for depth in range(14, 17, 2):
# tgbModel_exact.max_depth = depth
# for exp in exp_datasets:
# # r_model(exp, cbModel_gpu, df)
# # r_model(exp, cbModel_cpu, df)
# # r_model(exp, lgbModel_gpu, df)
# # r_model(exp, lgbModel_cpu, df)
# # r_model(exp, xgbModel_gpu, df)
# # r_model(exp, xgbModel_cpu, df)
# # tgbModel_hist.max_depth = depth
# # r_model(exp, tgbModel_hist, df)
# r_model(exp, tgbModel_exact, df)
# print("----------------->>>>>depth: " + str(depth))
# fu.write_results(df, 'result3.csv', 'csv')
def load_dataset(dataset_name):
    """Map a CLI dataset name to its Dataset factory and build it.

    Raises:
        ValueError: for an unknown name. (Previously the function silently
        fell through and returned None, deferring failure to an opaque
        AttributeError at the call site.)
    """
    factories = {
        'higgs': higgs,
        'log1p': log1p,
        'cifar': cifar,
        'news20': news20,
    }
    try:
        return factories[dataset_name]()
    except KeyError:
        raise ValueError('unknown dataset: ' + dataset_name) from None
def do_exps_with_command(model_name, dataset_name, use_gpu=True):
    """Benchmark one model on one dataset (5 runs), driven by CLI arguments.

    Args:
        model_name: one of 'xgboost', 'catboost', 'lightgbm', 'thundergbm'.
        dataset_name: a name understood by load_dataset().
        use_gpu: toggles the model's GPU mode.

    Raises:
        ValueError: for an unknown model name. (Previously the error was only
        printed and execution crashed on `None.use_gpu`.)
    """
    models = {
        'xgboost': (XGboostModel, 'xgb.txt'),
        'catboost': (CatboostModel, 'cbt.txt'),
        'lightgbm': (LightGBMModel, 'lgb.txt'),
        'thundergbm': (ThunderGBMModel, 'tgb.txt'),
    }
    if model_name not in models:
        raise ValueError('illegal model name: ' + model_name)
    model_cls, result_path = models[model_name]
    model = model_cls()
    model.use_gpu = use_gpu
    dataset = load_dataset(dataset_name)
    df = pd.DataFrame()
    used_times = []
    metrics = []
    # Fix: `with` guarantees the result file is closed on error.
    with open(result_path, 'a') as result_file:
        for _ in range(5):
            result_str = "## Model: " + model.model_name() + " ##\n## Dataset: " \
                         + dataset.name + " ##\n"
            ut, mc = r_model(dataset, model, df)
            fu.write_results(df, 'result.csv', 'csv')
            used_times.append(ut)
            metrics.append(mc)
        result_str += ", ".join(str(x) for x in used_times) + "\n"
        result_str += ", ".join(str(x) for x in metrics) + "\n\n\n"
        print(result_str)
        result_file.write(result_str)
if __name__ == "__main__":
    # CLI: experiments.py <model_name> <cpu|gpu> <dataset_name>
    print(sys.argv)
    model_name = sys.argv[1]
    # Any second argument other than the literal 'cpu' selects GPU mode.
    use_gpu = True
    if sys.argv[2] == 'cpu':
        use_gpu = False
    dataset_name = sys.argv[3]
    do_exps_with_command(model_name, dataset_name, use_gpu)
    # do_exps()
| 7,521 | 36.054187 | 112 | py |
thundergbm | thundergbm-master/python/benchmarks/model/xgboost_model.py | from model.base_model import BaseModel
import numpy as np
import xgboost as xgb
import time
import utils.data_utils as du
from model.datasets import Dataset
class XGboostModel(BaseModel):
    """XGBoost benchmark wrapper: builds the parameter dict from BaseModel
    attributes, trains on a DMatrix, and evaluates predictions."""
    def __init__(self, use_exact=False, debug_verose=1):
        # `debug_verose` (sic) keeps the original (misspelled) public name.
        BaseModel.__init__(self)
        self.use_exact = use_exact
        self.debug_verose = debug_verose

    def _config_model(self, data):
        """Populate self.params from the base-model hyperparameters and the
        dataset task (objective, device/tree-method selection)."""
        self.params['max_depth'] = self.max_depth
        self.params['learning_rate'] = self.learning_rate
        self.params['min_split_loss'] = self.min_split_loss
        self.params['min_child_weight'] = self.min_weight
        self.params['alpha'] = self.L1_reg
        self.params['lambda'] = self.L2_reg
        self.params['debug_verbose'] = self.debug_verose
        self.params['max_bin'] = self.max_bin
        if self.use_gpu:
            self.params['tree_method'] = ('gpu_exact' if self.use_exact
                                          else 'gpu_hist')
            self.params['n_gpus'] = 1
        else:
            self.params['nthread'] = 20
            self.params['tree_method'] = ('exact' if self.use_exact else 'hist')
        # NOTE(review): 'gpu_predictor' is set unconditionally, even on the
        # CPU branch above — confirm this is intended for CPU runs.
        self.params["predictor"] = "gpu_predictor"
        if data.task == "Regression":
            self.params["objective"] = "reg:squarederror"
        elif data.task == "Multiclass classification":
            self.params["objective"] = "multi:softprob"
            self.params["num_class"] = int(np.max(data.y_test) + 1)
        elif data.task == "Classification":
            self.params["objective"] = "binary:logistic"
        elif data.task == "Ranking":
            self.params["objective"] = "rank:ndcg"
        else:
            raise ValueError("Unknown task: " + data.task)

    def _train_model(self, data):
        """Train on data.X_train/y_train; return the elapsed training seconds."""
        print(self.params)
        dtrain = xgb.DMatrix(data.X_train, data.y_train)
        if data.task == 'Ranking':
            # Ranking objectives need query-group boundaries.
            dtrain.set_group(data.groups)
        t_start = time.time()
        self.model = xgb.train(self.params, dtrain, self.num_rounds, [(dtrain, "train")])
        elapsed_time = time.time() - t_start
        return elapsed_time

    def _predict(self, data):
        """Predict on data.X_test and return the task metric via self.eval."""
        dtest = xgb.DMatrix(data.X_test, data.y_test)
        if data.task == 'Ranking':
            dtest.set_group(data.groups)
        pred = self.model.predict(dtest)
        metric = self.eval(data=data, pred=pred)
        return metric

    def model_name(self):
        """Identifier used for result-file naming: xgboost_<dev>_<rounds>_<depth>."""
        name = "xgboost_"
        use_cpu = "gpu_" if self.use_gpu else "cpu_"
        nr = str(self.num_rounds) + "_"
        return name + use_cpu + nr + str(self.max_depth)
if __name__ == "__main__":
    # Smoke test: run XGBoost (CPU) on the Yahoo ranking dataset.
    # Fix: dropped the redundant `X, y, groups = du.get_yahoo()` — the values
    # were unused and loaded the dataset a second time.
    dataset = Dataset(name='yahoo', task='Ranking', metric='NDCG', get_func=du.get_yahoo)
    print(dataset.X_train.shape)
    print(dataset.y_test.shape)
    t_start = time.time()
    xgbModel = XGboostModel()
    xgbModel.use_gpu = False
    xgbModel.run_model(data=dataset)
    elapsed = time.time() - t_start  # fix: was misspelled `eplased`
    print("--------->> " + str(elapsed))
thundergbm | thundergbm-master/docs/conf.py | # -*- coding: utf-8 -*-
#
# ThunderSVM documentation build configuration file, created by
# sphinx-quickstart on Sat Oct 28 23:38:46 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# sys.path.insert(0, os.path.abspath('.'))
import recommonmark
from recommonmark import transform
AutoStructify = transform.AutoStructify
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.mathjax']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# NOTE(review): source_parsers is deprecated in newer Sphinx; recommonmark
# registers its parser via app.add_source_parser instead — fine for the
# Sphinx version this config targets.
source_parsers = {
    '.md': 'recommonmark.parser.CommonMarkParser',
}
source_suffix = ['.rst', '.md']
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'ThunderGBM'
copyright = u'2019, ThunderGBM Developers'
author = u'ThunderGBM Developers'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'0.1'
# The full version, including alpha/beta/rc tags.
release = u'0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# This is required for the alabaster theme
# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars
html_sidebars = {
    '**': [
        'relations.html',  # needs 'show_related': True theme option to display
        'searchbox.html',
    ]
}
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'ThunderGBMdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #
    # 'papersize': 'letterpaper',
    # The font size ('10pt', '11pt' or '12pt').
    #
    # 'pointsize': '10pt',
    # Additional stuff for the LaTeX preamble.
    #
    # 'preamble': '',
    # Latex figure (float) alignment
    #
    # 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, 'ThunderGBM.tex', u'ThunderGBM Documentation',
     u'Zeyi Wen', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'thundergbm', u'ThunderGBM Documentation',
     [author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (master_doc, 'ThunderGBM', u'ThunderGBM Documentation',
     author, 'ThunderGBM', 'One line description of project.',
     'Miscellaneous'),
]
# Base URL used by setup() below to resolve relative links in Markdown docs.
github_doc_root = 'https://github.com/rtfd/recommonmark/tree/master/doc/'
def setup(app):
    """Sphinx extension hook: wire recommonmark's AutoStructify into the build."""
    config = {
        'url_resolver': lambda url: github_doc_root + url,
        'enable_eval_rst': True,
    }
    app.add_config_value('recommonmark_config', config, True)
    app.add_transform(AutoStructify)
| 5,511 | 29.622222 | 79 | py |
PoseTriplet | PoseTriplet-main/estimator/posegan_evaluate.py | # Copyright (c) 2018-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import math
import multiprocessing
import os
import pickle
import random
from time import time
import numpy as np
import torch
from bvh_skeleton import humanoid_1205_skeleton
from common.arguments import parse_args
from common.camera2world import camera_to_worldByPCA
from common.loss import *
from common.model import *
from function.gan_utils import pose_seq_bl_reset
from function.utils import mkd
from function.viz import Wrap_plot_seq_gif
from posegan_basementclass import PoseGANBasement
'''
inference file
'''
class PoseGAN(PoseGANBasement):
    def __init__(self, args):
        """Build the evaluation pipeline: loss, data/loaders, and both models."""
        PoseGANBasement.__init__(self, args)
        # init param
        # self.augment_len = 32
        self.MSE = nn.MSELoss(reduction='mean').to(self.device)
        # prepare data and dataloader
        self.data_preparation()
        self.dataloader_preparation()
        # prepare model
        self.model_preparation()
    def model_preparation(self):
        """Instantiate both networks: pose (root-relative joints) and trajectory (root)."""
        self._model_preparation_pos()
        self._model_preparation_traj()
    def fit(self, args):
        """Inference entry point: optionally sanity-check a pretrained model,
        then dump predictions and visualizations for the s15678 split."""
        ###################################
        # Train start here.
        ###################################
        # load pretrain.
        if args.pretrain:
            self.logger.info('Check pretrain model performance.')
            # NOTE(review): val_rlt is populated nowhere on the active path
            # (only in the commented-out lines below) — dead local.
            val_rlt = {}
            for val_set_key in self.val_generator_dict:
                self.evaluate_posenet(tag='fake', valset=val_set_key)
                self.evaluate_trajnet(tag='fake', valset=val_set_key)
            # for val_set_key in self.val_generator_dict:
            #     val_rlt[val_set_key] = self.evaluate_posenet(tag='real', valset=val_set_key)
            # self.summary.summary_epoch_update()
        # vis result on s15678
        val_set_key = 's15678'
        # val_set_key = 's15678_flip'
        # NOTE(review): `tag` is unused; both calls below hard-code tag='fake'.
        tag = 'fake'
        self.save_result(valset=val_set_key)
        self.vis_result(tag='fake', valset=val_set_key)
    def evaluate_posenet(self, tag='real', valset='s911'):
        """Evaluate the pose network (root-relative MPJPE, mm) on one
        validation set; logs per-batch and per-epoch scalars to TensorBoard.
        """
        start_time = time()
        # End-of-epoch evaluation
        with torch.no_grad():
            # Sync the eval copy with the freshly trained weights.
            self.model_pos.load_state_dict(self.model_pos_train.state_dict())
            self.model_pos.eval()
            epoch_p1_3d_valid = 0
            N_valid = 0
            test_generator = self.val_generator_dict[valset]
            for cam_ex, batch, batch_2d in test_generator.next_epoch():
                inputs_3d = torch.from_numpy(batch.astype('float32'))
                inputs_2d = torch.from_numpy(batch_2d.astype('float32'))
                if torch.cuda.is_available():
                    inputs_3d = inputs_3d.cuda()
                    inputs_2d = inputs_2d.cuda()
                # inputs_3d[:, :, 0] = 0
                # Make the target root-relative: subtract the root joint.
                inputs_3d[:, :, :, :] = inputs_3d[:, :, :, :] - inputs_3d[:, :, :1, :]
                # Predict 3D poses
                predicted_3d_pos = self.model_pos(inputs_2d)
                # Test-time augmentation (if enabled)
                if test_generator.augment_enabled():
                    # Undo flipping and take average with non-flipped version
                    predicted_3d_pos[1, :, :, 0] *= -1
                    predicted_3d_pos[1, :, test_generator.joints_left + test_generator.joints_right] = \
                        predicted_3d_pos[1, :, test_generator.joints_right + test_generator.joints_left]
                    predicted_3d_pos = torch.mean(predicted_3d_pos, dim=0, keepdim=True)
                    inputs_3d = inputs_3d[:1]
                loss_3d_pos = mpjpe(predicted_3d_pos, inputs_3d)
                # Accumulate weighted by number of frames (batch * clip length).
                epoch_p1_3d_valid += inputs_3d.shape[0] * inputs_3d.shape[1] * loss_3d_pos.item()
                N_valid += inputs_3d.shape[0] * inputs_3d.shape[1]
                # batch-wise log.
                self.writer.add_scalar('eval_P_iter_{}/{}_p1'.format(tag, valset), loss_3d_pos.item() * 1000,
                                       self.summary.test_iter_num)
                self.summary.summary_test_iter_num_update()
            # analysis result (meters -> millimeters).
            epoch_p1_3d_valid = epoch_p1_3d_valid / N_valid * 1000
            # NOTE(review): `elapsed` is computed (minutes) but never logged.
            elapsed = (time() - start_time) / 60
            # epoch-wise log.
            self.writer.add_scalar('eval_P_epoch_{}/{}_p1'.format(tag, valset), epoch_p1_3d_valid, self.summary.epoch)
        return
    def evaluate_trajnet(self, tag='real', valset='s911'):
        """Evaluate the trajectory network (root-joint MPJPE, mm) on one
        validation set; also logs max-magnitude diagnostics per batch.

        Returns:
            dict with key 'p1': epoch-average trajectory error in mm.
        """
        start_time = time()
        # End-of-epoch evaluation
        with torch.no_grad():
            # Sync the eval copy with the freshly trained weights.
            self.model_traj.load_state_dict(self.model_traj_train.state_dict())
            self.model_traj.eval()
            epoch_p1_3d_valid = 0
            N_valid = 0
            self.summary.test_iter_num = 0  # reset here.
            # Evaluate on test set
            for cam, batch, batch_2d in self.val_generator_dict[valset].next_epoch():
                inputs_3d = torch.from_numpy(batch.astype('float32'))
                inputs_2d = torch.from_numpy(batch_2d.astype('float32'))
                if torch.cuda.is_available():
                    inputs_3d = inputs_3d.cuda()
                    inputs_2d = inputs_2d.cuda()
                # else:
                # `* 1.` makes a copy of the root-joint slice.
                target_3d_traj = inputs_3d[:, :, :1, :] * 1.  # focus on root traj.
                # Predict 3D trajes
                predicted_3d_traj = self.model_traj(inputs_2d)
                loss_3d_traj = mpjpe(predicted_3d_traj, target_3d_traj)
                # Accumulate weighted by number of frames (batch * clip length).
                epoch_p1_3d_valid += target_3d_traj.shape[0] * target_3d_traj.shape[1] * loss_3d_traj.item()
                N_valid += target_3d_traj.shape[0] * target_3d_traj.shape[1]
                # batch-wise log.
                self.writer.add_scalar('eval_T_iter_{}_traj_error/{}'.format(tag, valset), loss_3d_traj.item() * 1000,
                                       self.summary.test_iter_num)
                # check vel: max L2 norms over the last (xyz) axis, as diagnostics.
                max_traj_pred = torch.max(torch.norm(predicted_3d_traj, dim=len(predicted_3d_traj.shape)-1))
                max_traj_gt = torch.max(torch.norm(target_3d_traj, dim=len(target_3d_traj.shape)-1))
                max_traj_error = torch.max(torch.norm(predicted_3d_traj-target_3d_traj, dim=len(target_3d_traj.shape)-1))
                self.writer.add_scalar('eval_T_iter_{}_max_traj_pred/{}'.format(tag, valset),
                                       max_traj_pred.item() * 1000, self.summary.test_iter_num)
                self.writer.add_scalar('eval_T_iter_{}_max_traj_gt/{}'.format(tag, valset),
                                       max_traj_gt.item() * 1000, self.summary.test_iter_num)
                self.writer.add_scalar('eval_T_iter_{}_max_traj_error/{}'.format(tag, valset),
                                       max_traj_error.item() * 1000, self.summary.test_iter_num)
                self.summary.summary_test_iter_num_update()
            # analysis result (meters -> millimeters).
            epoch_p1_3d_valid = epoch_p1_3d_valid / N_valid * 1000
            # NOTE(review): `elapsed` is computed (minutes) but never logged.
            elapsed = (time() - start_time) / 60
            # epoch-wise log.
            self.writer.add_scalar('eval_T_epoch_{}/{}_p1'.format(tag, valset), epoch_p1_3d_valid, self.summary.epoch)
        return {'p1': epoch_p1_3d_valid}
    def vis_result(self, tag='real', valset='s911'):
        """Render GIF visualizations (pose, pose+root, world-space pose) for
        every 100th clip of the validation set.
        """
        start_time = time()
        # End-of-epoch evaluation
        with torch.no_grad():
            # Sync eval copies with the freshly trained weights.
            self.model_pos.load_state_dict(self.model_pos_train.state_dict())
            self.model_pos.eval()
            self.model_traj.load_state_dict(self.model_traj_train.state_dict())
            self.model_traj.eval()
            # NOTE(review): epoch_p1_3d_valid / N_valid are never updated here
            # — leftovers from the evaluate_* methods.
            epoch_p1_3d_valid = 0
            N_valid = 0
            batch_num = 0
            # Evaluate on test set
            test_generator = self.val_generator_dict[valset]
            for cam_ex, batch, batch_2d in test_generator.next_epoch():
                inputs_3d = torch.from_numpy(batch.astype('float32'))
                inputs_2d = torch.from_numpy(batch_2d.astype('float32'))
                cam_ex = torch.from_numpy(cam_ex.astype('float32'))
                if torch.cuda.is_available():
                    inputs_3d = inputs_3d.cuda()
                    inputs_2d = inputs_2d.cuda()
                    cam_ex = cam_ex.cuda()
                # inputs_3d[:, :, 0] = 0
                inputs_3d_origin = inputs_3d * 1.  # a copy.
                # Make the target root-relative: subtract the root joint.
                inputs_3d[:, :, :, :] = inputs_3d[:, :, :, :] - inputs_3d[:, :, :1, :]
                # Predict 3D poses
                predicted_3d_pos = self.model_pos(inputs_2d)
                # Test-time augmentation (if enabled)
                if test_generator.augment_enabled():
                    # Undo flipping and take average with non-flipped version
                    predicted_3d_pos[1, :, :, 0] *= -1
                    predicted_3d_pos[1, :, test_generator.joints_left + test_generator.joints_right] = \
                        predicted_3d_pos[1, :, test_generator.joints_right + test_generator.joints_left]
                    predicted_3d_pos = torch.mean(predicted_3d_pos, dim=0, keepdim=True)
                    inputs_3d = inputs_3d[:1]
                    cam_ex = cam_ex[:1]
                # Predict 3D traj
                predicted_3d_traj = self.model_traj(inputs_2d)
                predicted_3d_traj_withroot = predicted_3d_traj[:1]
                # combine root and pose.
                predicted_3d_pos_withroot = predicted_3d_pos + predicted_3d_traj_withroot
                # convert to the world space.
                # predicted_3d_wpos_withroot = cam2world_sktpos(predicted_3d_pos_withroot)
                # predicted_3d_wpos_withroot = camera_to_worldByTensor(predicted_3d_pos_withroot, cam_ex[..., :4], cam_ex[..., 4:])
                # predicted_3d_wpos_withroot = camera_to_worldByTensor(predicted_3d_pos_withroot, cam_ex[..., :4],
                #                                                      torch.zeros_like(cam_ex[..., 4:]))
                # NOTE(review): called with one argument here but with
                # (pose, cam_ex[..., :4]) in save_result — confirm which
                # signature of camera_to_worldByPCA is intended.
                predicted_3d_wpos_withroot = camera_to_worldByPCA(predicted_3d_pos_withroot)
                # visualize result: every 100th clip, plus clip 599 (last).
                if batch_num % 100 == 0 or batch_num in [599]:
                    lables = ['predict_cam3d', 'input_cam3d', 'input_cam2d', 'predict_withroot_cam3d',
                              'predict_withroot_world']
                    clip_len = predicted_3d_pos.shape[1]
                    # Cap at 1000 frames, then take every 10th frame for the GIF.
                    vis_len = clip_len if clip_len < 1000 else 1000
                    downsample_idx = np.arange(0, vis_len, 10)
                    seqs = self._zip_GIFplot_array([
                        predicted_3d_pos[:, downsample_idx], inputs_3d_origin[:, downsample_idx],
                        inputs_2d[:, downsample_idx], predicted_3d_pos_withroot[:, downsample_idx],
                        predicted_3d_wpos_withroot[:, downsample_idx],
                    ])
                    # NOTE(review): uses the module-global `args` (from
                    # __main__), not self.args — works only when run as script.
                    gif_save_path = os.path.join(args.checkpoint, 'EvaluationGif/{}/epoch{:0>3d}_batch{:0>3d}.gif'.format(valset,
                                                        self.summary.epoch, batch_num))
                    self.logger.info('plotting image-->{}'.format(gif_save_path))
                    Wrap_plot_seq_gif(seqs=seqs, labs=lables, save_path=gif_save_path)
                batch_num = batch_num + 1
        return
    def save_result(self, valset):
        """Run both models over the validation set and pickle per-take results
        (camera, 2D/3D inputs, camera- and world-space predictions, placeholder
        qpos), then export BVH files via the multi-process writer.
        """
        start_time = time()
        # result_lst = []
        result_all_lst = []
        # End-of-epoch evaluation
        with torch.no_grad():
            # Sync eval copies with the freshly trained weights.
            self.model_pos.load_state_dict(self.model_pos_train.state_dict())
            self.model_pos.eval()
            self.model_traj.load_state_dict(self.model_traj_train.state_dict())
            self.model_traj.eval()
            # Evaluate on test set
            test_generator = self.val_generator_dict[valset]
            for cam_ex, batch, batch_2d in test_generator.next_epoch():
                # for cam_ex, batch, batch_2d in self.val_generator_dict[valset].next_epoch():
                inputs_3d = torch.from_numpy(batch.astype('float32'))
                inputs_2d = torch.from_numpy(batch_2d.astype('float32'))
                cam_ex = torch.from_numpy(cam_ex.astype('float32'))
                if torch.cuda.is_available():
                    inputs_3d = inputs_3d.cuda()
                    inputs_2d = inputs_2d.cuda()
                    cam_ex = cam_ex.cuda()
                # inputs_3d[:, :, 0] = 0
                inputs_3d_origin = inputs_3d * 1.  # a copy.
                # Make the target root-relative: subtract the root joint.
                inputs_3d[:, :, :, :] = inputs_3d[:, :, :, :] - inputs_3d[:, :, :1, :]
                # Predict 3D poses
                predicted_3d_pos = self.model_pos(inputs_2d)
                # Test-time augmentation (if enabled)
                if test_generator.augment_enabled():
                    # Undo flipping and take average with non-flipped version
                    predicted_3d_pos[1, :, :, 0] *= -1
                    predicted_3d_pos[1, :, test_generator.joints_left + test_generator.joints_right] = \
                        predicted_3d_pos[1, :, test_generator.joints_right + test_generator.joints_left]
                    predicted_3d_pos = torch.mean(predicted_3d_pos, dim=0, keepdim=True)
                    inputs_3d = inputs_3d[:1]
                    cam_ex = cam_ex[:1]
                # Predict 3D traj
                predicted_3d_traj = self.model_traj(inputs_2d)
                predicted_3d_traj_withroot = predicted_3d_traj[:1]
                # combine root and pose.
                predicted_3d_pos_withroot = predicted_3d_pos + predicted_3d_traj_withroot
                # change to world space
                # predicted_3d_wpos_withroot = cam2world_sktpos(predicted_3d_pos_withroot)
                # predicted_3d_wpos_withroot = camera_to_worldByTensor(predicted_3d_pos_withroot, cam_ex[..., :4], cam_ex[..., 4:])
                # predicted_3d_wpos_withroot = camera_to_worldByTensor(predicted_3d_pos_withroot, cam_ex[..., :4],
                #                                                      torch.zeros_like(cam_ex[..., 4:]))
                # NOTE(review): two-argument call here vs one-argument call in
                # vis_result — confirm the camera_to_worldByPCA signature.
                predicted_3d_wpos_withroot = camera_to_worldByPCA(predicted_3d_pos_withroot, cam_ex[..., :4])
                # fake some qpos data, later will be replaced (59-dof humanoid;
                # first 7 values are root position + unit quaternion).
                predicted_3d_qpos = np.random.randn(inputs_3d.shape[1], 59)
                predicted_3d_qpos[:, :7] = np.array([0,0,2,0,0,0,1])
                result_all_lst.append({
                    'cam_ex': cam_ex.detach().cpu().numpy()[0],
                    # Trim the temporal padding added for the receptive field.
                    'inputs_2d': inputs_2d.detach().cpu().numpy()[0, self.pad:-self.pad],
                    'inputs_3d': inputs_3d_origin.detach().cpu().numpy()[0],
                    'predicted_3d': predicted_3d_pos_withroot.detach().cpu().numpy()[0],
                    'predicted_3d_wpos': predicted_3d_wpos_withroot.detach().cpu().numpy()[0],
                    'predicted_3d_qpos': predicted_3d_qpos,
                })
        ##########################################################
        # save result.
        # result_dict = {}
        # NOTE(review): assumes exactly 600 clips in the generator; mismatch
        # raises IndexError / drops results below.
        takes = ['h36m_take_{:0>3d}'.format(i) for i in range(600)]
        # takes = ['h36m_take_{:0>3d}'.format(i) for i in range(60)]
        result_all_dict = {}
        for i, take in enumerate(takes):
            result_all_dict[take] = result_all_lst[i]
        mkd(self.args.traj_save_path)
        with open(self.args.traj_save_path, 'wb') as f:
            pickle.dump(result_all_dict, f)
        # save bvh in multi-process.
        # for i, take in enumerate(takes):
        #     predicted_3d_wpos_withroot = result_all_dict[take]['predicted_3d_wpos']
        #     bvhfileName = self.args.traj_save_path.replace('traj_dict/traj_dict.pkl', 'traj_bvh/'+take+'.bvh')
        #     self.write_standard_bvh(bvhfileName, predicted_3d_wpos_withroot)
        self.write_standard_bvh_multi_process(takes, result_all_dict)
        ##########################################################
        return
def write_standard_bvh_multi_process(self, takes, result_all_dict):
def wrap_write_standard_bvh(take):
predicted_3d_wpos_withroot = np.copy(result_all_dict[take]['predicted_3d_wpos'])
# reset bl to rl setting
predicted_3d_wpos_withroot = pose_seq_bl_reset(torch.from_numpy(predicted_3d_wpos_withroot)).numpy()
# ground_z = np.min(predicted_3d_wpos_withroot[:, :, -1:])
ground_z = np.min(predicted_3d_wpos_withroot[:, :, -1:], axis=(1,2), keepdims=True)
predicted_3d_wpos_withroot[:, :, -1:] = predicted_3d_wpos_withroot[:, :, -1:] - ground_z
bvhfileName = self.args.traj_save_path.replace('traj_dict/traj_dict.pkl', 'traj_bvh/'+take+'.bvh')
self.write_standard_bvh(bvhfileName, predicted_3d_wpos_withroot)
# start
task_lst = takes
num_threads = args.num_threads
for ep in range(math.ceil(len(task_lst) / num_threads)):
p_lst = []
for i in range(num_threads):
idx = ep * num_threads + i
if idx >= len(task_lst):
break
p = multiprocessing.Process(target=wrap_write_standard_bvh, args=(task_lst[idx],))
p_lst.append(p)
for p in p_lst:
p.start()
for p in p_lst:
p.join()
print('complete ep:', ep)
# end.
    def write_standard_bvh(self, bvhfileName, prediction3dpoint):
        '''
        Convert a predicted pose sequence to a BVH file on disk.

        :param bvhfileName: output path; parent directory is created via mkd().
        :param prediction3dpoint: pose sequence, iterated as frames of 3D
            points. NOTE: it is scaled *in place* (x100) below — callers that
            reuse the array must pass a copy.
        :return: None (writes the BVH file as a side effect)
        '''
        # scale 100 for bvhacker vis.
        for frame in prediction3dpoint:
            for point3d in frame:
                point3d[0] *= 100
                point3d[1] *= 100
                point3d[2] *= 100
        mkd(bvhfileName)
        # 16 joint to 21 joint
        Converter = humanoid_1205_skeleton.SkeletonConverter()
        prediction3dpoint = Converter.convert_to_21joint(prediction3dpoint)
        # save bvh.
        human36m_skeleton = humanoid_1205_skeleton.H36mSkeleton()
        human36m_skeleton.poses2bvh(prediction3dpoint, output_file=bvhfileName)
if __name__ == '__main__':
    # Script entry point: parse CLI args, pin every RNG for reproducibility,
    # then run trajectory evaluation/export via PoseGAN.fit().
    args = parse_args()
    # fix random
    random_seed = args.random_seed # default 0
    torch.manual_seed(random_seed)
    torch.cuda.manual_seed(random_seed)
    np.random.seed(random_seed)
    random.seed(random_seed)
    # NOTE(review): setting PYTHONHASHSEED after interpreter startup does not
    # change this process's string-hash seed — confirm intent.
    os.environ['PYTHONHASHSEED'] = str(random_seed)
    # copy from #https://pytorch.org/docs/stable/notes/randomness.html
    torch.backends.cudnn.deterministic = True
    # torch.backends.cudnn.benchmark = True
    # Place the training log directory ('vpose_log') next to traj_save_path's
    # parent folder so logs and exported trajectories live together.
    traj_folder = os.path.abspath(os.path.join(args.traj_save_path, os.pardir))
    args.checkpoint = os.path.join(traj_folder, 'vpose_log')
    mod = PoseGAN(args)
    mod.fit(args)
    mod.writer.close()
| 19,271 | 43.714617 | 131 | py |
PoseTriplet | PoseTriplet-main/estimator/posegan_train.py | # Copyright (c) 2018-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import os
import random
from time import time
import numpy as np
import torch
from torch.autograd import Variable
from common.arguments import parse_args
from common.camera import *
from common.model import *
from function.gan_utils import pose_seq_bl_aug_batch
from function.utils import set_grad, get_discriminator_accuracy
from function.viz import Wrap_plot_seq_gif
from posegan_basementclass import PoseGANBasement
from progress.bar import Bar
'''
Training entry point: adversarial camera generation (Gcam vs. Dcam) combined
with supervised training of the 2D-to-3D pose and trajectory estimators.
'''
class PoseGAN(PoseGANBasement):
    """Adversarial trainer: a camera generator (Gcam) maps world/expert poses
    into plausible camera views, a discriminator (Dcam) judges the projected
    2D sequences, and the resulting fake pairs supervise PoseNet/TrajNet.
    """
    def __init__(self, args):
        # Base class sets up checkpoint dir, logger, summary writer.
        PoseGANBasement.__init__(self, args)
        # init param
        # self.augment_len = 32
        self.MSE = nn.MSELoss(reduction='mean').to(self.device)
        # prepare data and dataloader
        self.data_preparation()
        self.dataloader_preparation()
        self.s911_detect2d_dataloader_preparation()
        # prepare model and optimizer
        self.model_preparation()
    def fit(self, args):
        """Main training loop: per epoch, train the camera GAN (which also
        feeds PoseNet/TrajNet with fake pairs), optionally retrain on the
        fixed fake set, evaluate on all validation loaders, then step the
        LR schedulers and save checkpoints via self.logging()."""
        ###################################
        # Train start here.
        ###################################
        # load pretrain if used.
        if args.pretrain:
            self.logger.info('Check pretrain model performance.')
            val_rlt = {}
            for val_set_key in self.val_generator_dict:
                self.evaluate_posenet(tag='fake', valset=val_set_key)
                self.evaluate_trajnet(tag='fake', valset=val_set_key)
            self.evaluate_posenet_withPCK(tag='fake', valset='3dhp_flip')
            for val_set_key in self.val_generator_dict:
                val_rlt[val_set_key] = self.evaluate_posenet(tag='real', valset=val_set_key)
            self.evaluate_posenet_withPCK(tag='real', valset='3dhp_flip')
            self.summary.summary_epoch_update()
        for epoch in range(args.epochs):
            epoch_start_time = time()
            self.train_posegan()
            if self.summary.epoch > args.P_start_ep: # start record the performance when P training start.
                val_rlt = {}
                for val_set_key in self.val_generator_dict:
                    val_rlt[val_set_key] = self.evaluate_posenet(tag='fake', valset=val_set_key)
                    self.evaluate_trajnet(tag='fake', valset=val_set_key)
                self.evaluate_posenet_withPCK(tag='fake', valset='3dhp_flip')
            if args.add_random_cam:
                self.update_fixedfake_train_generator()
                self.train_posenet_realpose()
                self.train_trajnet_realpose()
                for val_set_key in self.val_generator_dict:
                    val_rlt[val_set_key] = self.evaluate_posenet(tag='real', valset=val_set_key)
                self.evaluate_posenet_withPCK(tag='real', valset='3dhp_flip') # evaluated separately (PCK metric)
            # log
            # NOTE(review): if neither branch above ran this epoch (epoch <=
            # P_start_ep and add_random_cam false, no pretrain), ``val_rlt``
            # is undefined here — confirm the expected argument combinations.
            self.logging(val_rlt, epoch_start_time)
            # udpate per epoch
            self.lr_scheduler_P.step()
            self.lr_scheduler_Gcam.step()
            self.lr_scheduler_Dcam.step()
            self.summary.summary_epoch_update()
    def model_preparation(self):
        # Build PoseNet (2D->3D pose), TrajNet (2D->root trajectory) and the
        # camera generator; the latter two builders live in the base class /
        # elsewhere in the file.
        self._model_preparation_pos()
        self._model_preparation_traj()
        self._model_preparation_Gcam()
    def train_posenet_fakepose_camed(self, cam_rlt_dict, tag='_fake'):
        """One PoseNet step on a Gcam-generated (2D, 3D) pair.

        :param cam_rlt_dict: output of self.model_Gcam, with 'pose3D_camed'
            and 'pose2D_camed' tensors (detached here so no gradient flows
            back into the generator).
        :param tag: suffix for the tensorboard scalar name.
        """
        epoch_loss_3d_train = 0
        N = 0
        self.model_pos_train.train()
        # prepare fake batch
        pose3D_camed = cam_rlt_dict['pose3D_camed'].detach()
        pose2D_camed = cam_rlt_dict['pose2D_camed'].detach()
        inputs_2d = pose2D_camed.detach()
        # Supervise only the center frame of the receptive field.
        inputs_3d = pose3D_camed.detach()[:, 0 + self.pad:0 + self.pad + 1]
        # now get fake data ready for train.
        loss_3d_pos, epoch_loss_3d_train, N = self._train_batch_posenet(inputs_2d.detach(), inputs_3d.detach(),
                                                                        epoch_loss_3d_train, N)
        # batch-wise log
        self.writer.add_scalar('train_P_batch/{}/loss_3d_pos'.format(tag), loss_3d_pos.item(),
                               self.summary.train_fakepose_iter_num)
        self.summary.summary_train_fakepose_iter_num_update()
    def train_trajnet_fakepose_camed(self, cam_rlt_dict, tag='_fake'):
        """One TrajNet step on a Gcam-generated pair; mirrors
        train_posenet_fakepose_camed but trains the trajectory model."""
        epoch_loss_3d_train = 0
        N = 0
        self.model_traj_train.train()
        # prepare fake batch
        pose3D_camed = cam_rlt_dict['pose3D_camed'].detach()
        pose2D_camed = cam_rlt_dict['pose2D_camed'].detach()
        inputs_2d = pose2D_camed.detach()
        inputs_3d = pose3D_camed.detach()[:, 0 + self.pad:0 + self.pad + 1]
        # now get fake data ready for train.
        loss_3d_traj, epoch_loss_3d_train, N = self._train_batch_trajnet(inputs_2d.detach(), inputs_3d.detach(),
                                                                         epoch_loss_3d_train, N)
        # batch-wise log
        self.writer.add_scalar('train_T_batch/{}/loss_3d_traj'.format(tag), loss_3d_traj.item(),
                               self.summary.train_faketraj_iter_num)
        self.summary.summary_train_faketraj_iter_num_update()
    def adv_loss(self, model_dis, data_real, data_fake, writer_name):
        """Generator-side adversarial loss plus accuracy logging.

        :param model_dis: discriminator network (gradients frozen by caller).
        :param data_real: batch of real samples.
        :param data_fake: batch of generator outputs.
        :param writer_name: tensorboard scalar prefix.
        :return: scalar loss tensor.
        """
        # Adversarial losses for 3D squence
        real_3d = model_dis(data_real)
        fake_3d = model_dis(data_fake)
        real_label_3d = Variable(torch.ones(real_3d.size())).to(self.device)
        fake_label_3d = Variable(torch.zeros(fake_3d.size())).to(self.device)
        # NOTE(review): this matches fake scores to real scores directly
        # (feature-matching style) rather than the standard LSGAN targets in
        # the commented-out lines below — confirm this is intentional.
        adv_3d_loss = self.MSE(real_3d, fake_3d)
        # adv_3d_real_loss = self.MSE(real_3d, fake_label_3d)
        # adv_3d_fake_loss = self.MSE(fake_3d, real_label_3d)
        # # Total discriminators losses
        # adv_3d_loss = (adv_3d_real_loss + adv_3d_fake_loss) * 0.5
        # monitor training process
        ###################################################
        real_acc = get_discriminator_accuracy(real_3d.reshape(-1), real_label_3d.reshape(-1))
        fake_acc = get_discriminator_accuracy(fake_3d.reshape(-1), fake_label_3d.reshape(-1))
        self.writer.add_scalar(writer_name + '_real_acc', real_acc, self.summary.train_iter_num)
        self.writer.add_scalar(writer_name + '_fake_acc', fake_acc, self.summary.train_iter_num)
        self.writer.add_scalar(writer_name + '_adv_loss', adv_3d_loss.item(), self.summary.train_iter_num)
        return adv_3d_loss
    def train_posegan(self):
        """One epoch of camera-GAN training over the expert-pose loader:
        G step (project expert 3D into a generated camera, fool Dcam on the
        2D projection), optional PoseNet/TrajNet steps on the fakes, then a
        throttled D step (every args.df iterations). Periodically renders a
        debug GIF of the intermediate pose representations.

        NOTE(review): reads the module-level ``args`` global (P_start_ep,
        df, checkpoint) — works when run via this script's __main__ only.
        """
        start_time = time()
        batch_num = 0
        self.model_Gcam.train()
        self.model_Dcam.train()
        bar = Bar('Train pose gan', max=self.aug_generator.num_batches)
        for _, _, batch_3d, batch_2d, _ in self.aug_generator.next_epoch():
            inputs_3d = torch.from_numpy(batch_3d.astype('float32')) # b x t x j x 3
            inputs_2d = torch.from_numpy(batch_2d.astype('float32')) # b x t x j x 2
            if torch.cuda.is_available():
                inputs_3d = inputs_3d.cuda()
                inputs_2d = inputs_2d.cuda()
            # random bl augment
            inputs_3d = pose_seq_bl_aug_batch(inputs_3d)
            inputs_3dworld_origin = cam2world_sktpos(inputs_3d)
            # train gan
            ##################################################
            ####### Train Generator #################
            ##################################################
            set_grad([self.model_Gcam], True)
            set_grad([self.model_Dcam], False)
            self.optimizer_Gcam.zero_grad()
            ###################################################
            # Re-center the sequence on its first-frame root before Gcam.
            reset_root = inputs_3d[:, :1, :1, :] * 1.0
            pose_recoverd_uncamed = inputs_3d * 1. - reset_root
            cam_rlt_dict = self.model_Gcam(pose_recoverd_uncamed)
            pose2D_camed = cam_rlt_dict['pose2D_camed']
            adv_cam_loss = self.adv_loss(self.model_Dcam, inputs_2d, pose2D_camed,
                                         writer_name='train_G_iter_acc/gcam')
            # Update generators
            ###################################################
            adv_cam_loss.backward()
            nn.utils.clip_grad_norm_(self.model_Gcam.parameters(), max_norm=1)
            self.optimizer_Gcam.step()
            ################################################
            ####### Train PoseNet #################
            ################################################
            if self.summary.epoch > args.P_start_ep:
                self.train_posenet_fakepose_camed(cam_rlt_dict)
                self.train_trajnet_fakepose_camed(cam_rlt_dict)
            ##################################################
            ####### Train Discriminator #############
            ##################################################
            d3d_real_acc, d3d_fake_acc = 0, 0
            if self.summary.train_iter_num % args.df == 0:
                set_grad([self.model_Gcam], False)
                set_grad([self.model_Dcam], True)
                # train Dcam
                d3d_real_acc, d3d_fake_acc = self._train_dis(model_dis=self.model_Dcam,
                                                             data_real=self.random_aug_d2d(inputs_2d),
                                                             data_fake=pose2D_camed,
                                                             writer_name='train_D_iter_acc/dcam',
                                                             fake_data_pool=self.fake_cam_sample,
                                                             optimizer=self.optimizer_Dcam)
            # visualize result
            if self.summary.train_iter_num % 5000 == 0:
                lables = ['input_world', 'input_cam3d', 'input_cam2d', 'pose_recoverd_uncamed_cam3d', 'RT_cam3d', 'RT_cam2d']
                seqs = self._zip_GIFplot_array([
                    inputs_3dworld_origin, inputs_3d, inputs_2d,
                    pose_recoverd_uncamed, cam_rlt_dict['pose3D_camed'], cam_rlt_dict['pose2D_camed']
                ])
                gif_save_path = os.path.join(args.checkpoint, 'trainingGif/epoch{:0>3d}_batch{:0>3d}.gif'.format(
                    self.summary.epoch, batch_num))
                self.logger.info('plotting image-->{}'.format(gif_save_path))
                Wrap_plot_seq_gif(seqs=seqs, labs=lables, save_path=gif_save_path)
            # update writer iter num
            self.summary.summary_train_iter_num_update()
            bar.suffix = '(epoch:{epoch}) | ({batch}/{size}) | Batch: {bt:.3f}s | Total: {ttl:} | ETA: {eta:} ' \
                         '| d3d_real_acc: {d3d_real_acc: .4f} | d3d_fake_acc: {d3d_fake_acc: .4f} ' \
                .format(epoch=self.summary.epoch, batch=batch_num, size=self.aug_generator.num_batches,
                        bt=(time() - start_time) / (batch_num + 1), ttl=bar.elapsed_td, eta=bar.eta_td,
                        d3d_real_acc=d3d_real_acc, d3d_fake_acc=d3d_fake_acc)
            bar.next()
            batch_num = batch_num + 1
if __name__ == '__main__':
    # Script entry point: parse CLI args, pin every RNG for reproducibility,
    # then run the full GAN + estimator training loop.
    args = parse_args()
    # fix random
    random_seed = args.random_seed # default 0
    torch.manual_seed(random_seed)
    torch.cuda.manual_seed(random_seed)
    np.random.seed(random_seed)
    random.seed(random_seed)
    # NOTE(review): setting PYTHONHASHSEED after interpreter startup does not
    # change this process's string-hash seed — confirm intent.
    os.environ['PYTHONHASHSEED'] = str(random_seed)
    # copy from #https://pytorch.org/docs/stable/notes/randomness.html
    torch.backends.cudnn.deterministic = True
    # torch.backends.cudnn.benchmark = True
    mod = PoseGAN(args)
    mod.fit(args)
    mod.writer.close()
PoseTriplet | PoseTriplet-main/estimator/posegan_basementclass.py | # Copyright (c) 2018-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import os
import os.path as path
import pickle
from time import time
import numpy as np
import torch
import torch.optim as optim
from torch.autograd import Variable
from tqdm import tqdm
from common.camera import *
from common.generators import ChunkedGenerator, UnchunkedGenerator, ChunkedNoPadGeneratorV5
from common.loss import *
from common.model import *
from common.utils import deterministic_random
from function.gan_utils import pose_seq_bl_aug
from function.logger import create_logger
from function.utils import Summary, get_scheduler, get_discriminator_accuracy, Sample_from_Pool, get_contacts
'''
Base class shared by the training/evaluation scripts: dataset and 2D-keypoint
loading, train/validation dataloader construction, model and optimizer setup,
logging and checkpointing.
'''
class PoseGANBasement(object):
    def __init__(self, args):
        """Set up device, checkpoint directory ('<checkpoint>/<note>'), and
        the training monitor (Summary writer + file logger).

        :param args: parsed CLI namespace; ``args.checkpoint`` is rewritten
            in place to include ``args.note``.
        """
        # init param
        self.device = torch.device("cuda")
        self.args = args
        # define checkpoint directory # Create checkpoint directory if it does not exist
        # self.args.checkpoint = path.join(self.args.checkpoint, self.args.note,
        #                                  datetime.datetime.now().strftime('%m%d%H%M%S'))
        self.args.checkpoint = path.join(self.args.checkpoint, self.args.note)
        print('INFO: creat log folder at {}'.format(self.args.checkpoint))
        os.makedirs(self.args.checkpoint, exist_ok=True)
        os.makedirs(os.path.join(self.args.checkpoint, 'ckpt'), exist_ok=True)
        # prepare monitor
        # Init monitor for net work training
        #########################################################
        self.summary = Summary(self.args.checkpoint)
        self.writer = self.summary.create_summary()
        self.logger = create_logger(os.path.join(self.args.checkpoint, 'log.txt'))
        self.logger.info(args)
    def logging(self, val_rlt, epoch_start_time):
        """
        Log one epoch's validation results and save checkpoints.

        :param val_rlt: dict mapping val-set name -> metrics dict; each entry
            must contain key 'p1' and an 's911' entry must be present.
        :param epoch_start_time: time() value taken at epoch start.

        Saves 'best_ckpt_S911.bin' whenever the latest s911 p1 ties/beats the
        best seen so far, plus a periodic snapshot every 5 epochs.
        """
        lr = self.optimizer_P.param_groups[0]['lr']
        losses_str = ' '.join(['{}: {:.4f}'.format(val_set_key, val_rlt[val_set_key]['p1']) \
                               for val_set_key in val_rlt])
        dt = (time() - epoch_start_time) / 60
        self.logger.info(
            '====> Epoch: {} Time: {:.2f} min {} lr: {:.5f}'.format(self.summary.epoch, dt, losses_str, lr))
        # record the result list and save ckpt
        # (Re-)initialize the history during the first epochs; from epoch 3
        # on it accumulates one s911 p1 value per call.
        if self.summary.epoch <= 2:
            self.h36m_p1_s911 = []
        self.h36m_p1_s911.append(val_rlt['s911']['p1'])
        # Save checkpoint if necessary
        if self.h36m_p1_s911[-1] == min(self.h36m_p1_s911):
            ckpt_path = os.path.join(self.args.checkpoint, 'ckpt', 'best_ckpt_S911.bin')
            self.logger.info('Saving checkpoint to{}'.format(ckpt_path))
            torch.save({
                'model_pos': self.model_pos_train.state_dict(),
                'model_traj': self.model_traj_train.state_dict(),
            }, ckpt_path)
        if self.summary.epoch % 5 == 0:
            ckpt_path = os.path.join(self.args.checkpoint, 'ckpt', 'ckpt_ep_{:0>3d}.bin'.format(self.summary.epoch))
            self.logger.info('Saving checkpoint to{}'.format(ckpt_path))
            torch.save({
                'model_pos': self.model_pos_train.state_dict(),
                'model_traj': self.model_traj_train.state_dict(),
            }, ckpt_path)
    def data_preparation(self):
        """Load the 3D dataset (H36M only), derive per-camera 3D poses and
        foot-contact labels for every (subject, action), then load the
        matching 2D detections via keypoints_preparation().

        :raises KeyError: if ``args.dataset`` is not 'h36m'.
        """
        ###################################
        # prepare data
        ###################################
        self.logger.info('Loading dataset...')
        dataset_path = 'data/data_3d_' + self.args.dataset + '.npz'
        if self.args.dataset == 'h36m':
            from common.h36m_dataset import Human36mDataset
            self.dataset = Human36mDataset(dataset_path)
        else:
            raise KeyError('Invalid dataset')
        self.logger.info('Preparing dataset...')
        for subject in self.dataset.subjects():
            for action in self.dataset[subject].keys():
                anim = self.dataset[subject][action]
                if 'positions' in anim:
                    positions_3d = []
                    for cam in anim['cameras']:
                        pos_3d = world_to_camera(anim['positions'], R=cam['orientation'], t=cam['translation'])
                        # pos_3d[:, 1:] -= pos_3d[:, :1] # Remove global offset, but keep trajectory in first position, no, keep here
                        positions_3d.append(pos_3d) # T x J x 3
                    anim['positions_3d'] = positions_3d
                if 'positions' in anim:
                    contact_labels = []
                    # NOTE(review): contacts come from world positions, so all
                    # cameras get identical labels — recomputed per camera only
                    # to keep list lengths aligned with positions_3d.
                    for cam in anim['cameras']:
                        contact_label = get_contacts(anim['positions'])
                        contact_labels.append(contact_label) # T x 2 x 1
                    anim['contact_labels'] = contact_labels
        self.keypoints_preparation()
    def keypoints_preparation(self):
        """Load 2D detections for ``args.keypoints``, check coverage against
        the 3D dataset, trim extra frames, and normalize to screen
        coordinates in place. Sets self.keypoints / kps_left / kps_right /
        joints_left / joints_right."""
        # 2D keypoint
        self.logger.info('Loading 2D detections...')
        self.keypoints = np.load('data/data_2d_' + self.args.dataset + '_' + self.args.keypoints + '.npz',
                                 allow_pickle=True)
        # Hard-coded 16-joint symmetry metadata instead of the file's own:
        # keypoints_metadata = self.keypoints['metadata'].item()
        keypoints_metadata = {'num_joints': 16,
                              'keypoints_symmetry': [[4, 5, 6, 10, 11, 12], [1, 2, 3, 13, 14, 15]]}
        keypoints_symmetry = keypoints_metadata['keypoints_symmetry']
        self.kps_left, self.kps_right = list(keypoints_symmetry[0]), list(keypoints_symmetry[1])
        self.joints_left, self.joints_right = list(self.dataset.skeleton().joints_left()), list(
            self.dataset.skeleton().joints_right())
        self.keypoints = self.keypoints['positions_2d'].item()
        for subject in self.dataset.subjects():
            assert subject in self.keypoints, 'Subject {} is missing from the 2D detections dataset'.format(subject)
            for action in self.dataset[subject].keys():
                assert action in self.keypoints[
                    subject], 'Action {} of subject {} is missing from the 2D detections dataset'.format(action,
                                                                                                         subject)
                if 'positions_3d' not in self.dataset[subject][action]:
                    continue
                for cam_idx in range(len(self.keypoints[subject][action])):
                    # We check for >= instead of == because some videos in H3.6M contain extra frames
                    mocap_length = self.dataset[subject][action]['positions_3d'][cam_idx].shape[0]
                    assert self.keypoints[subject][action][cam_idx].shape[0] >= mocap_length
                    if self.keypoints[subject][action][cam_idx].shape[0] > mocap_length:
                        # Shorten sequence
                        self.keypoints[subject][action][cam_idx] = self.keypoints[subject][action][cam_idx][
                                                                   :mocap_length]
                assert len(self.keypoints[subject][action]) == len(self.dataset[subject][action]['positions_3d'])
        # norm keypoint
        for subject in self.keypoints.keys():
            for action in self.keypoints[subject]:
                for cam_idx, kps in enumerate(self.keypoints[subject][action]):
                    # Normalize camera frame
                    cam = self.dataset.cameras()[subject][cam_idx]
                    kps[..., :2] = normalize_screen_coordinates(kps[..., :2], w=cam['res_w'], h=cam['res_h'])
                    self.keypoints[subject][action][cam_idx] = kps
    def fetch(self, subjects, action_filter=None, subset=1, parse_3d_poses=True):
        """Collect per-camera sequences for the given subjects.

        :param subjects: list of subject names.
        :param action_filter: optional list of action-name prefixes to keep.
        :param subset: fraction (<1) of each sequence to keep, or 1 for all.
        :param parse_3d_poses: also collect 3D poses / contact labels.
        :return: tuple of 5 parallel lists (one entry per camera view):
            camera intrinsics, camera extrinsics, 3D poses, 2D poses,
            contact labels.
        """
        out_poses_3d = []
        out_poses_2d = []
        out_camera_params = []
        out_camera_rtparams = []
        out_contact_labels = []
        for subject in subjects:
            for action in self.keypoints[subject].keys():
                if action_filter is not None:
                    # Keep the action only if it matches one of the prefixes.
                    found = False
                    for a in action_filter:
                        if action.startswith(a):
                            found = True
                            break
                    if not found:
                        continue
                poses_2d = self.keypoints[subject][action]
                for i in range(len(poses_2d)): # Iterate across cameras
                    out_poses_2d.append(poses_2d[i])
                if subject in self.dataset.cameras():
                    cams = self.dataset.cameras()[subject]
                    assert len(cams) == len(poses_2d), 'Camera count mismatch'
                    for cam in cams:
                        if 'intrinsic' in cam:
                            out_camera_params.append(cam['intrinsic'])
                        if 'extrinsic' in cam:
                            out_camera_rtparams.append(cam['extrinsic'])
                if parse_3d_poses and 'positions_3d' in self.dataset[subject][action]:
                    poses_3d = self.dataset[subject][action]['positions_3d']
                    assert len(poses_3d) == len(poses_2d), 'Camera count mismatch'
                    for i in range(len(poses_3d)): # Iterate across cameras
                        out_poses_3d.append(poses_3d[i])
                # for contact labels, same as poses_3d
                if parse_3d_poses and 'contact_labels' in self.dataset[subject][action]:
                    contact_labels = self.dataset[subject][action]['contact_labels']
                    assert len(contact_labels) == len(poses_2d), 'Camera count mismatch'
                    for i in range(len(contact_labels)): # Iterate across cameras
                        out_contact_labels.append(contact_labels[i])
        # All five outputs must be non-empty for the downstream loaders.
        if len(out_camera_params) == 0:
            assert False
        if len(out_camera_rtparams) == 0:
            assert False
        if len(out_poses_3d) == 0:
            assert False
        if len(out_contact_labels) == 0:
            assert False
        stride = self.args.downsample
        if subset < 1:
            # Keep a deterministic random window of each sequence.
            # NOTE(review): this branch subsamples poses only — contact labels
            # are left untouched here (unlike the stride branch); confirm.
            for i in range(len(out_poses_2d)):
                n_frames = int(round(len(out_poses_2d[i]) // stride * subset) * stride)
                start = deterministic_random(0, len(out_poses_2d[i]) - n_frames + 1, str(len(out_poses_2d[i])))
                out_poses_2d[i] = out_poses_2d[i][start:start + n_frames:stride]
                if out_poses_3d is not None:
                    out_poses_3d[i] = out_poses_3d[i][start:start + n_frames:stride]
        elif stride > 1:
            # Downsample as requested
            for i in range(len(out_poses_2d)):
                out_poses_2d[i] = out_poses_2d[i][::stride]
                # (out_poses_3d is a list, so the None check below is always
                # true; kept for parity with the original control flow.)
                if out_poses_3d is not None:
                    out_poses_3d[i] = out_poses_3d[i][::stride]
                    out_contact_labels[i] = out_contact_labels[i][::stride]
        return out_camera_params, out_camera_rtparams, out_poses_3d, out_poses_2d, out_contact_labels
    def dataloader_preparation(self):
        """Build all train/validation generators.

        Fetches H36M train/test splits, loads expert (RL-rollout) pose
        dictionaries, builds the chunked training generator over expert
        sequences, and assembles self.val_generator_dict with unchunked
        evaluation loaders for S1/5/6/7/8, S9/11, 3DHP and 3DPW (each with
        and without flip augmentation). Also sets self.pad / self.rf from
        the temporal architecture string.
        """
        action_filter = None if self.args.actions == '*' else self.args.actions.split(',')
        if action_filter is not None:
            self.logger.info('Selected actions:{}'.format(action_filter))
        ###################################
        # train subject # test subject
        ###################################
        subjects_train = self.args.subjects_train.split(',')
        subjects_test = self.args.subjects_test.split(',')
        cameras_train, cam_rt_train, poses_train, poses_train_2d, contact_train = self.fetch(subjects_train,
                                                                                             action_filter,
                                                                                             subset=self.args.subset)
        cameras_valid, cam_rt_valid, poses_valid, self.poses_valid_2d, contact_valid = self.fetch(subjects_test,
                                                                                                  action_filter)
        causal_shift = 0
        # Receptive field and symmetric padding from the conv architecture.
        self.pad = (np.prod([int(x) for x in self.args.architecture.split(',')]) - 1) // 2
        self.rf = np.prod([int(x) for x in self.args.architecture.split(',')])
        ##################################################################
        ##### linkstart: load expert,
        ##################################################################
        self.logger.info('INFO: self.args.expert_dict_path: {}'.format(self.args.expert_dict_path))
        if self.args.expert_dict_path is None:
            # Dummy single-take expert so the pipeline still runs without one.
            expert_dict = {'h36m_take_000':{'skt_wpos':np.ones((500, 16, 3))}}
            take_list = ['h36m_take_000']
        else:
            expert_feat_file = self.args.expert_dict_path
            expert_dict = pickle.load(open(expert_feat_file, 'rb'))
            # take_list = [take for take in expert_dict if expert_dict[take]['t_num_reset'] == 0] # maybe filter some
            take_list = ['h36m_take_{:0>3d}'.format(i) for i in range(600)]
        # load expert from rib-rl
        self.logger.info('INFO: self.args.extra_expert_dict_path: {}'.format(self.args.extra_expert_dict_path))
        if self.args.extra_expert_dict_path is None:
            extra_expert_dict = None
            extra_take_list = []
        else:
            extra_expert_feat_file = self.args.extra_expert_dict_path
            extra_expert_dict = pickle.load(open(extra_expert_feat_file, 'rb'))
            # Keep only takes that finished without a simulator reset.
            extra_take_list = [take for take in extra_expert_dict if extra_expert_dict[take]['t_num_reset'] == 0]
        ######################################################################
        # prepare a basement for every epoch update
        # (consumed by update_fixedfake_train_generator each epoch)
        self.fixed_fake_database = {
            'cam_rt_train': cam_rt_train,
            'cameras_train': cameras_train,
            'take_list': take_list,
            'expert_dict': expert_dict,
            'causal_shift': causal_shift,
            'extra_expert_dict': extra_expert_dict,
            'extra_take_list': extra_take_list,
        }
        skt_pos_train = []
        skt_pos_train_2dtarget = []
        for i, take in enumerate(take_list):
            # assume expert is less and shorter than h36m.
            skt_pos_train.append(world2cam_sktpos(expert_dict[take]['skt_wpos']))
            # skt_pos_train.append(world2cam_sktpos(reset_spine(expert_dict[take]['skt_wpos'])))
            skt_pos_train_2dtarget.append(poses_train_2d[i][10:expert_dict[take]['skt_wpos'].shape[0] + 10])
        ########################################################################################
        # extra
        for i, take in enumerate(extra_take_list):
            # assume expert is less and shorter than h36m.
            skt_pos_train.append(world2cam_sktpos(extra_expert_dict[take]['skt_wpos']))
            # skt_pos_train.append(world2cam_sktpos(reset_spine(extra_expert_dict[take]['skt_wpos'])))
            skt_pos_train_2dtarget.append(poses_train_2d[i%len(poses_train_2d)][10:extra_expert_dict[take]['skt_wpos'].shape[0] + 10])
        ########################################################################################
        # prepare data for augmenting
        aug_pad = self.pad
        self.aug_generator = ChunkedNoPadGeneratorV5(self.args.batch_size // self.args.stride, None, None,
                                                     skt_pos_train, skt_pos_train_2dtarget, None, self.args.stride,
                                                     pad=aug_pad, causal_shift=causal_shift, shuffle=True,
                                                     # augment=True,
                                                     augment=self.args.data_augmentation,
                                                     kps_left=self.kps_left, kps_right=self.kps_right,
                                                     joints_left=self.joints_left, joints_right=self.joints_right)
        self.logger.info('INFO: aug-supervision on {} frames'.format(self.aug_generator.num_frames()))
        self.fake_cam_sample = Sample_from_Pool(max_elements=self.args.batch_size)
        # train loader s15678 eval
        train_generator_eval = UnchunkedGenerator(cam_rt_train, poses_train, poses_train_2d,
                                                  pad=self.pad, causal_shift=causal_shift, augment=False,
                                                  kps_left=self.kps_left, kps_right=self.kps_right,
                                                  joints_left=self.joints_left,
                                                  joints_right=self.joints_right)
        self.logger.info('INFO: Testing on {} frames > train_generator_eval'.format(train_generator_eval.num_frames()))
        train_generator_eval_flip = UnchunkedGenerator(cam_rt_train, poses_train, poses_train_2d,
                                                       pad=self.pad, causal_shift=causal_shift, augment=True,
                                                       kps_left=self.kps_left, kps_right=self.kps_right,
                                                       joints_left=self.joints_left,
                                                       joints_right=self.joints_right)
        self.logger.info('INFO: Testing on {} frames > train_generator_eval_flip'.format(train_generator_eval_flip.num_frames()))
        # test loader -- S911
        test_generator_s911 = UnchunkedGenerator(None, poses_valid, self.poses_valid_2d,
                                                 pad=self.pad, causal_shift=causal_shift, augment=False,
                                                 kps_left=self.kps_left, kps_right=self.kps_right,
                                                 joints_left=self.joints_left,
                                                 joints_right=self.joints_right)
        self.logger.info('INFO: Testing on {} frames > test_generator_s911'.format(test_generator_s911.num_frames()))
        test_generator_s911_flip = UnchunkedGenerator(None, poses_valid, self.poses_valid_2d,
                                                      pad=self.pad, causal_shift=causal_shift, augment=True,
                                                      kps_left=self.kps_left, kps_right=self.kps_right,
                                                      joints_left=self.joints_left,
                                                      joints_right=self.joints_right)
        self.logger.info('INFO: Testing on {} frames > test_generator_s911_flip'.format(test_generator_s911_flip.num_frames()))
        # test loader -- 3DHP # all frame are used.
        pkl_path = './data_cross/3dhp/3dhp_testset_bySub.pkl'
        test_generator_3dhp = self._dataloader_preparation(pkl_path=pkl_path,
                                                           key_2d='valid_kps_2d_imgnorm',
                                                           key_3d='valid_kps_3d',
                                                           clip_flg=True)
        test_generator_3dhp_flip = self._dataloader_preparation(pkl_path=pkl_path,
                                                                key_2d='valid_kps_2d_imgnorm',
                                                                key_3d='valid_kps_3d',
                                                                clip_flg=True,
                                                                test_augment=True)
        # test loader -- 3DPWD
        pkl_path = './data_cross/3dpw/3dpw_testset_bySub.pkl'
        test_generator_3dpw = self._dataloader_preparation(pkl_path=pkl_path,
                                                           key_2d='joints_2d_imgnorm',
                                                           key_3d='valid_kps_3d',
                                                           clip_flg=True)
        test_generator_3dpw_flip = self._dataloader_preparation(pkl_path=pkl_path,
                                                                key_2d='joints_2d_imgnorm',
                                                                key_3d='valid_kps_3d',
                                                                clip_flg=True,
                                                                test_augment=True)
        ############################
        ## place all test loader together
        ############################
        self.val_generator_dict = {
            's15678': train_generator_eval,
            's15678_flip': train_generator_eval_flip,
            's911': test_generator_s911,
            's911_flip': test_generator_s911_flip,
            '3dhp': test_generator_3dhp,
            '3dhp_flip': test_generator_3dhp_flip,
            '3dpw': test_generator_3dpw,
            '3dpw_flip': test_generator_3dpw_flip,
        }
    def _dataloader_preparation(self, pkl_path, key_2d, key_3d, clip_flg, scale2d=1., test_augment=False):
        """
        Build an evaluation loader for a cross-dataset pickle (3DHP / 3DPW).

        :param pkl_path: pickle with a per-subject dict of sequences.
        :param key_2d: dict key for the 2D keypoints array.
        :param key_3d: dict key for the 3D keypoints array.
        :param clip_flg: if True, split each subject into its 'clip_idx'
            [start, end) sub-sequences; otherwise use the full sequence.
        :param scale2d: multiplier applied to the 2D coordinates.
        :param test_augment: enable flip augmentation in the generator.
        :return: an UnchunkedGenerator over the collected sequences.
        """
        with open(pkl_path, 'rb') as fp:
            self.logger.info('load from pickle file -> {}'.format(pkl_path))
            tmp_npdict = pickle.load(fp)
        poses_3d = []
        poses_2d = []
        # clip_flg = True
        # [..., :2] for 2D is to remove the confidence channel.
        for sub in tmp_npdict:
            if clip_flg:
                for clip_idx in tmp_npdict[sub]['clip_idx']:
                    poses_3d.append(tmp_npdict[sub][key_3d][clip_idx[0]:clip_idx[1]])
                    poses_2d.append(tmp_npdict[sub][key_2d][clip_idx[0]:clip_idx[1]][..., :2] * scale2d)
            else:
                poses_3d.append(tmp_npdict[sub][key_3d])
                poses_2d.append(tmp_npdict[sub][key_2d][..., :2] * scale2d)
        test_generator = UnchunkedGenerator(cameras=None, poses_3d=poses_3d, poses_2d=poses_2d,
                                            pad=self.pad, causal_shift=0, augment=test_augment,
                                            kps_left=self.kps_left, kps_right=self.kps_right,
                                            joints_left=self.joints_left,
                                            joints_right=self.joints_right)
        self.logger.info('INFO: Testing on {} frames'.format(test_generator.num_frames()))
        return test_generator
def s911_detect2d_dataloader_preparation(self):
for det2d in ['hr']:
self.logger.info('INFO: load s911 det2d: {}'.format(det2d))
self.args.keypoints = det2d
self.keypoints_preparation()
self._s911_detect2d_dataloader_preparation(det2d)
    def _s911_detect2d_dataloader_preparation(self, det2d):
        """Build S9/S11 eval generators (plain + flip) for the given 2D
        detector tag and register them in self.val_generator_dict under
        'S911_<det2d>' / 'S911_flip_<det2d>'.

        :param det2d: detector tag used only for loader naming/logging; the
            detections themselves were loaded by keypoints_preparation().
        """
        causal_shift = 0
        action_filter = None if self.args.actions == '*' else self.args.actions.split(',')
        subjects_test = self.args.subjects_test.split(',')
        cameras_valid, cam_rt_valid, poses_valid, poses_valid_2d, contact_valid = self.fetch(subjects_test,
                                                                                             action_filter)
        # test loader -- S911
        test_generator_s911 = UnchunkedGenerator(None, poses_valid, poses_valid_2d,
                                                 pad=self.pad, causal_shift=causal_shift, augment=False,
                                                 kps_left=self.kps_left, kps_right=self.kps_right,
                                                 joints_left=self.joints_left,
                                                 joints_right=self.joints_right)
        self.logger.info('INFO: Testing on {} frames > test_generator_s911 > det2d:{}'.format(test_generator_s911.num_frames(), det2d))
        test_generator_s911_flip = UnchunkedGenerator(None, poses_valid, poses_valid_2d,
                                                      pad=self.pad, causal_shift=causal_shift, augment=True,
                                                      kps_left=self.kps_left, kps_right=self.kps_right,
                                                      joints_left=self.joints_left,
                                                      joints_right=self.joints_right)
        self.logger.info(
            'INFO: Testing on {} frames > test_generator_s911_flip > det2d:{}'.format(test_generator_s911_flip.num_frames(), det2d))
        self.val_generator_dict['S911_{}'.format(det2d)] = test_generator_s911
        self.val_generator_dict['S911_flip_{}'.format(det2d)] = test_generator_s911_flip
def update_fixedfake_train_generator(self):
"""
update dataloader for each epoch
include bone length augmentation and z-axis rotation
"""
cam_rt_train = self.fixed_fake_database['cam_rt_train']
cameras_train = self.fixed_fake_database['cameras_train']
take_list = self.fixed_fake_database['take_list']
expert_dict = self.fixed_fake_database['expert_dict']
causal_shift = self.fixed_fake_database['causal_shift']
extra_expert_dict = self.fixed_fake_database['extra_expert_dict'] # extra for boost exp
extra_take_list = self.fixed_fake_database['extra_take_list'] # extra for boost exp
fixed_fake_cam_rt_train = []
fixed_fake_poses_train = []
fixed_fake_poses_train_2d = []
for i, take in enumerate(take_list):
cam_ex = cam_rt_train[i]
fixed_fake_cam_rt_train.append(cam_ex)
tmp_skt_wpos = expert_dict[take]['skt_wpos'].reshape(-1, 16, 3).astype('float32')
tmp_skt_wpos = zaxis_randrotation(tmp_skt_wpos)
tmp_skt_wpos = pose_seq_bl_aug(torch.from_numpy(tmp_skt_wpos)).numpy()
fixed_fake_poses_camed = world_to_camera_sktpos_v3(tmp_skt_wpos, self.args)
fixed_fake_poses_train.append(fixed_fake_poses_camed)
cam_ix = cameras_train[i]
cam_ix_tf = torch.from_numpy(np.tile(cam_ix, (fixed_fake_poses_camed.shape[0], 1)))
fixed_fake_poses_train_2d.append(
project_to_2d_purelinear(fixed_fake_poses_camed))
############################################################
# extra
for i, take in enumerate(extra_take_list):
i = i % len(cam_rt_train)
cam_ex = cam_rt_train[i]
fixed_fake_cam_rt_train.append(cam_ex)
tmp_skt_wpos = extra_expert_dict[take]['skt_wpos'].reshape(-1, 16, 3).astype('float32')
tmp_skt_wpos = zaxis_randrotation(tmp_skt_wpos)
tmp_skt_wpos = pose_seq_bl_aug(torch.from_numpy(tmp_skt_wpos)).numpy()
fixed_fake_poses_camed = world_to_camera_sktpos_v3(tmp_skt_wpos, self.args)
fixed_fake_poses_train.append(fixed_fake_poses_camed)
cam_ix = cameras_train[i]
cam_ix_tf = torch.from_numpy(np.tile(cam_ix, (fixed_fake_poses_camed.shape[0], 1)))
fixed_fake_poses_train_2d.append(
project_to_2d_purelinear(fixed_fake_poses_camed))
########################################################################################
self.train_generator = ChunkedGenerator(self.args.batch_size // self.args.stride, None,
fixed_fake_poses_train,
fixed_fake_poses_train_2d, self.args.stride,
pad=self.pad, causal_shift=causal_shift, shuffle=True,
augment=self.args.data_augmentation,
# augment=False,
kps_left=self.kps_left, kps_right=self.kps_right,
joints_left=self.joints_left, joints_right=self.joints_right)
self.logger.info('INFO: Training on {} frames'.format(self.train_generator.num_frames()))
def _count_param(self, model, name):
# print param number size.
model_params = 0
for parameter in model.parameters():
model_params += parameter.numel()
self.logger.info('INFO: Trainable parameter count for model {} is:{}'.format(name, model_params))
    def _model_preparation_pos(self):
        """Build the pose-lifting networks (2D keypoints -> root-relative 3D pose).

        Creates a training model (optimized single-frame variant when possible),
        a separate eval model, the Adam optimizer and a 'lambda' LR scheduler,
        and optionally loads pretrained weights.

        Side effects: sets self.model_pos_train, self.model_pos,
        self.optimizer_P, self.lr_scheduler_P; moves both models to CUDA.
        """
        ######################################
        # prepare model: posenet: 2d pose -> 3d pose
        ######################################
        # e.g. architecture '3,3,3' -> filter widths of the temporal convolutions
        filter_widths = [int(x) for x in self.args.architecture.split(',')]
        if not self.args.disable_optimizations and not self.args.dense and self.args.stride == 1:
            # Use optimized model for single-frame predictions
            self.model_pos_train = TemporalModelOptimized1f(self.poses_valid_2d[0].shape[-2],
                                                            self.poses_valid_2d[0].shape[-1],
                                                            self.dataset.skeleton().num_joints(),
                                                            filter_widths=filter_widths, causal=self.args.causal,
                                                            dropout=self.args.dropout, channels=self.args.channels)
        else:
            # When incompatible settings are detected (stride > 1, dense filters, or disabled optimization) fall back to normal model
            self.model_pos_train = TemporalModel(self.poses_valid_2d[0].shape[-2],
                                                 self.poses_valid_2d[0].shape[-1],
                                                 self.dataset.skeleton().num_joints(),
                                                 filter_widths=filter_widths, causal=self.args.causal,
                                                 dropout=self.args.dropout, channels=self.args.channels,
                                                 dense=self.args.dense)
        # model for eval
        # The eval model is always the plain TemporalModel; training weights are
        # copied into it at evaluation time (see evaluate_posenet).
        self.model_pos = TemporalModel(self.poses_valid_2d[0].shape[-2],
                                       self.poses_valid_2d[0].shape[-1],
                                       self.dataset.skeleton().num_joints(),
                                       filter_widths=filter_widths, causal=self.args.causal, dropout=self.args.dropout,
                                       channels=self.args.channels, dense=self.args.dense)
        ##################################
        ##################################
        receptive_field = self.model_pos.receptive_field()
        self.logger.info('INFO: Receptive field: {} frames'.format(receptive_field))
        pad_check = (receptive_field - 1) // 2  # Padding on each side
        # sanity check: padding implied by the architecture must match the
        # padding the data generators were built with
        assert pad_check == self.pad, 'pad mismatch'
        # print param number size.
        self._count_param(self.model_pos_train, 'self.model_pos_train')
        self.model_pos = self.model_pos.cuda()
        self.model_pos_train = self.model_pos_train.cuda()
        ###################################
        # optimizer.
        ###################################
        self.optimizer_P = torch.optim.Adam(self.model_pos_train.parameters(), lr=self.args.learning_rate)
        self.lr_scheduler_P = get_scheduler(self.optimizer_P, policy='lambda', nepoch_fix=0, nepoch=self.args.epochs)
        ###################################
        # load pretrain
        ###################################
        if self.args.pretrain:
            # NOTE(review): pretrained weights are read from args.evaluate —
            # presumably reusing the evaluation checkpoint path; confirm.
            ckpt_path = self.args.evaluate
            self.logger.info('Loading checkpoint at {}'.format(ckpt_path))
            checkpoint = torch.load(ckpt_path, map_location=lambda storage, loc: storage)
            self.model_pos_train.load_state_dict(checkpoint['model_pos'])
            self.model_pos.load_state_dict(checkpoint['model_pos'])
    def _model_preparation_traj(self):
        """Build the trajectory networks (2D keypoints -> 3D root trajectory).

        Mirrors _model_preparation_pos but regresses a single "joint"
        (the root position). Sets self.model_traj_train, self.model_traj,
        self.optimizer_T, self.lr_scheduler_T; moves both models to CUDA.
        """
        ######################################
        # prepare: posenet: 2d pose -> 3d traj
        ######################################
        filter_widths = [int(x) for x in self.args.architecture.split(',')]
        if not self.args.disable_optimizations and not self.args.dense and self.args.stride == 1:
            # Use optimized model for single-frame predictions
            self.model_traj_train = TemporalModelOptimized1f(self.poses_valid_2d[0].shape[-2],
                                                             self.poses_valid_2d[0].shape[-1],
                                                             1,
                                                             filter_widths=filter_widths, causal=self.args.causal,
                                                             dropout=self.args.dropout, channels=self.args.channels)
        else:
            # When incompatible settings are detected (stride > 1, dense filters, or disabled optimization) fall back to normal model
            self.model_traj_train = TemporalModel(self.poses_valid_2d[0].shape[-2],
                                                  self.poses_valid_2d[0].shape[-1],
                                                  1,
                                                  filter_widths=filter_widths, causal=self.args.causal,
                                                  dropout=self.args.dropout, channels=self.args.channels,
                                                  dense=self.args.dense)
        # model for eval
        # Plain TemporalModel; weights are copied in from the training model
        # at evaluation time (see evaluate_trajnet).
        self.model_traj = TemporalModel(self.poses_valid_2d[0].shape[-2],
                                        self.poses_valid_2d[0].shape[-1],
                                        1,
                                        filter_widths=filter_widths, causal=self.args.causal, dropout=self.args.dropout,
                                        channels=self.args.channels, dense=self.args.dense)
        ##################################
        ##################################
        receptive_field = self.model_traj.receptive_field()
        self.logger.info('INFO: Receptive field: {} frames'.format(receptive_field))
        pad_check = (receptive_field - 1) // 2  # Padding on each side
        # architecture padding must match the padding the generators use
        assert pad_check == self.pad, 'pad mismatch'
        # print param number size.
        self._count_param(self.model_traj_train, 'self.model_traj_train')
        self.model_traj = self.model_traj.cuda()
        self.model_traj_train = self.model_traj_train.cuda()
        ###################################
        # optimizer.
        ###################################
        self.optimizer_T = torch.optim.Adam(self.model_traj_train.parameters(), lr=self.args.learning_rate)
        self.lr_scheduler_T = get_scheduler(self.optimizer_T, policy='lambda', nepoch_fix=0, nepoch=self.args.epochs)
        ###################################
        # load pretrain
        ###################################
        if self.args.pretrain:
            # NOTE(review): checkpoint path comes from args.evaluate — confirm.
            ckpt_path = self.args.evaluate
            self.logger.info('Loading checkpoint at {}'.format(ckpt_path))
            checkpoint = torch.load(ckpt_path, map_location=lambda storage, loc: storage)
            self.model_traj_train.load_state_dict(checkpoint['model_traj'])
            self.model_traj.load_state_dict(checkpoint['model_traj'])
    def _model_preparation_Gcam(self):
        """Build the virtual-camera GAN: generator Gcam (re-projects 3D poses
        under a new camera) and discriminator Dcam (judges 2D pose pairs),
        plus their Adam optimizers and 'lambda' LR schedulers.

        Only the 'gcam_v0' / 'dcam_pa1' variants are active; the alternative
        implementations are kept commented out for reference.
        """
        ######################################
        # prepare model: Gcam: 3d pose -> 3d pose, 2d pose, different cam.
        ######################################
        if self.args.gcam_choice == 'gcam_v0':
            from poseaugtool.model_virtualCam.virtualCam import G_camera
            self.model_Gcam = G_camera(self.args)
        # elif self.args.gcam_choice == 'gcam_v2':
        #     from poseaugtool.model_virtualCam.virtualCam import G_camera_v2
        #     self.model_Gcam = G_camera_v2(self.args)
        # conv-channel widths parsed for the conv-based discriminators
        # (only used by the commented-out dcam variants below)
        filter_ch = [int(x) for x in self.args.Dcamarchitecture.split(',')]
        # if self.args.dcam_choice == 'dcam_v0':
        #     from poseaugtool.model_virtualCam.virtualCam import Pose2DVideoDiscriminator
        #     self.model_Dcam = Pose2DVideoDiscriminator(ks=self.args.dcam_ks, nh_conv1d=filter_ch).to(self.device)
        # elif self.args.dcam_choice == 'dcam_v2':
        #     from poseaugtool.model_virtualCam.virtualCam import Pose2DVideoDiscriminatorV2
        #     self.model_Dcam = Pose2DVideoDiscriminatorV2(ks=self.args.dcam_ks, nh_conv1d=filter_ch).to(self.device)
        if self.args.dcam_choice == 'dcam_pa1':
            from poseaugtool.model_virtualCam.virtualCam import Pos2dPairDiscriminator
            self.model_Dcam = Pos2dPairDiscriminator().to(self.device)
        # elif self.args.dcam_choice == 'dcam_v5':
        #     from poseaugtool.model_virtualCam.virtualCam import Pos2dPairDiscriminator_v5
        #     self.model_Dcam = Pos2dPairDiscriminator_v5().to(self.device)
        # elif self.args.dcam_choice == 'dcam_v6':
        #     from poseaugtool.model_virtualCam.virtualCam import Pos2dPairDiscriminator_v6
        #     self.model_Dcam = Pos2dPairDiscriminator_v6().to(self.device)
        # print param number size.
        self._count_param(self.model_Gcam, 'self.model_Gcam')
        self._count_param(self.model_Dcam, 'self.model_Dcam')
        # to cuda
        self.model_Gcam = self.model_Gcam.cuda()
        self.model_Dcam = self.model_Dcam.cuda()
        ###################################
        # optimizer.
        ###################################
        # separate optimizers/schedulers for generator and discriminator
        self.optimizer_Gcam = optim.Adam(self.model_Gcam.parameters(),
                                         lr=self.args.lrgcam)  # , amsgrad=True) #
        self.lr_scheduler_Gcam = get_scheduler(self.optimizer_Gcam, policy='lambda', nepoch_fix=0,
                                               nepoch=self.args.epochs)
        self.optimizer_Dcam = optim.Adam(self.model_Dcam.parameters(),
                                         lr=self.args.lrdcam)  # , amsgrad=True) #
        self.lr_scheduler_Dcam = get_scheduler(self.optimizer_Dcam, policy='lambda', nepoch_fix=0,
                                               nepoch=self.args.epochs)
        ###################################
        # load pretrain
        ###################################
        # no pretrained weights are loaded for the GAN components
        if self.args.pretrain:
            pass
def _train_batch_posenet(self, inputs_2d, inputs_3d, epoch_loss_3d_train, N):
# here 3D shape is single frame. BxTxJx3: T=1
target_3d_pose = inputs_3d[:, :, :, :] - inputs_3d[:, :, :1, :]
# pos_3d[:, 1:] -= inputs_3d[:, :1]
# Predict 3D poses
predicted_3d_pos = self.model_pos_train(inputs_2d)
self.optimizer_P.zero_grad()
# loss_3d_pos = mpjpe(predicted_3d_pos, target_3d_pose)
loss_3d_pos = self.MSE(predicted_3d_pos, target_3d_pose)
loss_total = loss_3d_pos * 1.
loss_total.backward()
nn.utils.clip_grad_norm_(self.model_pos_train.parameters(),
max_norm=1)
self.optimizer_P.step()
epoch_loss_3d_train += target_3d_pose.shape[0] * target_3d_pose.shape[1] * loss_3d_pos.item()
N += target_3d_pose.shape[0] * target_3d_pose.shape[1]
return loss_3d_pos.detach(), epoch_loss_3d_train, N
def _train_batch_trajnet(self, inputs_2d, inputs_3d, epoch_loss_3d_train, N):
target_3d_traj = inputs_3d[:, :, :1, :] * 1. # focus on root traj.
# pos_3d[:, 1:] -= inputs_3d[:, :1]
# Predict 3D trajs
predicted_3d_traj = self.model_traj_train(inputs_2d)
# loss_3d_traj = mpjpe(predicted_3d_traj, target_3d_traj)
self.optimizer_T.zero_grad()
# loss_3d_traj = self.MSE(predicted_3d_traj, target_3d_traj)
# weighted traj loss from videopose
w = 1 / target_3d_traj[:, :, :, 2] # Weight inversely proportional to depth
loss_3d_traj = weighted_mpjpe(predicted_3d_traj, target_3d_traj, w)
loss_total = loss_3d_traj * 1.
loss_total.backward()
nn.utils.clip_grad_norm_(self.model_traj_train.parameters(), max_norm=1) #
self.optimizer_T.step()
epoch_loss_3d_train += target_3d_traj.shape[0] * target_3d_traj.shape[1] * loss_3d_traj.item()
N += target_3d_traj.shape[0] * target_3d_traj.shape[1]
return loss_3d_traj.detach(), epoch_loss_3d_train, N
def train_posenet_realpose(self, tag='_real'):
"""
_real: dataloader from random projection
"""
start_time = time()
epoch_loss_3d_train = 0
N = 0
self.model_pos_train.train()
# Regular supervised scenario
self.logger.info(
'INFO: Train on real pose with dataloader len:{:0>4d}'.format(self.train_generator.num_batches))
for _, batch_3d, batch_2d in tqdm(self.train_generator.next_epoch()):
inputs_3d = torch.from_numpy(batch_3d.astype('float32'))
inputs_2d = torch.from_numpy(batch_2d.astype('float32'))
if torch.cuda.is_available():
inputs_3d = inputs_3d.cuda()
inputs_2d = inputs_2d.cuda()
loss_3d_pos, epoch_loss_3d_train, N = self._train_batch_posenet(inputs_2d, inputs_3d,
epoch_loss_3d_train, N)
# batch-wise log
self.writer.add_scalar('train_P_batch/{}/loss_3d_pos'.format(tag), loss_3d_pos.item(),
self.summary.train_realpose_iter_num)
self.summary.summary_train_realpose_iter_num_update()
def train_trajnet_realpose(self, tag='_real'):
"""
_real: dataloader from random projection
"""
start_time = time()
epoch_loss_3d_train = 0
N = 0
self.model_traj_train.train()
# Regular supervised scenario
self.logger.info(
'INFO: Train on real pose with dataloader len:{:0>4d}'.format(self.train_generator.num_batches))
for _, batch_3d, batch_2d in tqdm(self.train_generator.next_epoch()):
inputs_3d = torch.from_numpy(batch_3d.astype('float32'))
inputs_2d = torch.from_numpy(batch_2d.astype('float32'))
if torch.cuda.is_available():
inputs_3d = inputs_3d.cuda()
inputs_2d = inputs_2d.cuda()
loss_3d_traj, epoch_loss_3d_train, N = self._train_batch_trajnet(inputs_2d, inputs_3d,
epoch_loss_3d_train, N)
# batch-wise log
self.writer.add_scalar('train_T_batch/{}/loss_3d_traj'.format(tag), loss_3d_traj.item(),
self.summary.train_realtraj_iter_num)
self.summary.summary_train_realtraj_iter_num_update()
def _train_dis(self, model_dis, data_real, data_fake, writer_name, fake_data_pool, optimizer):
"""
"""
optimizer.zero_grad()
data_real = data_real.detach()
data_fake = data_fake.detach()
# store the fake buffer for discriminator training.
data_fake = Variable(
torch.Tensor(fake_data_pool(data_fake.cpu().detach().data.numpy()))).to(
self.device)
# for 3d part
real_3d = model_dis(data_real)
fake_3d = model_dis(data_fake)
real_label_3d = Variable(torch.ones(real_3d.size())).to(self.device)
fake_label_3d = Variable(torch.zeros(fake_3d.size())).to(self.device)
dis_3d_real_loss = self.MSE(real_3d, real_label_3d)
dis_3d_fake_loss = self.MSE(fake_3d, fake_label_3d)
# Total discriminators losses
dis_3d_loss = (dis_3d_real_loss + dis_3d_fake_loss) * 0.5
# record acc
d3d_real_acc = get_discriminator_accuracy(real_3d.reshape(-1), real_label_3d.reshape(-1))
d3d_fake_acc = get_discriminator_accuracy(fake_3d.reshape(-1), fake_label_3d.reshape(-1))
self.writer.add_scalar(writer_name + '_real_acc', d3d_real_acc, self.summary.train_iter_num)
self.writer.add_scalar(writer_name + '_fake_acc', d3d_fake_acc, self.summary.train_iter_num)
self.writer.add_scalar(writer_name + '_dis_loss', dis_3d_loss.item(), self.summary.train_iter_num)
# Update optimizer
###################################################
dis_3d_loss.backward()
nn.utils.clip_grad_norm_(model_dis.parameters(), max_norm=1)
optimizer.step()
return d3d_real_acc, d3d_fake_acc
    def evaluate_posenet(self, tag='real', valset='s911'):
        """
        evaluate the performance of posenet on 3 kinds of dataset

        Computes protocol-1 (MPJPE) and protocol-2 (Procrustes-aligned MPJPE)
        errors, in millimetres, of the root-relative 3D prediction on the
        validation generator selected by `valset`. Both are logged to
        tensorboard; only p1 is returned.

        :param tag: label used in the tensorboard scalar name
        :param valset: key into self.val_generator_dict selecting the eval set
        :return: dict with key 'p1' (MPJPE in mm)
        """
        start_time = time()
        # End-of-epoch evaluation
        with torch.no_grad():
            # copy the latest training weights into the (plain) eval model
            self.model_pos.load_state_dict(self.model_pos_train.state_dict())
            self.model_pos.eval()
            epoch_p1_3d_valid = 0
            epoch_p2_3d_valid = 0
            N_valid = 0
            test_generator = self.val_generator_dict[valset]
            for _, batch, batch_2d in test_generator.next_epoch():
                inputs_3d = torch.from_numpy(batch.astype('float32'))
                inputs_2d = torch.from_numpy(batch_2d.astype('float32'))
                if torch.cuda.is_available():
                    inputs_3d = inputs_3d.cuda()
                    inputs_2d = inputs_2d.cuda()
                # inputs_3d[:, :, 0] = 0
                # make the target root-relative (subtract the root joint)
                inputs_3d[:, :, :, :] = inputs_3d[:, :, :, :] - inputs_3d[:, :, :1, :]
                # Predict 3D poses
                predicted_3d_pos = self.model_pos(inputs_2d)
                # Test-time augmentation (if enabled)
                if test_generator.augment_enabled():
                    # Undo flipping and take average with non-flipped version
                    # (batch index 1 holds the horizontally flipped copy)
                    predicted_3d_pos[1, :, :, 0] *= -1
                    predicted_3d_pos[1, :, test_generator.joints_left + test_generator.joints_right] = \
                        predicted_3d_pos[1, :, test_generator.joints_right + test_generator.joints_left]
                    predicted_3d_pos = torch.mean(predicted_3d_pos, dim=0, keepdim=True)
                    inputs_3d = inputs_3d[:1]
                # protocol-1 error, accumulated weighted by frame count
                loss_3d_pos = mpjpe(predicted_3d_pos, inputs_3d)
                epoch_p1_3d_valid += inputs_3d.shape[0] * inputs_3d.shape[1] * loss_3d_pos.item()
                N_valid += inputs_3d.shape[0] * inputs_3d.shape[1]
                # protocol-2 error (rigid alignment), computed in numpy
                p2_3d_pos = p_mpjpe(predicted_3d_pos.cpu().numpy().reshape(-1, inputs_3d.shape[-2], inputs_3d.shape[-1])
                                    , inputs_3d.cpu().numpy().reshape(-1, inputs_3d.shape[-2], inputs_3d.shape[-1]))
                epoch_p2_3d_valid += inputs_3d.shape[0] * inputs_3d.shape[1] * p2_3d_pos.item()
            # analysis result
            # frame-weighted mean, converted from metres to millimetres
            epoch_p1_3d_valid = epoch_p1_3d_valid / N_valid * 1000
            epoch_p2_3d_valid = epoch_p2_3d_valid / N_valid * 1000
        elapsed = (time() - start_time) / 60
        # epoch-wise log.
        self.writer.add_scalar('eval_P_epoch_{}/{}_p1'.format(tag, valset), epoch_p1_3d_valid, self.summary.epoch)
        self.writer.add_scalar('eval_P_epoch_{}/{}_p2'.format(tag, valset), epoch_p2_3d_valid, self.summary.epoch)
        return {'p1': epoch_p1_3d_valid}
    def evaluate_posenet_withPCK(self, tag='real', valset='3dhp_flip'):
        """
        evaluate the performance of posenet for 3DHP

        Reports MPJPE (p1), P-MPJPE (p2), and PCK/AUC in three variants:
        raw prediction, rigidly aligned (pose_align), and scale-corrected
        (pose_scaled). All metrics are logged to tensorboard; only p1/p2
        are returned.

        :param tag: label used in the tensorboard scalar names
        :param valset: key into self.val_generator_dict selecting the eval set
        :return: dict with keys 'p1' and 'p2' (millimetres)
        """
        start_time = time()
        # End-of-epoch evaluation
        with torch.no_grad():
            # copy the latest training weights into the (plain) eval model
            self.model_pos.load_state_dict(self.model_pos_train.state_dict())
            self.model_pos.eval()
            epoch_p1_3d_valid = 0
            epoch_p2_3d_valid = 0
            epoch_pck_3d_valid = 0
            epoch_auc_3d_valid = 0
            epoch_pck_3dscaled_valid = 0
            epoch_auc_3dscaled_valid = 0
            epoch_pck_3daligned_valid = 0
            epoch_auc_3daligned_valid = 0
            N_valid = 0
            test_generator = self.val_generator_dict[valset]
            for _, batch, batch_2d in test_generator.next_epoch():
                inputs_3d = torch.from_numpy(batch.astype('float32'))
                inputs_2d = torch.from_numpy(batch_2d.astype('float32'))
                if torch.cuda.is_available():
                    inputs_3d = inputs_3d.cuda()
                    inputs_2d = inputs_2d.cuda()
                # inputs_3d[:, :, 0] = 0
                # make the target root-relative (subtract the root joint)
                inputs_3d[:, :, :, :] = inputs_3d[:, :, :, :] - inputs_3d[:, :, :1, :]
                # Predict 3D poses
                predicted_3d_pos = self.model_pos(inputs_2d)
                # Test-time augmentation (if enabled)
                if test_generator.augment_enabled():
                    # Undo flipping and take average with non-flipped version
                    # (batch index 1 holds the horizontally flipped copy)
                    predicted_3d_pos[1, :, :, 0] *= -1
                    predicted_3d_pos[1, :, test_generator.joints_left + test_generator.joints_right] = \
                        predicted_3d_pos[1, :, test_generator.joints_right + test_generator.joints_left]
                    predicted_3d_pos = torch.mean(predicted_3d_pos, dim=0, keepdim=True)
                    inputs_3d = inputs_3d[:1]
                # to numpy, collapsing batch and time into one axis
                predicted_3d_pos = predicted_3d_pos.cpu().numpy().reshape(-1, inputs_3d.shape[-2], inputs_3d.shape[-1])
                inputs_3d = inputs_3d.cpu().numpy().reshape(-1, inputs_3d.shape[-2], inputs_3d.shape[-1])
                # rigidly align the prediction to the target
                predicted_3d_pos_aligned = pose_align(predicted_3d_pos, inputs_3d)
                # scale-correct the prediction against the target
                predicted_3d_pos_scaled = pose_scaled(torch.from_numpy(predicted_3d_pos).unsqueeze(0), torch.from_numpy(inputs_3d).unsqueeze(0)).squeeze(0).cpu().numpy()
                # calculate p1 p2 pck auc (p1/p2 converted from metres to mm)
                loss_3d_pos = mpjpe(torch.from_numpy(predicted_3d_pos), torch.from_numpy(inputs_3d)).item() * 1000.0
                p2_3d_pos = p_mpjpe(predicted_3d_pos, inputs_3d).item() * 1000.0
                # compute AUC and PCK
                pck = compute_PCK(inputs_3d, predicted_3d_pos)
                auc = compute_AUC(inputs_3d, predicted_3d_pos)
                # compute AUC and PCK after rigid alignment
                pck_aligned = compute_PCK(inputs_3d, predicted_3d_pos_aligned)
                auc_aligned = compute_AUC(inputs_3d, predicted_3d_pos_aligned)
                # compute AUC and PCK after scale correction
                pck_scaled = compute_PCK(inputs_3d, predicted_3d_pos_scaled)
                auc_scaled = compute_AUC(inputs_3d, predicted_3d_pos_scaled)
                # frame-weighted accumulation (shape[0] is frames after reshape)
                epoch_p1_3d_valid += inputs_3d.shape[0] * loss_3d_pos
                epoch_p2_3d_valid += inputs_3d.shape[0] * p2_3d_pos
                epoch_pck_3d_valid += inputs_3d.shape[0] * pck
                epoch_auc_3d_valid += inputs_3d.shape[0] * auc
                epoch_pck_3daligned_valid += inputs_3d.shape[0] * pck_aligned
                epoch_auc_3daligned_valid += inputs_3d.shape[0] * auc_aligned
                epoch_pck_3dscaled_valid += inputs_3d.shape[0] * pck_scaled
                epoch_auc_3dscaled_valid += inputs_3d.shape[0] * auc_scaled
                N_valid += inputs_3d.shape[0]
            # analysis result: frame-weighted means
            epoch_p1_3d_valid = epoch_p1_3d_valid / N_valid
            epoch_p2_3d_valid = epoch_p2_3d_valid / N_valid
            epoch_pck_3d_valid = epoch_pck_3d_valid / N_valid
            epoch_auc_3d_valid = epoch_auc_3d_valid / N_valid
            epoch_pck_3daligned_valid = epoch_pck_3daligned_valid / N_valid
            epoch_auc_3daligned_valid = epoch_auc_3daligned_valid / N_valid
            epoch_pck_3dscaled_valid = epoch_pck_3dscaled_valid / N_valid
            epoch_auc_3dscaled_valid = epoch_auc_3dscaled_valid / N_valid
        elapsed = (time() - start_time) / 60
        # epoch-wise log.
        self.writer.add_scalar('eval_P_pck_epoch_{}/{}_p1'.format(tag, valset), epoch_p1_3d_valid, self.summary.epoch)
        self.writer.add_scalar('eval_P_pck_epoch_{}/{}_p2'.format(tag, valset), epoch_p2_3d_valid, self.summary.epoch)
        self.writer.add_scalar('eval_P_pck_epoch_{}/{}_pck'.format(tag, valset), epoch_pck_3d_valid, self.summary.epoch)
        self.writer.add_scalar('eval_P_pck_epoch_{}/{}_auc'.format(tag, valset), epoch_auc_3d_valid, self.summary.epoch)
        self.writer.add_scalar('eval_P_pck_epoch_{}/{}_pck_aligned'.format(tag, valset), epoch_pck_3daligned_valid, self.summary.epoch)
        self.writer.add_scalar('eval_P_pck_epoch_{}/{}_auc_aligned'.format(tag, valset), epoch_auc_3daligned_valid, self.summary.epoch)
        self.writer.add_scalar('eval_P_pck_epoch_{}/{}_pck_scaled'.format(tag, valset), epoch_pck_3dscaled_valid, self.summary.epoch)
        self.writer.add_scalar('eval_P_pck_epoch_{}/{}_auc_scaled'.format(tag, valset), epoch_auc_3dscaled_valid, self.summary.epoch)
        return {
            'p1': epoch_p1_3d_valid,
            'p2': epoch_p2_3d_valid,
        }
def evaluate_trajnet(self, tag='real', valset='s911'):
"""
evaluate the performance of posenet
"""
start_time = time()
# End-of-epoch evaluation
with torch.no_grad():
self.model_traj.load_state_dict(self.model_traj_train.state_dict())
self.model_traj.eval()
epoch_p1_3d_valid = 0
N_valid = 0
# Evaluate on test set
for cam, batch, batch_2d in self.val_generator_dict[valset].next_epoch():
inputs_3d = torch.from_numpy(batch.astype('float32'))
inputs_2d = torch.from_numpy(batch_2d.astype('float32'))
if torch.cuda.is_available():
inputs_3d = inputs_3d.cuda()
inputs_2d = inputs_2d.cuda()
target_3d_traj = inputs_3d[:, :, :1, :] * 1. # focus on root traj.
# Predict 3D trajes
predicted_3d_traj = self.model_traj(inputs_2d)
loss_3d_traj = mpjpe(predicted_3d_traj, target_3d_traj)
epoch_p1_3d_valid += target_3d_traj.shape[0] * target_3d_traj.shape[1] * loss_3d_traj.item()
N_valid += target_3d_traj.shape[0] * target_3d_traj.shape[1]
# analysis result
epoch_p1_3d_valid = epoch_p1_3d_valid / N_valid * 1000
elapsed = (time() - start_time) / 60
# epoch-wise log.
self.writer.add_scalar('eval_T_epoch_{}/{}_p1'.format(tag, valset), epoch_p1_3d_valid, self.summary.epoch)
return {'p1': epoch_p1_3d_valid}
def _zip_GIFplot_array(self, tensor_lst):
"""
for plot function pre-preocess
"""
lst = []
for item in tensor_lst:
if item.shape[-1] == 3: # for 3D case
lst.append(item.detach().cpu().numpy()[:1])
elif item.shape[-1] == 2:
tmp2d = item.detach().cpu().numpy()[:1]
tmp2d = np.concatenate([tmp2d, np.zeros_like(tmp2d)[..., -1:]], axis=-1)
lst.append(tmp2d)
else:
assert False, 'wrong data get'
return np.concatenate(lst)
def random_aug_d2d(self, x):
r1 = self.args.d2d_random_lb
r2 = self.args.d2d_random_ub
random_weight = torch.FloatTensor(x.shape[0], 1, 1, 1).uniform_(r1, r2).to(x.device)
return x * random_weight
| 55,174 | 51.647901 | 169 | py |
PoseTriplet | PoseTriplet-main/estimator/function/utils.py | from __future__ import absolute_import, division
import os
import torch
import numpy as np
from tensorboardX import SummaryWriter
# self define tools
class Summary(object):
    """Tensorboard bookkeeping: owns the SummaryWriter plus a set of
    per-purpose iteration counters advanced by the training loops."""

    def __init__(self, directory):
        self.directory = directory
        self.epoch = 0
        self.writer = None
        self.phase = 0
        # per-loop iteration counters
        self.train_iter_num = 0
        self.train_realpose_iter_num = 0
        self.train_fakepose_iter_num = 0
        self.train_realtraj_iter_num = 0
        self.train_faketraj_iter_num = 0
        self.test_iter_num = 0
        self.test_MPI3D_iter_num = 0

    def create_summary(self):
        """Instantiate and return the SummaryWriter for self.directory."""
        self.writer = SummaryWriter(log_dir=os.path.join(self.directory))
        return self.writer

    def summary_train_iter_num_update(self):
        self.train_iter_num += 1

    def summary_train_realpose_iter_num_update(self):
        self.train_realpose_iter_num += 1

    def summary_train_fakepose_iter_num_update(self):
        self.train_fakepose_iter_num += 1

    def summary_train_realtraj_iter_num_update(self):
        self.train_realtraj_iter_num += 1

    def summary_train_faketraj_iter_num_update(self):
        self.train_faketraj_iter_num += 1

    def summary_test_iter_num_update(self):
        self.test_iter_num += 1

    def summary_test_MPI3D_iter_num_update(self):
        self.test_MPI3D_iter_num += 1

    def summary_epoch_update(self):
        self.epoch += 1

    def summary_phase_update(self):
        self.phase += 1
class AverageMeter(object):
    """Tracks the most recent value plus a running sum/count/mean."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Zero all statistics."""
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        """Record `val` observed `n` times and refresh the running mean."""
        self.val = val
        self.count = self.count + n
        self.sum = self.sum + val * n
        self.avg = self.sum / self.count
from torch.optim import lr_scheduler
'cp from dlow'
def get_scheduler(optimizer, policy, nepoch_fix=None, nepoch=None, decay_step=None):
    """Build a torch LR scheduler for `optimizer` (adapted from DLow).

    :param policy: 'lambda'  - constant for `nepoch_fix` epochs, then linear
                               decay towards 0 at `nepoch`;
                   'step'    - multiply the LR by 0.1 every `decay_step` epochs;
                   'plateau' - reduce the LR when the monitored metric plateaus.
    :raises NotImplementedError: for an unknown `policy`.
    """
    if policy == 'lambda':
        def lambda_rule(epoch):
            # LR multiplier: 1.0 until nepoch_fix, then linear ramp down.
            lr_l = 1.0 - max(0, epoch - nepoch_fix) / float(nepoch - nepoch_fix + 1)
            return lr_l
        scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda_rule)
    elif policy == 'step':
        scheduler = lr_scheduler.StepLR(
            optimizer, step_size=decay_step, gamma=0.1)
    elif policy == 'plateau':
        scheduler = lr_scheduler.ReduceLROnPlateau(
            optimizer, mode='min', factor=0.2, threshold=0.01, patience=5)
    else:
        # BUG FIX: the original *returned* the exception object (with a stray
        # comma instead of %-formatting), so callers silently received a
        # NotImplementedError instance in place of a scheduler.
        raise NotImplementedError('learning rate policy [%s] is not implemented' % policy)
    return scheduler
########################################
# generate mask for pose sequence input
########################################
# Occlusion-mask modes, indexed by `mask_type` in gen_mask below.
MASK_MODES = ('No mask', 'Future Prediction', 'Missing Frames', 'Structured Occlusion')
# Limb -> joint-index table for the 16-joint skeleton, used by mode 3
# to occlude whole body parts at once.
body_members = {
    'left_arm': {'joints': [8, 9, 10, 11], 'side': 'left'},
    'right_arm': {'joints': [8, 12, 13, 14], 'side': 'right'},
    'head': {'joints': [8, 9], 'side': 'right'},
    'torso': {'joints': [0, 7, 8], 'side': 'right'},
    'left_leg': {'joints': [0, 4, 5, 6], 'side': 'left'},
    'right_leg': {'joints': [0, 1, 2, 3], 'side': 'right'},
}
def gen_mask(mask_type, keep_prob, batch_size, njoints, seq_len, body_members=body_members, keep_feet=False):
    """Build an occlusion mask for a batch of pose sequences.

    :param mask_type: index into MASK_MODES (0 = no mask, 1 = future
                      prediction, 2 = missing-frame patch, 3 = structured
                      limb occlusion)
    :param keep_prob: fraction of frames/joints kept visible
    :param batch_size, njoints, seq_len: mask dimensions
    :param body_members: limb -> joint-index table used by mode 3
    :param keep_feet: in mode 3, keep the feet (joints 3 and 6) visible
                      with probability 0.5
    :return: float mask of shape (batch_size, seq_len, njoints, 1); 1 = visible
    """
    # Default mask, no mask
    mask = np.ones(shape=(batch_size, seq_len, njoints, 1))
    # BUG FIX: the deprecated np.int alias was removed in NumPy >= 1.24;
    # the builtin int is the documented replacement and is equivalent here.
    if mask_type == 1:  # Future Prediction
        mask[:, int(seq_len * keep_prob):, :, :] = 0.0
    elif mask_type == 2:  # Missing Frames patch
        occ_frames = np.random.randint(seq_len - int(seq_len * keep_prob), size=1)
        mask[:, int(occ_frames):int(occ_frames + int(seq_len * keep_prob)), :, :] = 0.0
    elif mask_type == 3:  # Structured Occlusion Simulation
        rand_joints = set()
        # keep adding random limbs until at most keep_prob of joints remain
        while ((njoints - len(rand_joints)) >
               (njoints * keep_prob)):
            joints_to_add = (list(body_members.values())[np.random.randint(len(body_members))])['joints']
            for joint in joints_to_add:
                rand_joints.add(joint)
        mask[:, :, list(rand_joints), :] = 0.0
        if keep_feet and np.random.uniform() > 0.5:
            # keep feet
            mask[:, :, [3, 6], :] = 1.0
    # This unmasks first and last frame for all sequences (required for baselines)
    # all should have, to avoid static prediction
    mask[:, [0, -1], :, :] = 1.0
    return mask
########################################
# tools for GAN training
########################################
def set_grad(nets, requires_grad=False):
    """Toggle gradient tracking for every parameter of every net in `nets`."""
    for param in (p for net in nets for p in net.parameters()):
        param.requires_grad = requires_grad
def get_discriminator_accuracy(prediction, label):
    '''
    this is to get discriminator accuracy for tensorboard
    input is tensor -> convert to numpy
    :param prediction: flat tensor of scores, where score > 0.5 means True
    :param label: flat tensor of 0/1 targets, same shape as prediction
    :return: fraction of scores within 0.5 of their target
    '''
    pred_np = prediction.cpu().detach().numpy()
    label_np = label.cpu().detach().numpy()
    # a score counts as correct when |score - label| <= 0.5
    correct = np.where(np.abs(pred_np - label_np) > 0.5, 0, 1)
    accuracy = np.sum(correct) / label_np.shape[0]
    return accuracy
import copy
# To store 50 generated image in a pool and sample from it when it is full
# Shrivastava et al’s strategy
class Sample_from_Pool(object):
    """Replay buffer for GAN training (Shrivastava et al.'s strategy).

    Fills up to `max_elements`; once full, each incoming item is either
    passed straight through or swapped (p = 0.5) with a random stored
    element, which is then emitted instead.
    """

    def __init__(self, max_elements=None):
        self.max_elements = max_elements
        self.cur_elements = 0
        self.items = []

    def __call__(self, in_items):
        out_items = []
        for item in in_items:
            if self.cur_elements < self.max_elements:
                # buffer not full yet: store and pass through
                self.items.append(item)
                self.cur_elements += 1
                out_items.append(item)
            elif np.random.ranf() > 0.5:
                # swap with a random buffered item and emit the old one
                idx = np.random.randint(0, self.max_elements)
                swapped_out = copy.copy(self.items[idx])
                self.items[idx] = item
                out_items.append(swapped_out)
            else:
                out_items.append(item)
        return out_items
import numpy as np
def get_contacts(poses):
    '''
    https://github.com/Shimingyi/MotioNet/blob/fbceb5ffa85a509ed5b42b06c1766cea9cdcd328/data/h36m_dataset.py
    pose contact label extraction

    A foot is labelled "in contact" when its height is near the sequence
    minimum (mean of the lowest fifth of frames, +2cm tolerance) AND its
    frame-to-frame velocity is small.

    :param poses: array reshapeable to (T, 16, 3); joints 3 / 6 are the feet
    :return: (T, 3) array; columns 0/1 = left/right foot contact, column 2 unused
    '''
    seq = poses.reshape((-1, 16, 3))
    contact_signal = np.zeros((seq.shape[0], 3))
    left_z = seq[:, 3, 2]
    right_z = seq[:, 6, 2]
    # height test: within 2e-2 of the mean of the lowest fifth of frames
    left_thresh = np.mean(np.sort(left_z)[:left_z.shape[0] // 5]) + 2e-2
    right_thresh = np.mean(np.sort(right_z)[:right_z.shape[0] // 5]) + 2e-2
    contact_signal[left_z <= left_thresh, 0] = 1
    contact_signal[right_z <= right_thresh, 1] = 1
    # velocity test (central difference): fast-moving feet cannot be planted
    left_vel = np.sqrt(np.sum((seq[2:, 3] - seq[:-2, 3]) ** 2, axis=-1))
    right_vel = np.sqrt(np.sum((seq[2:, 6] - seq[:-2, 6]) ** 2, axis=-1))
    contact_signal[1:-1][left_vel >= 5e-3, 0] = 0
    contact_signal[1:-1][right_vel >= 5e-3, 1] = 0
    return contact_signal
def check_isNone(cklst):
    """
    Assert that no tensor in `cklst` contains NaN values.

    Note: despite the name, this checks for NaNs, not for None.

    :param cklst: list of tensors
    """
    for tensor in cklst:
        assert not torch.isnan(tensor).any()
# ..mk dir
def mkd(target_dir, get_parent=True):
    """Ensure a directory exists.

    :param target_dir: path whose directory should exist afterwards
    :param get_parent: if True, create the *parent* of `target_dir`
                       (use when `target_dir` is a file path)
    """
    savedir = os.path.abspath(os.path.join(target_dir, os.pardir)) if get_parent else target_dir
    if not os.path.exists(savedir):
        os.makedirs(savedir, exist_ok=True)
| 7,848 | 33.730088 | 109 | py |
PoseTriplet | PoseTriplet-main/estimator/function/gan_utils.py | import torch.nn as nn
import torch
import numpy as np
import torchgeometry as tgm
'''
function on pose related information extraction.
'''
def get_pose3dbyBoneVec(bones, num_joints=16):
    '''
    Convert bone vectors back to 3D joint positions; the inverse of
    get_BoneVecbypose3d.

    Each row j of the matrix below lists (with sign -1) the bones on the
    kinematic chain from the root (joint 0) to joint j, so the matrix
    product accumulates bone vectors along each chain. The root joint
    ends up at the origin.

    :param bones: N x 15 x 3 bone vectors
    :param num_joints: number of joints (16 for this skeleton)
    :return: N x 16 x 3 joint positions (root at origin)
    '''
    Ctinverse = torch.Tensor([
        [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],  # 0 basement
        [-1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],  # 0 1
        [-1, -1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],  # 1 2
        [-1, -1, -1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],  # 2 3
        [0, 0, 0, -1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],  # 0 4
        [0, 0, 0, -1, -1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],  # 4 5
        [0, 0, 0, -1, -1, -1, 0, 0, 0, 0, 0, 0, 0, 0, 0],  # 5 6
        [0, 0, 0, 0, 0, 0, -1, 0, 0, 0, 0, 0, 0, 0, 0],  # 0 7
        [0, 0, 0, 0, 0, 0, -1, -1, 0, 0, 0, 0, 0, 0, 0],  # 7 8
        [0, 0, 0, 0, 0, 0, -1, -1, -1, 0, 0, 0, 0, 0, 0],  # 8 9
        [0, 0, 0, 0, 0, 0, -1, -1, 0, -1, 0, 0, 0, 0, 0],  # 8 10
        [0, 0, 0, 0, 0, 0, -1, -1, 0, -1, -1, 0, 0, 0, 0],  # 10 11
        [0, 0, 0, 0, 0, 0, -1, -1, 0, -1, -1, -1, 0, 0, 0],  # 11 12
        [0, 0, 0, 0, 0, 0, -1, -1, 0, 0, 0, 0, -1, 0, 0],  # 8 13
        [0, 0, 0, 0, 0, 0, -1, -1, 0, 0, 0, 0, -1, -1, 0],  # 13 14
        [0, 0, 0, 0, 0, 0, -1, -1, 0, 0, 0, 0, -1, -1, -1],  # 14 15
    ]).transpose(1, 0)
    Ctinverse = Ctinverse.to(bones.device)
    # broadcast the (15 x 16) accumulation matrix over the batch
    C = Ctinverse.repeat([bones.size(0), 1, 1]).reshape(-1, num_joints - 1, num_joints)
    bonesT = bones.permute(0, 2, 1).contiguous()
    pose3d = torch.matmul(bonesT, C)
    pose3d = pose3d.permute(0, 2, 1).contiguous()  # back to N x 16 x 3
    return pose3d
def get_BoneVecbypose3d(x, num_joints=16):
    '''
    convert 3D point to bone vector

    Each column of the matrix below encodes one bone as (parent +1,
    child -1), so the matrix product yields parent - child differences;
    the comment on each row gives the (parent, child) joint pair.

    :param x: N x number of joint x 3
    :return: N x number of bone x 3 number of bone = number of joint - 1
    '''
    Ct = torch.Tensor([
        [1, -1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],  # 0 1
        [0, 1, -1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],  # 1 2
        [0, 0, 1, -1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],  # 2 3
        [1, 0, 0, 0, -1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],  # 0 4
        [0, 0, 0, 0, 1, -1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],  # 4 5
        [0, 0, 0, 0, 0, 1, -1, 0, 0, 0, 0, 0, 0, 0, 0, 0],  # 5 6
        [1, 0, 0, 0, 0, 0, 0, -1, 0, 0, 0, 0, 0, 0, 0, 0],  # 0 7
        [0, 0, 0, 0, 0, 0, 0, 1, -1, 0, 0, 0, 0, 0, 0, 0],  # 7 8
        [0, 0, 0, 0, 0, 0, 0, 0, 1, -1, 0, 0, 0, 0, 0, 0],  # 8 9
        [0, 0, 0, 0, 0, 0, 0, 0, 1, 0, -1, 0, 0, 0, 0, 0],  # 8 10
        [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, -1, 0, 0, 0, 0],  # 10 11
        [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, -1, 0, 0, 0],  # 11 12
        [0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, -1, 0, 0],  # 8 13
        [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, -1, 0],  # 13 14
        [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, -1],  # 14 15
    ]).transpose(1, 0)
    Ct = Ct.to(x.device)
    # broadcast the (16 x 15) difference matrix over the batch
    C = Ct.repeat([x.size(0), 1, 1]).reshape(-1, num_joints, num_joints - 1)
    pose3 = x.permute(0, 2, 1).contiguous()
    B = torch.matmul(pose3, C)
    B = B.permute(0, 2, 1)  # back to N x 15 x 3
    return B
def get_bone_lengthbypose3d(x, bone_dim=2):
    '''
    Per-bone lengths of a 3D pose.

    :param x: N x 16 x 3 joint positions
    :param bone_dim: unused (norm is always taken over dim=2); kept for
                     backward compatibility
    :return: N x 15 x 1 bone lengths
    '''
    return torch.norm(get_BoneVecbypose3d(x), dim=2, keepdim=True)
def get_bone_unit_vecbypose3d(x, num_joints=16, bone_dim=2):
    '''
    Unit direction vector of each bone of a 3D pose.

    :param x: N x 16 x 3 joint positions
    :param num_joints, bone_dim: unused; kept for backward compatibility
    :return: N x 15 x 3 unit bone vectors
    '''
    bonevec = get_BoneVecbypose3d(x)
    # Compute the norm directly instead of re-deriving the bone vectors a
    # second time through get_bone_lengthbypose3d (the original did the
    # pose->bone conversion twice per call).
    bonelength = torch.norm(bonevec, dim=2, keepdim=True)
    return bonevec / bonelength
from function.utils import get_contacts
def get_leg_ratio(bl_old, bl_new):
    """
    Ratio of new to old leg length (bones 1 and 2: thigh + shin).

    :param bl_old: b x t x 15 x 1 bone lengths (must be 4-D)
    :param bl_new: b x t x 15 x 1 bone lengths (must be 4-D)
    :return: length-b vector of new/old leg-length ratios
    """
    assert len(bl_old.shape) == 4
    assert len(bl_new.shape) == 4
    # leg length = thigh (bone 1) + shin (bone 2), first frame only
    old_leg = bl_old[:, 0, 1, 0] + bl_old[:, 0, 2, 0]
    new_leg = bl_new[:, 0, 1, 0] + bl_new[:, 0, 2, 0]
    return new_leg / old_leg
def pose_seq_bl_aug(pose_seq_in, kbl=None):
    """
    Randomly re-target the bone lengths of a pose sequence.

    :param pose_seq_in: t x 16 x 3 pose sequence (torch tensor)
    :param kbl: optional k x 15 tensor of bone-length templates; when None,
                the 15-segment templates from Evol (CVPR 2020) are loaded
                from disk.
    :return: t x 16 x 3 sequence rebuilt with one randomly chosen template,
             with the root trajectory rescaled by the leg-length ratio.
    """
    # BUG FIX: `if not kbl:` raises "Boolean value of Tensor with more than
    # one element is ambiguous" whenever a caller actually supplies a
    # template tensor; test explicitly for None instead.
    if kbl is None:
        kbl = np.load('./data/bonelength/bl_15segs_templates_mdifyed.npy').astype('float32')  # 15 bl from Evol(cvpr2020).
        kbl = torch.from_numpy(kbl)
    # work on copies so the input sequence is left untouched
    pose_seq = pose_seq_in * 1.
    root_pose = pose_seq[:, :1, :] * 1.
    # pick one template and rebuild the pose from unit bone vectors
    i = np.random.randint(kbl.shape[0])
    bl = kbl[i] * 1.
    bl = bl.unsqueeze(0).unsqueeze(-1)
    bv = get_bone_unit_vecbypose3d(pose_seq)
    out = get_pose3dbyBoneVec(bv * bl)
    # rescale the root trajectory so step length matches the new leg length
    bl_old = get_bone_lengthbypose3d(pose_seq)
    leg_ratio = get_leg_ratio(bl_old.unsqueeze(0), bl.unsqueeze(0))
    out = out + root_pose * leg_ratio
    return out
def pose_seq_bl_reset(pose_seq_in):
    """
    pose_seq: tx16x3
    reset to RL bone length (average of s15678)
    Rebuilds the sequence from its unit bone directions using a fixed
    bone-length template, and rescales the root trajectory by the
    resulting leg-length ratio.
    """
    # size match: copy so the caller's tensor is untouched
    pose_seq = pose_seq_in * 1.
    root_pose = pose_seq[:, :1, :] * 1.
    # bl = np.array([[[0.1332899], [0.4379], [0.447],
    #                 [0.1332899], [0.4379], [0.447],
    #                 [0.24004446], [0.2710998], [0.16976325],
    #                 [0.15269038], [0.2798], [0.25],
    #                 [0.15269038], [0.2798], [0.25]]], dtype='float32')
    bl = np.array([[[0.13545841], [0.45170274], [0.4469572],
                    [0.13545777], [0.45170122], [0.44695726],
                    [0.2414928], [0.25551477], [0.18441138],
                    [0.15050778], [0.28198972], [0.24994883],
                    [0.15050682], [0.28199276], [0.24994786]]], dtype='float32')  # bone length used in RL
    bl = torch.from_numpy(bl)
    # keep directions, swap in the fixed lengths
    bv = get_bone_unit_vecbypose3d(pose_seq)
    out = get_pose3dbyBoneVec(bv * bl)
    bl_old = get_bone_lengthbypose3d(pose_seq)
    leg_ratio = get_leg_ratio(bl_old.unsqueeze(0), bl.unsqueeze(0))
    out = out + root_pose * leg_ratio
    return out
def pose_seq_bl_aug_batch(pose_seq_batch):
    """
    Batched bone-length augmentation.
    pose_seq_batch: b x t x j x c (j=16, c=3)
    Samples one bone-length template per batch element, rebuilds each
    sequence from its bone directions, and rescales root trajectories.
    """
    b, t, j, c = pose_seq_batch.shape
    # s15678bl 5x15
    # kbl = np.load('./data/bonelength/hm36s15678_bl_templates.npy')
    kbl = np.load('./data/bonelength/bl_15segs_templates_mdifyed.npy').astype('float32')  # 15 bl from Evol git.
    kbl = torch.from_numpy(kbl.astype('float32')).to(pose_seq_batch.device)
    # random b bl: one template index per batch element
    bbl_idx = np.random.choice(kbl.shape[0], b)
    bbl = kbl[bbl_idx]  # b x 15
    bbl = bbl.unsqueeze(1).unsqueeze(-1)  # bx1x15x1
    # root traj (kept aside; rescaled and re-added at the end)
    root_pose = pose_seq_batch[:, :, :1, :] * 1.
    pose_seq_bt = pose_seq_batch.reshape(b*t, 16, 3) * 1.
    # calculate bl
    bl_old_bt = get_bone_lengthbypose3d(pose_seq_bt)  # bt x 15 x 1
    bl_old = bl_old_bt.reshape(b, t, 15, 1)
    # calculate ratio (per batch element)
    leg_ratio = get_leg_ratio(bl_old, bbl).unsqueeze(-1).unsqueeze(-1).unsqueeze(-1)  # bx1x1x1
    # change BL: unit directions x new lengths, then back to joint positions
    bv_bt = get_bone_unit_vecbypose3d(pose_seq_bt)
    bv = bv_bt.reshape(b, t, 15, 3)
    bv = bv * bbl
    bv_bt = bv.reshape(b*t, 15, 3)
    out_bt = get_pose3dbyBoneVec(bv_bt)
    out = out_bt.reshape(b, t, 16, 3)
    out = out + root_pose * leg_ratio
    return out
def kcs_layer_unit(x, num_joints=16):
    # Implementation of the Kinematic Chain Space (KCS) as described in the paper.
    # Local KCS can be applied later by masking Ct.
    # KCS matrix: each row maps the 16 joints to one of the 15 bones
    # (parent +1, child -1); the trailing comment names the joint pair.
    Ct = torch.Tensor([
        [1, -1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],  # 0 1
        [0, 1, -1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],  # 1 2
        [0, 0, 1, -1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],  # 2 3
        [1, 0, 0, 0, -1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],  # 0 4
        [0, 0, 0, 0, 1, -1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],  # 4 5
        [0, 0, 0, 0, 0, 1, -1, 0, 0, 0, 0, 0, 0, 0, 0, 0],  # 5 6
        [1, 0, 0, 0, 0, 0, 0, -1, 0, 0, 0, 0, 0, 0, 0, 0],  # 0 7
        [0, 0, 0, 0, 0, 0, 0, 1, -1, 0, 0, 0, 0, 0, 0, 0],  # 7 8
        [0, 0, 0, 0, 0, 0, 0, 0, 1, -1, 0, 0, 0, 0, 0, 0],  # 8 9
        [0, 0, 0, 0, 0, 0, 0, 0, 1, 0, -1, 0, 0, 0, 0, 0],  # 8 10
        [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, -1, 0, 0, 0, 0],  # 10 11
        [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, -1, 0, 0, 0],  # 11 12
        [0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, -1, 0, 0],  # 8 13
        [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, -1, 0],  # 13 14
        [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, -1],  # 14 15
    ]).transpose(1, 0)
    Ct = Ct.to(x.device)
    C = Ct.repeat([x.size(0), 1, 1]).reshape(-1, num_joints, num_joints - 1)
    x = x.reshape(x.size(0), -1, 3)
    pose3 = x.permute(0, 2, 1).contiguous()  # permute (not view) to turn 16x3 into 3x16
    B = torch.matmul(pose3, C)
    # normalize each bone vector -> unit directions
    B = B / torch.norm(B, dim=1, keepdim=True)
    # Gram matrix of unit bone vectors (pairwise cosines), shape N x 15 x 15
    Psi = torch.matmul(B.permute(0, 2, 1), B)
    return Psi
def get_discriminator_accuracy(prediction, label):
    '''
    Discriminator accuracy for tensorboard logging.
    A prediction counts as correct when it lies within 0.5 of its label.
    :param prediction: Bs x Score tensor, where score > 0.5 means True.
    :param label: matching tensor of target labels.
    :return: fraction of correct predictions.
    '''
    # move to numpy for the bookkeeping
    pred_np = prediction.cpu().detach().numpy()
    label_np = label.cpu().detach().numpy()
    correct = (np.abs(pred_np - label_np) <= 0.5).sum()
    return correct / label_np.shape[0]
# basic tool
def diff(input, axis=None):
# now is b t j 3
tmp = input[:, 1:] - input[:, :-1]
return torch.cat([tmp, tmp[:, -1:]], dim=1)
import copy
# To store 50 generated image in a pool and sample from it when it is full
# Shrivastava et al’s strategy
class Sample_from_Pool(object):
    """Image pool (Shrivastava et al.'s strategy): store up to `max_elements`
    generated samples; once full, each new sample is either passed through or
    swapped (50/50) with a randomly chosen stored one, which is returned."""

    def __init__(self, max_elements=4096):
        self.max_elements = max_elements
        self.cur_elements = 0
        self.items = []

    def __call__(self, in_items):
        out = []
        for item in in_items:
            if self.cur_elements < self.max_elements:
                # pool not yet full: store and pass the sample through
                self.items.append(item)
                self.cur_elements += 1
                out.append(item)
            elif np.random.ranf() > 0.5:
                # replace a random pooled sample and emit the old one
                idx = np.random.randint(0, self.max_elements)
                swapped = copy.copy(self.items[idx])
                self.items[idx] = item
                out.append(swapped)
            else:
                out.append(item)
        return out
def diff_range_loss(a, b, std):
    """Penalize |a| deviating from b: squared deviation averaged over the last
    dim, masked to zero wherever it is within std**2, then globally averaged."""
    sq_dev = (torch.abs(a) - b) ** 2
    sq_dev = torch.mean(sq_dev, dim=-1, keepdim=True)
    # only deviations beyond the tolerance contribute
    mask = (sq_dev > std ** 2).to(sq_dev.dtype)
    return (sq_dev * mask).mean()
#######
def btjd2bft(x):
'''
convert bxtxjx3 to b x j x t for 1D conv
'''
assert len(x.shape) == 4
assert x.shape[-2] == 16
sz = x.shape
x = x.view(sz[0], sz[1], -1)
x = x.permute(0, 2, 1)
return x
def bft2btjd(x):
    '''
    Inverse of btjd2bft: convert b x (16*d) x t back to b x t x 16 x d.
    (Original docstring was copied from btjd2bft and described the wrong direction.)
    '''
    assert len(x.shape) == 3
    sz = x.shape
    x = x.permute(0, 2, 1)
    # view splits the channel dim into (16, d); splitting a single dim is
    # stride-compatible, so no .contiguous() is needed here
    x = x.view(sz[0], sz[2], 16, -1)
    return x
| 11,075 | 34.386581 | 122 | py |
PoseTriplet | PoseTriplet-main/estimator/common/custom_dataset.py | # Copyright (c) 2018-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import numpy as np
import copy
from common.skeleton import Skeleton
from common.mocap_dataset import MocapDataset
from common.camera import normalize_screen_coordinates, image_coordinates
from common.h36m_dataset import h36m_skeleton
custom_camera_params = {
'id': None,
'res_w': None, # Pulled from metadata
'res_h': None, # Pulled from metadata
# Dummy camera parameters (taken from Human3.6M), only for visualization purposes
'azimuth': 70, # Only used for visualization
'orientation': [0.1407056450843811, -0.1500701755285263, -0.755240797996521, 0.6223280429840088],
'translation': [1841.1070556640625, 4955.28466796875, 1563.4454345703125],
}
class CustomDataset(MocapDataset):
    """Mocap dataset built from serialized 2D detections of custom videos.

    Real camera parameters are unknown, so dummy Human3.6M intrinsics/extrinsics
    (see `custom_camera_params`) are used — valid for visualization only; the
    per-video resolution is pulled from the detections' metadata.
    """
    def __init__(self, detections_path, remove_static_joints=True):
        super().__init__(fps=None, skeleton=h36m_skeleton)
        # Load serialized dataset
        data = np.load(detections_path, allow_pickle=True)
        resolutions = data['metadata'].item()['video_metadata']
        self._cameras = {}
        self._data = {}
        for video_name, res in resolutions.items():
            cam = {}
            cam.update(custom_camera_params)
            cam['orientation'] = np.array(cam['orientation'], dtype='float32')
            cam['translation'] = np.array(cam['translation'], dtype='float32')
            cam['translation'] = cam['translation']/1000 # mm to meters
            cam['id'] = video_name
            cam['res_w'] = res['w']
            cam['res_h'] = res['h']
            self._cameras[video_name] = [cam]
            self._data[video_name] = {
                'custom': {
                    'cameras': cam
                }
            }
        if remove_static_joints:
            # Bring the skeleton to 17 joints instead of the original 32
            self.remove_joints([4, 5, 9, 10, 11, 16, 20, 21, 22, 23, 24, 28, 29, 30, 31])
            # Rewire shoulders to the correct parents
            self._skeleton._parents[11] = 8
            self._skeleton._parents[14] = 8
    def supports_semi_supervised(self):
        # No ground-truth 3D / camera data, so semi-supervised training is unavailable
        return False
##############################
### for rnn fake data
##############################
import torch
from torch.utils.data import Dataset, DataLoader
import numpy as np
from tqdm import tqdm
import glob
class MyFakeDataset(Dataset):
    """Dataset over per-clip .npz files of generated (fake) pose data for the
    RNN pipeline. Each item yields a temporal crop of `rf` frames of
    (cam_in, cam_ex, inputs_3dcam_fake, inputs_2d_fake)."""
    def __init__(self, folder, rf=81, random_t=True):
        # `folder` is a glob pattern matching the per-clip .npz files;
        # files are loaded lazily in __getitem__ to keep memory low
        self.nplist = glob.glob(folder)
        # inputs_2d_fake = []
        # inputs_3dcam_fake = []
        # cam_in = []
        # cam_ex = []
        # print('start load fake data')
        # for item in tqdm(nplist):
        #     tmp = np.load(item)
        #     inputs_2d_fake.append(tmp['inputs_2d_fake'])
        #     inputs_3dcam_fake.append(tmp['inputs_3dcam_fake'])
        #     cam_in.append(tmp['cam_in'])
        #     cam_ex.append(tmp['cam_ex'])
        #
        # self.inputs_2d_fake = np.concatenate(inputs_2d_fake, axis=0)
        # self.inputs_3dcam_fake = np.concatenate(inputs_3dcam_fake, axis=0)
        # self.cam_in = np.concatenate(cam_in, axis=0)
        # self.cam_ex = np.concatenate(cam_ex, axis=0)
        # print('finish load fake data')
        self.rf = rf
        self.random_t = random_t
    def __getitem__(self, index):
        # Load one clip and crop `rf` frames along the time axis (dim 1);
        # the crop start is random when random_t, else 0.
        path = self.nplist[index]
        tmp = np.load(path)
        if self.random_t:
            start_t = int(np.random.randint(0, tmp['inputs_2d_fake'].shape[1] - self.rf, size=1))
        else:
            start_t = int(0)
        end_t = int(start_t + self.rf)
        inputs_2d_fake = tmp['inputs_2d_fake'][:,start_t:end_t]
        inputs_3dcam_fake = tmp['inputs_3dcam_fake'][:,start_t:end_t]
        cam_in = tmp['cam_in'][:,start_t:end_t]
        cam_ex = tmp['cam_ex'][:,start_t:end_t]
        return cam_in, cam_ex, inputs_3dcam_fake, inputs_2d_fake
    def __len__(self):
        return len(self.nplist)
##############################
### for cnn fake data
##############################
class MyFakeDataset_v1(Dataset):
    """Dataset over per-clip .npy files of generated 3D poses for the CNN
    pipeline; yields world-space poses in meters, shape (t, j, 3)."""
    def __init__(self, folder, rf=81, random_t=True):
        # `folder` is a glob pattern; rf/random_t are accepted for API
        # compatibility but unused in this variant (whole clips are returned)
        self.nplist = glob.glob(folder)
        print('load fake data: ', len(self.nplist))
    def __getitem__(self, index):
        path = self.nplist[index]
        tmp = np.load(path)
        # drop joint index 10 (presumably 'nose' — TODO confirm against the
        # 17-joint layout) and convert mm -> meters
        fake3d_world = np.delete(tmp.squeeze(), 10, axis=0)/1000.0
        # (j, t, 3) -> (t, j, 3)
        fake3d_world = np.transpose(fake3d_world, (1, 0, 2))
        return fake3d_world
    def __len__(self):
        return len(self.nplist)
| 4,740 | 33.355072 | 101 | py |
PoseTriplet | PoseTriplet-main/estimator/common/camera.py | # Copyright (c) 2018-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import numpy as np
import torch
import torchgeometry as tgm
from common.utils import wrap
from common.quaternion import qrot, qinverse
def normalize_screen_coordinates(X, w, h):
    # Map pixel coordinates so that [0, w] becomes [-1, 1] on x while the
    # aspect ratio is preserved (y ends up in [-h/w, h/w]).
    assert X.shape[-1] == 2
    scaled = X / w * 2
    return scaled - [1, h / w]
def image_coordinates(X, w, h):
    # Inverse of normalize_screen_coordinates: map back to pixel coordinates.
    assert X.shape[-1] == 2
    shifted = X + [1, h / w]
    return shifted * w / 2
####################################
# world_to_camera camera_to_world
# X:nxjx3 R:4 t:3
####################################
def world_to_camera(X, R, t):
    """Transform world-frame points X (N x J x 3) into the camera frame given
    quaternion R (w, x, y, z — the convention qrot expects) and translation t (3,)."""
    Rt = wrap(qinverse, R) # Invert rotation
    return wrap(qrot, np.tile(Rt, (*X.shape[:-1], 1)), X - t) # Rotate and translate
def camera_to_world(X, R, t):
    """Inverse of world_to_camera: rotate camera-frame points X by quaternion R
    and translate by t back into the world frame."""
    return wrap(qrot, np.tile(R, (*X.shape[:-1], 1)), X) + t
####################################
# world_to_camera camera_to_world in torch version
# X:nxjx3 R:nx4 t:nx3
####################################
def world_to_cameraByTensor(X, R, t):
    """Batched torch version of world_to_camera.
    X: n x * x 3 points, R: n x 4 quaternions (wxyz), t: n x 3 translations;
    the per-batch quaternion/translation are broadcast over the point dims."""
    Rt = qinverse(R) # Invert rotation
    return qrot(Rt.unsqueeze(1).unsqueeze(1).repeat(1, *X.shape[1:-1], 1), X - t.unsqueeze(1).unsqueeze(1).repeat(1, *X.shape[1:-1], 1)) # Rotate and translate
    # tmp_X = X.view(-1, 16, 3)
    # tmp_R = R.view(-1, 4)
    # tmp_t = t.view(-1, 3)
    # tmp_Rt = qinverse(tmp_R)  # Invert rotation
    # tmp_out = qrot(tmp_Rt, tmp_X-tmp_t)  # Rotate and translate
    # return tmp_out.view(X.shape)
def camera_to_worldByTensor(X, R, t):
    """Batched torch inverse of world_to_cameraByTensor: rotate camera-frame
    points by per-batch quaternion R (n x 4, wxyz) and add translation t (n x 3)."""
    # tmp = R.unsqueeze(1).unsqueeze(1).repeat(1, *X.shape[1:-1], 1)
    # tmp2 = qrot(R.unsqueeze(1).unsqueeze(1).repeat(1, *X.shape[1:-1], 1), X)
    return qrot(R.unsqueeze(1).unsqueeze(1).repeat(1, *X.shape[1:-1], 1), X) + t.unsqueeze(1).unsqueeze(1).repeat(1, *X.shape[1:-1], 1)
    # tmp_X = X.view(-1, 16, 3)
    # tmp_R = R.view(-1, 4)
    # tmp_t = t.view(-1, 3)
    # tmp_out = qrot(tmp_R, tmp_X) + tmp_t  # Rotate and translate
    # return tmp_out.view(X.shape)
################################
def world2cam_sktpos(skt_in):
    # Convert world coordinates (z-up) to the camera-style convention:
    # reshape to (-1, 16, 3), swap the y/z axes, then negate all axes.
    pose_world = skt_in.reshape(-1, 16, 3)
    pose_swapped = pose_world[..., [0, 2, 1]]
    return -pose_swapped
def cam2world_sktpos(skt_in):
    # Inverse of world2cam_sktpos: negate all axes, then swap y/z back
    # (camera convention -> z-up world coordinates). Input: b t j 3.
    negated = skt_in * -1.0
    return negated[..., [0, 2, 1]]
################################
####################################
# world_to_camera camera_to_world
# apply for sktpos
# X:nxjx3 R:4 t:3
####################################
def reset_center(tmp):
    """
    Center a pose sequence (t x 16 x 3) so that the root trajectory's
    bounding box is centred at x = 0 and y = 0.4; z is left untouched.
    """
    pose = tmp * 1.  # copy; caller's array stays intact
    root_x = pose[:, 0, 0]
    root_y = pose[:, 0, 1]
    cx = (root_x.min() + root_x.max()) * 0.5
    cy = (root_y.min() + root_y.max()) * 0.5 - 0.4
    return pose - np.array([[[cx, cy, 0]]])
def set_center_v2(tmp):
    """
    tmp: tx16x3
    Re-center the root trajectory's bounding box; when the subject is nearly
    static (span < 1m in both x and y), the new center is jittered randomly
    within +/-0.4m (y additionally shifted by +0.4) to diversify placement.
    """
    x = tmp[:, 0, 0]
    y = tmp[:, 0, 1]
    xmin = np.min(x)
    xmax = np.max(x)
    xcenter = (xmin + xmax) * 0.5
    ymin = np.min(y)
    ymax = np.max(y)
    ycenter = (ymin + ymax) * 0.5
    center = np.array([[[xcenter, ycenter, 0]]])
    # add some random placement for near-static sequences only
    w = xmax - xmin
    h = ymax - ymin
    static = 1
    stat = 0.4
    if w < static and h < static:
        x_offset = np.random.uniform(-stat, +stat)
        y_offset = np.random.uniform(-stat, +stat) + 0.4
    else:
        x_offset = 0
        y_offset = 0.4
    offset = np.array([[[x_offset, y_offset, 0]]])
    return tmp - center + offset
def world_to_camera_sktpos_v2(X, R, t):
    """Re-center the pose sequence (set_center_v2), then transform it into the
    camera frame defined by quaternion R (wxyz) and translation t."""
    # X = reset_center(X).astype('float32')
    X = set_center_v2(X).astype('float32')
    Rt = wrap(qinverse, R) # Invert rotation
    return wrap(qrot, np.tile(Rt, (*X.shape[:-1], 1)), X - t) # Rotate and translate
from scipy.spatial.transform import Rotation as R
def world_to_camera_sktpos_v3(X, args):
    """
    random a camera around the person for projection
    Camera position (x, z; y fixed at 0) and Euler orientation are drawn
    uniformly from the rpx_/rpz_/rex_/rey_/rez_ min/max ranges in `args`.
    """
    def wxyz2xyzw(wfist):
        "convert w x y z to x y z w, xyzw is used in scipy."
        return np.stack([wfist[1], wfist[2], wfist[3], wfist[0]], axis=0)
    def xyzw2wxyz(wlast):
        "convert x y z w to w x y z, wxyz is used in qrot"
        return np.stack([wlast[3], wlast[0], wlast[1], wlast[2]], axis=0)
    # sample camera position (height y fixed at 0)
    posi_x = np.random.uniform(args.rpx_min, args.rpx_max)
    posi_y = 0
    posi_z = np.random.uniform(args.rpz_min, args.rpz_max)
    cam_p = np.array([posi_x, posi_y, posi_z]).astype('float32')
    # sample camera orientation as Euler angles (degrees) -> quaternion (wxyz)
    euler_x = np.random.uniform(args.rex_min, args.rex_max)
    euler_y = np.random.uniform(args.rey_min, args.rey_max)
    euler_z = np.random.uniform(args.rez_min, args.rez_max)
    cam_r = R.from_euler('xyz', [euler_x, euler_y, euler_z], degrees=True)
    cam_q = cam_r.as_quat()
    cam_q = xyzw2wxyz(cam_q).astype('float32')
    # X = reset_center(X).astype('float32')
    X = set_center_v2(X).astype('float32')
    Rt = wrap(qinverse, cam_q) # Invert rotation
    return wrap(qrot, np.tile(Rt, (*X.shape[:-1], 1)), X - cam_p) # Rotate and translate
def world_to_camera_sktpos_v3_new(X, args):
    """
    random a camera around the person for projection
    try solving unstable projection training - change to norm sample, or clip more gridient
    Same as world_to_camera_sktpos_v3, but camera parameters are drawn from a
    truncated normal instead of a uniform distribution.
    """
    def norm_sample_withbound(lb, ub):
        # Truncated normal via recursive rejection sampling (mean at the
        # interval center, sigma = 0.3 * width). Deep recursion is only
        # possible in the pathological case of repeated out-of-bound draws.
        mu = 0.5 * (lb + ub)
        sigma = 0.3 * (ub - lb)
        s = np.random.normal(mu, sigma)
        if s < lb or s > ub:
            s = norm_sample_withbound(lb, ub)
        return s
    def wxyz2xyzw(wfist):
        "convert w x y z to x y z w, xyzw is used in scipy."
        return np.stack([wfist[1], wfist[2], wfist[3], wfist[0]], axis=0)
    def xyzw2wxyz(wlast):
        "convert x y z w to w x y z, wxyz is used in qrot"
        return np.stack([wlast[3], wlast[0], wlast[1], wlast[2]], axis=0)
    # sample camera position (height y fixed at 0)
    posi_x = norm_sample_withbound(args.rpx_min, args.rpx_max)
    posi_y = 0
    posi_z = norm_sample_withbound(args.rpz_min, args.rpz_max)
    cam_p = np.array([posi_x, posi_y, posi_z]).astype('float32')
    # sample camera orientation as Euler angles (degrees) -> quaternion (wxyz)
    euler_x = norm_sample_withbound(args.rex_min, args.rex_max)
    euler_y = norm_sample_withbound(args.rey_min, args.rey_max)
    euler_z = norm_sample_withbound(args.rez_min, args.rez_max)
    cam_r = R.from_euler('xyz', [euler_x, euler_y, euler_z], degrees=True)
    cam_q = cam_r.as_quat()
    cam_q = xyzw2wxyz(cam_q).astype('float32')
    # X = reset_center(X).astype('float32')
    X = set_center_v2(X).astype('float32')
    Rt = wrap(qinverse, cam_q) # Invert rotation
    return wrap(qrot, np.tile(Rt, (*X.shape[:-1], 1)), X - cam_p) # Rotate and translate
def zaxis_randrotation(x_in):
    '''
    x: t j 3
    Apply a random root-relative rotation (see posegan_preprocess) to a single
    sequence by adding and removing a batch dimension around it.
    '''
    # x_root = x[:1, :1, :] * 1.0
    # x_rooted = x - x_root
    #
    # random_z = 6.28 * np.random.uniform(0, 1, (1,3)).astype('float32')
    # random_z[:, :3] = 0
    # random_qz = tgm.angle_axis_to_quaternion(torch.from_numpy(random_z)).numpy().astype('float32')
    # x_rooted = camera_to_world(x_rooted, random_qz, np.zeros_like(random_z))
    x = torch.from_numpy(x_in).unsqueeze(0)  # 1 t j c
    x_rooted = posegan_preprocess(x)
    return x_rooted.numpy()[0]
def posegan_preprocess(x, aug_rotate=True):
    '''
    x: b t j 3
    Root-relative random rotation augmentation: subtract the first-frame root,
    optionally rotate each batch element by a random angle about the z axis,
    then add the root back.
    '''
    x_root = x[:, :1, :1, :] * 1.0
    x_rooted = x - x_root
    if aug_rotate:
        # axis-angle with only the z component non-zero, angle in [0, 6.28)
        random_z = 6.28 * torch.rand(x.shape[0], 3).to(x.device)  # b x 3
        random_z[:, :2] = 0
        # random_z[:, :] = 0
        random_qz = tgm.angle_axis_to_quaternion(random_z)
        x_rooted = camera_to_worldByTensor(x_rooted, random_qz, torch.zeros_like(random_z))
    return x_rooted + x_root
#############################################################
# def world_to_cameraByTensor(X, R, t):
# tmp_X, tmp_R, tmp_t = X.detach().cpu().numpy(), R.detach().cpu().numpy(), t.detach().cpu().numpy()
# tmp_Rt = wrap(qinverse, tmp_R) # Invert rotation
# out = wrap(qrot, np.tile(tmp_Rt, (1, *tmp_X.shape[1:-1], 1)), tmp_X - tmp_t) # Rotate and translate
#
# def camera_to_worldByTensor(X, R, t):
# tmp_X, tmp_R, tmp_t = X.detach().cpu().numpy(), R.detach().cpu().numpy(), t.detach().cpu().numpy()
# out = wrap(qrot, np.tile(tmp_R, (1, *tmp_X.shape[:-1], 1)), tmp_X) + tmp_t
# out = torch.from_numpy(out)
# return out.to(X.device)
####################################
# project to 2D in torch version
# supposed workable for any size x J x 3
####################################
def project_to_2d(X, camera_params):
    """
    Project 3D points to 2D using the Human3.6M camera projection function
    (focal length, principal point, radial + tangential distortion).
    This is a differentiable and batched reimplementation of the original
    MATLAB script.

    Arguments:
    X -- 3D points in *camera space* to transform (N, *, 3)
    camera_params -- intrinsic parameters (N, 2+2+3+2=9)
    """
    assert X.shape[-1] == 3
    assert len(camera_params.shape) == 2
    assert camera_params.shape[-1] == 9
    assert X.shape[0] == camera_params.shape[0]
    # broadcast the intrinsics over the point dimensions
    while len(camera_params.shape) < len(X.shape):
        camera_params = camera_params.unsqueeze(1)
    focal = camera_params[..., :2]
    center = camera_params[..., 2:4]
    k_radial = camera_params[..., 4:7]
    p_tangential = camera_params[..., 7:]
    # perspective division, clamped for numerical stability
    XX = torch.clamp(X[..., :2] / X[..., 2:], min=-1, max=1)
    r2 = torch.sum(XX[..., :2]**2, dim=len(XX.shape)-1, keepdim=True)
    # polynomial radial distortion in r^2, r^4, r^6
    radial = 1 + torch.sum(k_radial * torch.cat((r2, r2**2, r2**3), dim=len(r2.shape)-1), dim=len(r2.shape)-1, keepdim=True)
    tan = torch.sum(p_tangential * XX, dim=len(XX.shape)-1, keepdim=True)
    distorted = XX * (radial + tan) + p_tangential * r2
    return focal * distorted + center
def project_to_2d_linear(X, camera_params):
    """
    Project 3D points to 2D using only linear intrinsics (focal length and
    principal point), i.e. no distortion model.

    Arguments:
    X -- 3D points in *camera space* to transform (N, *, 3)
    camera_params -- intrinsic parameters (N, 2+2+3+2=9)
    """
    assert X.shape[-1] == 3
    assert len(camera_params.shape) == 2
    assert camera_params.shape[-1] == 9
    assert X.shape[0] == camera_params.shape[0]
    # broadcast the intrinsics over the point dimensions
    while len(camera_params.shape) < len(X.shape):
        camera_params = camera_params.unsqueeze(1)
    focal = camera_params[..., :2]
    center = camera_params[..., 2:4]
    # perspective division, clamped for numerical stability
    XX = torch.clamp(X[..., :2] / X[..., 2:], min=-1, max=1)
    return focal * XX + center
def project_to_2d_purelinear(X):
    """
    Project 3D camera-space points (N, *, 3) to 2D by plain perspective
    division and a fixed focal scale — no clamping, no distortion, no
    principal point.
    """
    assert X.shape[-1] == 3
    # XX = torch.clamp(X[..., :2] / X[..., 2:], min=-1, max=1)
    projected = X[..., :2] / X[..., 2:]
    # fixed focal scale related to the absolute depth information
    focal = 2.3
    return projected * focal
| 11,136 | 31.755882 | 159 | py |
PoseTriplet | PoseTriplet-main/estimator/common/loss.py | # Copyright (c) 2018-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import torch
import numpy as np
def mpjpe(predicted, target):
    """
    Mean per-joint position error (mean Euclidean distance over joints),
    a.k.a. "Protocol #1" in the literature.
    """
    assert predicted.shape == target.shape
    per_joint = torch.norm(predicted - target, dim=len(target.shape) - 1)
    return per_joint.mean()
def weighted_mpjpe(predicted, target, w):
    """
    Weighted mean per-joint position error: per-joint Euclidean distances
    scaled by the per-sample weights w before averaging.
    """
    assert predicted.shape == target.shape
    assert w.shape[0] == predicted.shape[0]
    per_joint = torch.norm(predicted - target, dim=len(target.shape) - 1)
    return (w * per_joint).mean()
def p_mpjpe(predicted, target):
    """
    Pose error: MPJPE after rigid alignment (scale, rotation, and translation),
    often referred to as "Protocol #2" in many papers.
    Implements Procrustes alignment of `predicted` onto `target` per sample.
    """
    assert predicted.shape == target.shape
    # center both poses on their joint centroid
    muX = np.mean(target, axis=1, keepdims=True)
    muY = np.mean(predicted, axis=1, keepdims=True)
    X0 = target - muX
    Y0 = predicted - muY
    # normalize to unit Frobenius norm
    normX = np.sqrt(np.sum(X0**2, axis=(1, 2), keepdims=True))
    normY = np.sqrt(np.sum(Y0**2, axis=(1, 2), keepdims=True))
    X0 /= normX
    Y0 /= normY
    # optimal rotation via SVD of the cross-covariance
    H = np.matmul(X0.transpose(0, 2, 1), Y0)
    U, s, Vt = np.linalg.svd(H)
    V = Vt.transpose(0, 2, 1)
    R = np.matmul(V, U.transpose(0, 2, 1))
    # Avoid improper rotations (reflections), i.e. rotations with det(R) = -1
    sign_detR = np.sign(np.expand_dims(np.linalg.det(R), axis=1))
    V[:, :, -1] *= sign_detR
    s[:, -1] *= sign_detR.flatten()
    R = np.matmul(V, U.transpose(0, 2, 1)) # Rotation
    tr = np.expand_dims(np.sum(s, axis=1, keepdims=True), axis=2)
    a = tr * normX / normY # Scale
    t = muX - a*np.matmul(muY, R) # Translation
    # Perform rigid transformation on the input
    predicted_aligned = a*np.matmul(predicted, R) + t
    # Return MPJPE
    return np.mean(np.linalg.norm(predicted_aligned - target, axis=len(target.shape)-1))
def n_mpjpe(predicted, target):
    """
    Normalized MPJPE (optimal scale only), adapted from:
    https://github.com/hrhodin/UnsupervisedGeometryAwareRepresentationLearning/blob/master/losses/poses.py
    """
    assert predicted.shape == target.shape
    # closed-form least-squares scale aligning predicted to target
    norm_pred = torch.mean(torch.sum(predicted ** 2, dim=3, keepdim=True), dim=2, keepdim=True)
    dot = torch.mean(torch.sum(target * predicted, dim=3, keepdim=True), dim=2, keepdim=True)
    scaled = (dot / norm_pred) * predicted
    # mpjpe of the rescaled prediction (inlined)
    return torch.mean(torch.norm(scaled - target, dim=len(target.shape) - 1))
def mean_velocity_error(predicted, target):
    """
    Mean per-joint velocity error: mean Euclidean distance between the first
    temporal derivatives (frame-to-frame differences) of prediction and target.
    """
    assert predicted.shape == target.shape
    vel_pred = np.diff(predicted, axis=0)
    vel_gt = np.diff(target, axis=0)
    dist = np.linalg.norm(vel_pred - vel_gt, axis=len(target.shape) - 1)
    return np.mean(dist)
def compute_PCK(gts, preds, scales=1000, eval_joints=None, threshold=150):
    """
    Percentage of Correct Keypoints: fraction (in %) of evaluated joints whose
    error in mm (poses assumed to be in metres) falls below `threshold`.
    NOTE: `scales` is currently ignored; a fixed 1000 (m -> mm) is used,
    matching the original implementation.
    """
    if eval_joints is None:
        eval_joints = list(range(gts.shape[1]))
    true_positive = 0
    total = 0
    for gt, pred in zip(gts, preds):
        # scale = scales[n]
        scale = 1000
        joint_err = np.take(np.sqrt(np.sum(np.power(pred - gt, 2), 1)) * scale, eval_joints, axis=0)
        true_positive += (joint_err < threshold).sum()
        total += joint_err.size
    return float(true_positive / total) * 100
def compute_AUC(gts, preds, scales=1000, eval_joints=None):
    """Area under the PCK curve: mean PCK over 31 thresholds in [0, 150] mm."""
    # This range of thresholds mimics 'mpii_compute_3d_pck.m', which is provided as part of the
    # MPI-INF-3DHP test data release.
    thresholds = np.linspace(0, 150, 31)
    pck_list = []
    for threshold in thresholds:
        pck_list.append(compute_PCK(gts, preds, scales, eval_joints, threshold))
    auc = np.mean(pck_list)
    return auc
def pose_align(predicted, target):
    """
    Pose rigid alignment (scale, rotation, and translation),
    i.e. Procrustes alignment of `predicted` onto `target`; returns the
    aligned poses rather than the error (cf. p_mpjpe, which shares this code).
    """
    assert predicted.shape == target.shape
    # center both poses on their joint centroid
    muX = np.mean(target, axis=1, keepdims=True)
    muY = np.mean(predicted, axis=1, keepdims=True)
    X0 = target - muX
    Y0 = predicted - muY
    # normalize to unit Frobenius norm
    normX = np.sqrt(np.sum(X0 ** 2, axis=(1, 2), keepdims=True))
    normY = np.sqrt(np.sum(Y0 ** 2, axis=(1, 2), keepdims=True))
    X0 /= normX
    Y0 /= normY
    # optimal rotation via SVD of the cross-covariance
    H = np.matmul(X0.transpose(0, 2, 1), Y0)
    U, s, Vt = np.linalg.svd(H)
    V = Vt.transpose(0, 2, 1)
    R = np.matmul(V, U.transpose(0, 2, 1))
    # Avoid improper rotations (reflections), i.e. rotations with det(R) = -1
    sign_detR = np.sign(np.expand_dims(np.linalg.det(R), axis=1))
    V[:, :, -1] *= sign_detR
    s[:, -1] *= sign_detR.flatten()
    R = np.matmul(V, U.transpose(0, 2, 1))  # Rotation
    tr = np.expand_dims(np.sum(s, axis=1, keepdims=True), axis=2)
    a = tr * normX / normY  # Scale
    t = muX - a * np.matmul(muY, R)  # Translation
    # Perform rigid transformation on the input
    predicted_aligned = a * np.matmul(predicted, R) + t
    # # Return MPJPE
    # return np.mean(np.linalg.norm(predicted_aligned - target, axis=len(target.shape) - 1))
    return predicted_aligned
def pose_scaled(predicted, target):
    """
    Rescale `predicted` by the closed-form optimal (least-squares) scale that
    aligns it to `target` (scale-only normalization). Adapted from:
    https://github.com/hrhodin/UnsupervisedGeometryAwareRepresentationLearning/blob/master/losses/poses.py
    """
    assert predicted.shape == target.shape
    norm_pred = torch.mean(torch.sum(predicted ** 2, dim=3, keepdim=True), dim=2, keepdim=True)
    dot = torch.mean(torch.sum(target * predicted, dim=3, keepdim=True), dim=2, keepdim=True)
    # return mpjpe(scale * predicted, target)
    return (dot / norm_pred) * predicted
| 6,020 | 31.722826 | 106 | py |
PoseTriplet | PoseTriplet-main/estimator/common/utils.py | # Copyright (c) 2018-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import torch
import numpy as np
import hashlib
def wrap(func, *args, unsqueeze=False):
    """
    Wrap a torch function so it can be called with NumPy arrays.
    NumPy inputs are converted to tensors (optionally with a batch dim
    prepended), and tensor results are converted back to NumPy.
    """
    converted = []
    for arg in args:
        if type(arg) == np.ndarray:
            arg = torch.from_numpy(arg)
            if unsqueeze:
                arg = arg.unsqueeze(0)
        converted.append(arg)

    result = func(*converted)

    def _to_numpy(res):
        # undo the batch dim (if added) and hand back a NumPy array
        if unsqueeze:
            res = res.squeeze(0)
        return res.numpy()

    if isinstance(result, tuple):
        return tuple(_to_numpy(r) if type(r) == torch.Tensor else r for r in result)
    if type(result) == torch.Tensor:
        return _to_numpy(result)
    return result
def deterministic_random(min_value, max_value, data):
    """Deterministically map the string `data` to an int in [min_value, max_value)
    by hashing it with SHA-256 and scaling the first 4 digest bytes."""
    digest = hashlib.sha256(data.encode()).digest()
    seed = int.from_bytes(digest[:4], byteorder='little', signed=False)
    return min_value + int(seed / (2**32 - 1) * (max_value - min_value))
PoseTriplet | PoseTriplet-main/estimator/common/model.py | # Copyright (c) 2018-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import torch
import torch.nn as nn
class TemporalModelBase(nn.Module):
    """
    Do not instantiate this class.
    Shared scaffolding for the temporal-convolution pose models below;
    subclasses must define `_forward_blocks`, `causal_shift`, `layers_bn`
    and extend `self.pad`.
    """
    def __init__(self, num_joints_in, in_features, num_joints_out,
                 filter_widths, causal, dropout, channels):
        super().__init__()
        # Validate input: symmetric padding requires odd kernels
        for fw in filter_widths:
            assert fw % 2 != 0, 'Only odd filter widths are supported'
        self.num_joints_in = num_joints_in
        self.in_features = in_features
        self.num_joints_out = num_joints_out
        self.filter_widths = filter_widths
        self.drop = nn.Dropout(dropout)
        self.relu = nn.ReLU(inplace=True)
        # per-layer half receptive fields; subclasses append to this
        self.pad = [ filter_widths[0] // 2 ]
        self.expand_bn = nn.BatchNorm1d(channels, momentum=0.1)
        # final 1x1 conv producing num_joints_out * 3 coordinates per frame
        self.shrink = nn.Conv1d(channels, num_joints_out*3, 1)
    def set_bn_momentum(self, momentum):
        # update momentum on every BN layer (used for momentum decay schedules)
        self.expand_bn.momentum = momentum
        for bn in self.layers_bn:
            bn.momentum = momentum
    def receptive_field(self):
        """
        Return the total receptive field of this model as # of frames.
        """
        frames = 0
        for f in self.pad:
            frames += f
        return 1 + 2*frames
    def total_causal_shift(self):
        """
        Return the asymmetric offset for sequence padding.
        The returned value is typically 0 if causal convolutions are disabled,
        otherwise it is half the receptive field.
        """
        frames = self.causal_shift[0]
        next_dilation = self.filter_widths[0]
        for i in range(1, len(self.filter_widths)):
            frames += self.causal_shift[i] * next_dilation
            next_dilation *= self.filter_widths[i]
        return frames
    def forward(self, x):
        # x: (B, T, num_joints_in, in_features)
        assert len(x.shape) == 4
        assert x.shape[-2] == self.num_joints_in
        assert x.shape[-1] == self.in_features
        sz = x.shape[:3]
        # flatten joints/features and move channels first for Conv1d:
        # (B, T, J*C) -> (B, J*C, T)
        x = x.view(x.shape[0], x.shape[1], -1)
        x = x.permute(0, 2, 1)
        x = self._forward_blocks(x)
        # back to (B, T', num_joints_out, 3)
        x = x.permute(0, 2, 1)
        x = x.view(sz[0], -1, self.num_joints_out, 3)
        # x[:,:,:1,:] = x[:,:,:1,:] * 0 # debug, compare with pa1
        return x
class TemporalModel(TemporalModelBase):
    """
    Reference 3D pose estimation model with temporal convolutions.
    This implementation can be used for all use-cases.
    """
    def __init__(self, num_joints_in, in_features, num_joints_out,
                 filter_widths, causal=False, dropout=0.25, channels=1024, dense=False):
        """
        Initialize this model.
        Arguments:
        num_joints_in -- number of input joints (e.g. 17 for Human3.6M)
        in_features -- number of input features for each joint (typically 2 for 2D input)
        num_joints_out -- number of output joints (can be different than input)
        filter_widths -- list of convolution widths, which also determines the # of blocks and receptive field
        causal -- use causal convolutions instead of symmetric convolutions (for real-time applications)
        dropout -- dropout probability
        channels -- number of convolution channels
        dense -- use regular dense convolutions instead of dilated convolutions (ablation experiment)
        """
        super().__init__(num_joints_in, in_features, num_joints_out, filter_widths, causal, dropout, channels)
        self.expand_conv = nn.Conv1d(num_joints_in*in_features, channels, filter_widths[0], bias=False)
        layers_conv = []
        layers_bn = []
        # asymmetric shift applied per layer when causal
        self.causal_shift = [ (filter_widths[0]) // 2 if causal else 0 ]
        next_dilation = filter_widths[0]
        for i in range(1, len(filter_widths)):
            self.pad.append((filter_widths[i] - 1)*next_dilation // 2)
            self.causal_shift.append((filter_widths[i]//2 * next_dilation) if causal else 0)
            # each block = dilated conv + 1x1 conv, with BN after each
            layers_conv.append(nn.Conv1d(channels, channels,
                                         filter_widths[i] if not dense else (2*self.pad[-1] + 1),
                                         dilation=next_dilation if not dense else 1,
                                         bias=False))
            layers_bn.append(nn.BatchNorm1d(channels, momentum=0.1))
            layers_conv.append(nn.Conv1d(channels, channels, 1, dilation=1, bias=False))
            layers_bn.append(nn.BatchNorm1d(channels, momentum=0.1))
            next_dilation *= filter_widths[i]
        self.layers_conv = nn.ModuleList(layers_conv)
        self.layers_bn = nn.ModuleList(layers_bn)
    def _forward_blocks(self, x):
        x = self.drop(self.relu(self.expand_bn(self.expand_conv(x))))
        for i in range(len(self.pad) - 1):
            pad = self.pad[i+1]
            shift = self.causal_shift[i+1]
            # residual: the input cropped to the block's (possibly shifted) output length
            res = x[:, :, pad + shift : x.shape[2] - pad + shift]
            x = self.drop(self.relu(self.layers_bn[2*i](self.layers_conv[2*i](x))))
            x = res + self.drop(self.relu(self.layers_bn[2*i + 1](self.layers_conv[2*i + 1](x))))
        x = self.shrink(x)
        return x
class TemporalModelOptimized1f(TemporalModelBase):
    """
    3D pose estimation model optimized for single-frame batching, i.e.
    where batches have input length = receptive field, and output length = 1.
    This scenario is only used for training when stride == 1.
    This implementation replaces dilated convolutions with strided convolutions
    to avoid generating unused intermediate results. The weights are interchangeable
    with the reference implementation.
    """
    def __init__(self, num_joints_in, in_features, num_joints_out,
                 filter_widths, causal=False, dropout=0.25, channels=1024):
        """
        Initialize this model.
        Arguments:
        num_joints_in -- number of input joints (e.g. 17 for Human3.6M)
        in_features -- number of input features for each joint (typically 2 for 2D input)
        num_joints_out -- number of output joints (can be different than input)
        filter_widths -- list of convolution widths, which also determines the # of blocks and receptive field
        causal -- use causal convolutions instead of symmetric convolutions (for real-time applications)
        dropout -- dropout probability
        channels -- number of convolution channels
        """
        super().__init__(num_joints_in, in_features, num_joints_out, filter_widths, causal, dropout, channels)
        # strided (not dilated) convs: stride == kernel width downsamples time
        self.expand_conv = nn.Conv1d(num_joints_in*in_features, channels, filter_widths[0], stride=filter_widths[0], bias=False)
        layers_conv = []
        layers_bn = []
        self.causal_shift = [ (filter_widths[0] // 2) if causal else 0 ]
        next_dilation = filter_widths[0]
        for i in range(1, len(filter_widths)):
            self.pad.append((filter_widths[i] - 1)*next_dilation // 2)
            self.causal_shift.append((filter_widths[i]//2) if causal else 0)
            layers_conv.append(nn.Conv1d(channels, channels, filter_widths[i], stride=filter_widths[i], bias=False))
            layers_bn.append(nn.BatchNorm1d(channels, momentum=0.1))
            layers_conv.append(nn.Conv1d(channels, channels, 1, dilation=1, bias=False))
            layers_bn.append(nn.BatchNorm1d(channels, momentum=0.1))
            next_dilation *= filter_widths[i]
        self.layers_conv = nn.ModuleList(layers_conv)
        self.layers_bn = nn.ModuleList(layers_bn)
    def _forward_blocks(self, x):
        x = self.drop(self.relu(self.expand_bn(self.expand_conv(x))))
        for i in range(len(self.pad) - 1):
            # residual: subsample the input to match the strided conv output
            res = x[:, :, self.causal_shift[i+1] + self.filter_widths[i+1]//2 :: self.filter_widths[i+1]]
            x = self.drop(self.relu(self.layers_bn[2*i](self.layers_conv[2*i](x))))
            x = res + self.drop(self.relu(self.layers_bn[2*i + 1](self.layers_conv[2*i + 1](x))))
        x = self.shrink(x)
        return x
| 8,387 | 40.524752 | 128 | py |
PoseTriplet | PoseTriplet-main/estimator/common/camera2world.py | import torch
import numpy as np
from sklearn.decomposition import PCA
from scipy.spatial.transform import Rotation as R
"""
camera to world:
case 1: assume the camera ID is known for each clip.
A. do PCA for 150 clips to found a gravity approximation
B. assume a stand pose, filter out the stand pose from prediction, do PCA among those.
case 2: assume a rough gravity direction
A. do PCA for each clip, choose the gravity approximation by compare them.
case 3: for in the wild scenario
A. manual set
B. do case 1 -> B
case 4: use off-the-shelf ground/gravity estimation
TBD
"""
def unit_vector(vector):
    """Normalize *vector* to unit length along its last axis."""
    norm = np.linalg.norm(vector, axis=-1, keepdims=True)
    return vector / norm
def angle_between(v1, v2):
    """Return the cosine similarity (dot product of the unit vectors) of v1 and v2.

    NOTE: despite the historical name, this returns cos(angle), NOT the angle
    in radians -- callers (e.g. check_Z_dir) compare the result against cosine
    thresholds such as 0.996, so the behavior is kept unchanged and only the
    documentation is corrected here.
    """
    v1_u = unit_vector(v1)
    v2_u = unit_vector(v2)
    return np.dot(v1_u, v2_u)
def wxyz2xyzw(wfist):
    """Reorder a quaternion from (w, x, y, z) to the (x, y, z, w) layout used by scipy."""
    w, x, y, z = wfist[0], wfist[1], wfist[2], wfist[3]
    return np.stack([x, y, z, w], axis=0)
def xyzw2wxyz(wfist):
    """Reorder a quaternion from scipy's (x, y, z, w) layout back to (w, x, y, z)."""
    x, y, z, w = wfist[0], wfist[1], wfist[2], wfist[3]
    return np.stack([w, x, y, z], axis=0)
def get_pca_components(pose_cam_in):
    """
    input: tx16x3 pose seq in camera coordinate
    return: unit-length axes as rows, reordered to (X, Y, Z)
    """
    # center every frame on its root joint before fitting
    centered = pose_cam_in - pose_cam_in[:, :1, :]
    pca = PCA(n_components=3)
    pca.fit_transform(centered.reshape(-1, 3))
    # components_ rows come out ordered by explained variance as (Z, X, Y)
    # for standing humans -- reorder to (X, Y, Z) before normalizing
    pca_xyz = pca.components_[[1, 2, 0], :]
    return unit_vector(pca_xyz)
def check_Z_dir(pose_cam_in, pca_xyz):
    """Orient the PCA Z axis so it matches the expected 'up' direction.

    pose_cam_in is currently unused; it is kept for API compatibility.
    Returns a 3x3 array whose rows are the corrected (X, Y, Z) axes.
    """
    axis_x = pca_xyz[0] * 1.
    axis_y = pca_xyz[1] * 1.
    axis_z = pca_xyz[2] * 1.
    ref_z = unit_vector([0, -0.97, -0.25])  # the appropriate up direction in camera coords
    cos_sim = angle_between(ref_z, axis_z)
    if cos_sim > 0.996:
        # already aligned with the reference: keep Z, rebuild X for a right-handed frame
        axis_z = axis_z * +1
        axis_x = np.cross(axis_y, axis_z)
    elif cos_sim < -0.996:
        # anti-aligned: flip Z, then rebuild X
        axis_z = axis_z * -1
        axis_x = np.cross(axis_y, axis_z)
    else:
        # ambiguous: fall back to the reference Z and re-orthogonalize Y and X
        axis_z = ref_z * +1
        axis_y = np.cross(axis_z, axis_x)
        axis_x = np.cross(axis_y, axis_z)
    return np.stack([axis_x, axis_y, axis_z])
# def check_Z_accuracy(pca_xyz, cam_ex):
# """
# a double check for z direction
# """
# pca_z = pca_xyz[2] * 1.
# # q = expert_dict[takes[30]]['cam_ex']
# q = cam_ex * 1.
# r_cam2world = R.from_quat(wxyz2xyzw(q)) # .inv()
# world_z = r_cam2world.inv().apply([0, 0, 1])
#
# acc = angle_between(pca_z, world_z)
# if np.abs(acc) > 0.98:
# pass
# else:
# assert False, "the pca_z seems wrong with value {}, please check!!!".format(acc)
def cam2world_byPCA(pose_cam_in, cam_ex=None):
    """Rotate a camera-space pose sequence into a gravity-aligned world frame via PCA.

    cam_ex is currently unused (kept for API compatibility with the commented
    accuracy check).
    """
    # estimate the body axes in camera space, then force Z to point up
    axes_in_cam = get_pca_components(pose_cam_in)
    axes_in_cam = check_Z_dir(pose_cam_in, axes_in_cam)
    # rows of axes_in_cam are the world axes expressed in camera coordinates,
    # so the transposed matrix maps world -> camera; invert for camera -> world
    cam2world = R.from_matrix(axes_in_cam.T).inv()
    flat = pose_cam_in.reshape(-1, 3)
    return cam2world.apply(flat).reshape(pose_cam_in.shape)
def camera_to_worldByPCA(X, R=None):
    """Torch wrapper around cam2world_byPCA for a single-item batch.

    NOTE(review): the parameter name R shadows the module-level scipy Rotation
    alias; only the tensor argument is used inside this function.
    """
    pose_np = X.detach().cpu().numpy()[0]
    cam_ex = None if R is None else R.detach().cpu().numpy()[0]
    world = cam2world_byPCA(pose_np, cam_ex)
    return torch.from_numpy(world.astype('float32')).unsqueeze(0)
| 3,850 | 31.91453 | 97 | py |
PoseTriplet | PoseTriplet-main/estimator/common/quaternion.py | # Copyright (c) 2018-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import torch
def qrot(q, v):
    """
    Rotate vector(s) v about the rotation described by quaternion(s) q.
    Expects a tensor of shape (*, 4) for q and a tensor of shape (*, 3) for v,
    where * denotes any number of dimensions.
    Returns a tensor of shape (*, 3).
    """
    assert q.shape[-1] == 4
    assert v.shape[-1] == 3
    assert q.shape[:-1] == v.shape[:-1]
    last_dim = len(q.shape) - 1
    w = q[..., :1]
    qvec = q[..., 1:]
    # v' = v + 2 * (w * (qvec x v) + qvec x (qvec x v))
    uv = torch.cross(qvec, v, dim=last_dim)
    uuv = torch.cross(qvec, uv, dim=last_dim)
    return v + 2 * (w * uv + uuv)
def qinverse(q, inplace=False):
    """Return the conjugate of q -- the inverse for normalized quaternions.

    With inplace=True the input tensor is mutated and returned directly.
    """
    if inplace:
        q[..., 1:] *= -1
        return q
    w = q[..., :1]
    xyz = q[..., 1:]
    return torch.cat((w, -xyz), dim=len(q.shape) - 1)
PoseTriplet | PoseTriplet-main/estimator/poseaugtool/model_virtualCam/virtualCam.py | import torch
import torchgeometry as tgm
from torch import nn
from poseaugtool.model_conv1d.conv1d import Conv1dBlock
from common.quaternion import qrot
from common.camera import project_to_2d_purelinear
class DoubleLinear(nn.Module):
    """Two Linear -> BatchNorm -> LeakyReLU stages of a shared width."""
    def __init__(self, linear_size):
        super(DoubleLinear, self).__init__()
        # attribute names and creation order kept for checkpoint compatibility
        self.w1 = nn.Linear(linear_size, linear_size)
        self.batch_norm1 = nn.BatchNorm1d(linear_size)
        self.w2 = nn.Linear(linear_size, linear_size)
        self.batch_norm2 = nn.BatchNorm1d(linear_size)
        self.relu = nn.LeakyReLU(inplace=True)
    def forward(self, x):
        out = self.relu(self.batch_norm1(self.w1(x)))
        out = self.relu(self.batch_norm2(self.w2(out)))
        return out
class Dis_Conv1D(nn.Module):
    """Temporal discriminator: Conv1d trunk + 1x1 head, mean-pooled over time."""
    def __init__(self, nx, ks=3, nh_conv1d=[64, 64]):
        super(Dis_Conv1D, self).__init__()
        self.nx = nx
        self.nh_conv1d = nh_conv1d  # hidden channel widths of the conv trunk
        self.conv1 = Conv1dBlock(nx, nh_conv1d, activation='leak', ks=ks)
        self.out = nn.Conv1d(nh_conv1d[-1], 1, kernel_size=1, stride=1)
    def forward(self, x):
        '''
        :param x: B x T x jd (or B x T x J x D, flattened internally)
        :return: B x 1 score per clip
        '''
        if len(x.shape) == 4:
            # flatten joints and coordinates: B x T x J x D -> B x T x (J*D)
            b, t, j, d = x.shape
            x = x.view(b, t, j * d)
        feat = x.permute(0, 2, 1).contiguous()  # channels-first for Conv1d
        feat = self.conv1(feat)
        score = self.out(feat)
        # average the frame-wise scores over time
        return torch.mean(score, dim=-1)
#######################################################################################
# ####### gan generator for virtual camera
#######################################################################################
class G_camera(nn.Module):
    """
    v0: virtual-camera generator.
    Samples a random camera extrinsic (rotation r, translation t) conditioned
    on a 3D pose sequence, applies it to the poses and projects them to 2D.
    In this version the rotation is masked so only the first axis component
    is non-zero.
    """
    def __init__(self, args, nx=48, ks=3, noise_channle=64):
        """
        args -- must provide cam_r_range and cam_t_range scaling factors
        nx -- per-frame input feature size (16 joints x 3 coords by default)
        ks -- temporal kernel size of the conv feature extractor
        noise_channle -- width of the Gaussian noise appended to the pose feature
        """
        super(G_camera, self).__init__()
        self.cam_r_range = args.cam_r_range
        self.cam_t_range = args.cam_t_range
        self.noise_channle = noise_channle
        nh_conv1d = [64, 64] # hidden dim of conv1d
        self.conv1 = Conv1dBlock(nx, nh_conv1d, activation='leak', ks=ks)
        linear_size = noise_channle + nh_conv1d[-1]
        # heads regressing rotation (wr) and translation (wt) from feature + noise
        self.wr = nn.Sequential(
            DoubleLinear(linear_size),
            nn.Linear(linear_size, 3),
            nn.Tanh()
        )
        self.wt = nn.Sequential(
            DoubleLinear(linear_size),
            nn.Linear(linear_size, 3),
        )
        self.tanh = nn.Tanh()
        self.sigmoid = nn.Sigmoid()
    def forward(self, pose3D, noise_dict=None):
        '''
        :param pose3D: B x T x j x d, with non-zero root position
        :param noise_dict: optional dict with precomputed noise under keys
            'G-cam-r' and 'G-cam-t' (reproducible sampling); fresh Gaussian
            noise is drawn when None
        :return: dict with the transformed 3D pose, its 2D projection, r and t
        '''
        x = pose3D * 1.
        if len(x.shape) == 4:
            'B x T x 16 x 3'
            b, t, j, d = x.shape
            x = x.view(b, t, j*d)
        # get the temporally pooled feature used to condition R and T
        x = x.permute(0, 2, 1).contiguous() # B x T x jd ---> B x jd x T
        x = self.conv1(x) # B x c x T
        x = torch.mean(x, dim=-1) # B x nh x Ti ---> B x nh
        # calculate R (angle-axis vector, then converted to a quaternion)
        if noise_dict is None:
            noise = torch.randn(x.shape[0], self.noise_channle, device=x.device)
        else:
            noise = noise_dict['G-cam-r']
        r = self.wr(torch.cat((x, noise), dim=1)) * self.cam_r_range
        r = r.view(r.size(0), 3)
        # zero out the last two components: v0 only rotates about the first axis
        mask = torch.ones_like(r)
        mask[:, 1:] = 0
        r = r * mask
        qr = tgm.angle_axis_to_quaternion(r)
        # calculate T; tz is kept positive (sigmoid) and shifted by +2 --
        # presumably to keep the subject in front of the camera; confirm units
        if noise_dict is None:
            noise = torch.randn(x.shape[0], self.noise_channle, device=x.device)
        else:
            noise = noise_dict['G-cam-t']
        tmp_t = self.wt(torch.cat((x, noise), dim=1))
        tx = self.tanh(tmp_t[:, :1]) * self.cam_t_range * 1.
        ty = self.tanh(tmp_t[:, 1:2]) * self.cam_t_range * 0.5
        tz = self.sigmoid(tmp_t[:, 2:]) * self.cam_t_range * 1. + 2.
        t = torch.cat([tx, ty, tz], dim=1)
        # apply the sampled extrinsic and reproject to create a new 2D-3D pair
        pose3D_camed = qrot(qr.unsqueeze(1).unsqueeze(1).repeat(1, *pose3D.shape[1:-1], 1), pose3D) \
                       + t.unsqueeze(1).unsqueeze(1).repeat(1, *pose3D.shape[1:-1], 1)
        pose2D_camed = project_to_2d_purelinear(pose3D_camed)
        return {
            'pose3D_camed': pose3D_camed,
            'pose2D_camed': pose2D_camed,
            'r': r,
            't': t,
        }
class G_camera_v2(nn.Module):
    """
    v2: like G_camera (v0) but with unrestricted 3-axis rotation (the axis
    mask of v0 is removed) and the full translation range on the y axis.
    """
    def __init__(self, args, nx=48, ks=3, noise_channle=64):
        """
        args -- must provide cam_r_range and cam_t_range scaling factors
        nx -- per-frame input feature size (16 joints x 3 coords by default)
        ks -- temporal kernel size of the conv feature extractor
        noise_channle -- width of the Gaussian noise appended to the pose feature
        """
        super(G_camera_v2, self).__init__()
        self.cam_r_range = args.cam_r_range
        self.cam_t_range = args.cam_t_range
        self.noise_channle = noise_channle
        nh_conv1d = [64, 64] # hidden dim of conv1d
        self.conv1 = Conv1dBlock(nx, nh_conv1d, activation='leak', ks=ks)
        linear_size = noise_channle + nh_conv1d[-1]
        # heads regressing rotation (wr) and translation (wt) from feature + noise
        self.wr = nn.Sequential(
            DoubleLinear(linear_size),
            nn.Linear(linear_size, 3),
            nn.Tanh()
        )
        self.wt = nn.Sequential(
            DoubleLinear(linear_size),
            nn.Linear(linear_size, 3),
        )
        self.tanh = nn.Tanh()
        self.sigmoid = nn.Sigmoid()
    def forward(self, pose3D, noise_dict=None):
        '''
        :param pose3D: B x T x j x d, with non-zero root position
        :param noise_dict: optional dict with precomputed noise under keys
            'G-cam-r' and 'G-cam-t' (reproducible sampling); fresh Gaussian
            noise is drawn when None
        :return: dict with the transformed 3D pose, its 2D projection, r and t
        '''
        x = pose3D * 1.
        if len(x.shape) == 4:
            'B x T x 16 x 3'
            b, t, j, d = x.shape
            x = x.view(b, t, j*d)
        # get the temporally pooled feature used to condition R and T
        x = x.permute(0, 2, 1).contiguous() # B x T x jd ---> B x jd x T
        x = self.conv1(x) # B x c x T
        x = torch.mean(x, dim=-1) # B x nh x Ti ---> B x nh
        # calculate R (angle-axis vector, then converted to a quaternion)
        if noise_dict is None:
            noise = torch.randn(x.shape[0], self.noise_channle, device=x.device)
        else:
            noise = noise_dict['G-cam-r']
        r = self.wr(torch.cat((x, noise), dim=1)) * self.cam_r_range
        r = r.view(r.size(0), 3)
        # v2: no axis mask -- rotation may occur about all three axes
        qr = tgm.angle_axis_to_quaternion(r)
        # calculate T; tz is kept positive (sigmoid) and shifted by +2
        if noise_dict is None:
            noise = torch.randn(x.shape[0], self.noise_channle, device=x.device)
        else:
            noise = noise_dict['G-cam-t']
        tmp_t = self.wt(torch.cat((x, noise), dim=1))
        tx = self.tanh(tmp_t[:, :1]) * self.cam_t_range * 1.
        ty = self.tanh(tmp_t[:, 1:2]) * self.cam_t_range * 1. # since 0922: full range (was * 0.5 in v0)
        tz = self.sigmoid(tmp_t[:, 2:]) * self.cam_t_range * 1. + 2.
        t = torch.cat([tx, ty, tz], dim=1)
        # apply the sampled extrinsic and reproject to create a new 2D-3D pair
        pose3D_camed = qrot(qr.unsqueeze(1).unsqueeze(1).repeat(1, *pose3D.shape[1:-1], 1), pose3D) \
                       + t.unsqueeze(1).unsqueeze(1).repeat(1, *pose3D.shape[1:-1], 1)
        pose2D_camed = project_to_2d_purelinear(pose3D_camed)
        return {
            'pose3D_camed': pose3D_camed,
            'pose2D_camed': pose2D_camed,
            'r': r,
            't': t,
        }
################################################################################################
############################# dis 2D #####################################
################################################################################################
from function.gan_utils import diff
class Pose2DVideoDiscriminator(nn.Module):
    """Clip-level discriminator over 2D pose sequences plus per-joint speed."""
    def __init__(self, ks=3, nh_conv1d=[64, 64]):
        super(Pose2DVideoDiscriminator, self).__init__()
        self.num_joints = 16
        # per-frame features: 16 joints x 2 coords + 16 speed magnitudes = 16*3
        self.traj_path = Dis_Conv1D(16*3, ks, nh_conv1d=nh_conv1d)
    def forward(self, inputs_2d):
        '''
        inputs_2d: B x T x 16 x 2 (or B x T x 32, reshaped internally)
        '''
        if len(inputs_2d.shape) == 3 and inputs_2d.shape[-1] == self.num_joints * 2:
            b, t, jd = inputs_2d.shape
            inputs_2d = inputs_2d.view(b, t, self.num_joints, 2)
        b, t, j, d = inputs_2d.shape
        assert j == self.num_joints
        # per-joint speed magnitude between consecutive frames
        speed = torch.norm(diff(inputs_2d), dim=3, keepdim=True)
        features = torch.cat([inputs_2d.reshape(b, t, -1),
                              speed.reshape(b, t, -1),
                              ], dim=2)
        return self.traj_path(features)
class Pose2DVideoDiscriminatorV2(nn.Module):
    """Clip-level discriminator over raw 2D pose sequences (no velocity channel)."""
    def __init__(self, ks=3, nh_conv1d=[64, 64]):
        super(Pose2DVideoDiscriminatorV2, self).__init__()
        self.num_joints = 16
        # per-frame features: 16 joints x 2 coords
        self.traj_path = Dis_Conv1D(16*2, ks, nh_conv1d=nh_conv1d)
    def forward(self, inputs_2d):
        '''
        inputs_2d: B x T x 16 x 2 (or B x T x 32, reshaped internally)
        '''
        if len(inputs_2d.shape) == 3 and inputs_2d.shape[-1] == self.num_joints * 2:
            b, t, jd = inputs_2d.shape
            inputs_2d = inputs_2d.view(b, t, self.num_joints, 2)
        assert inputs_2d.shape[2] == self.num_joints
        # Dis_Conv1D flattens the trailing joint/coord dims itself
        return self.traj_path(inputs_2d)
# MLP version
class Pos2dPairDiscriminator(nn.Module):
    """MLP discriminator over the (first, last) frame pair of a 2D pose clip."""
    def __init__(self, num_joints=16, d_ch_num=64):
        super(Pos2dPairDiscriminator, self).__init__()
        # NOTE: pose_layer_3 is created but unused in forward; it is kept so
        # existing checkpoints and seeded initialization stay compatible.
        self.pose_layer_1 = nn.Linear(num_joints*2*2, d_ch_num)
        self.pose_layer_2 = nn.Linear(d_ch_num, d_ch_num)
        self.pose_layer_3 = nn.Linear(d_ch_num, d_ch_num)
        self.layer_last = nn.Linear(d_ch_num, d_ch_num)
        self.layer_pred = nn.Linear(d_ch_num, 1)
        self.relu = nn.LeakyReLU()
    def forward(self, x_in):
        """
        x_in: b x T x 16 x 2; only the first and last frames are scored.
        """
        pair = x_in[:, [0, -1]] * 1.
        flat = pair.contiguous().view(pair.size(0), -1)
        h = self.relu(self.pose_layer_1(flat))
        h = self.relu(self.pose_layer_2(h))
        h = self.relu(self.layer_last(h))
        return self.layer_pred(h)
class Pos2dPairDiscriminator_v5(nn.Module):
    """MLP discriminator with batch-norm over the (first, last) 2D pose pair."""
    def __init__(self, num_joints=16, d_ch_num=64):
        super(Pos2dPairDiscriminator_v5, self).__init__()
        # creation order of layers kept stable for checkpoint compatibility
        self.pose_layer_1 = nn.Linear(num_joints*2*2, d_ch_num)
        self.bn_layer_1 = nn.BatchNorm1d(d_ch_num)
        self.pose_layer_2 = nn.Linear(d_ch_num, d_ch_num)
        self.bn_layer_2 = nn.BatchNorm1d(d_ch_num)
        self.layer_last = nn.Linear(d_ch_num, d_ch_num)
        self.bn_last = nn.BatchNorm1d(d_ch_num)
        self.layer_pred = nn.Linear(d_ch_num, 1)
        self.relu = nn.LeakyReLU()
    def forward(self, x_in):
        """
        x_in: b x T x 16 x 2; only the first and last frames are scored.
        """
        pair = x_in[:, [0, -1]] * 1.
        flat = pair.contiguous().view(pair.size(0), -1)
        h = self.relu(self.bn_layer_1(self.pose_layer_1(flat)))
        h = self.relu(self.bn_layer_2(self.pose_layer_2(h)))
        h = self.relu(self.bn_last(self.layer_last(h)))
        return self.layer_pred(h)
from function.gan_utils import get_BoneVecbypose3d
class Pos2dPairDiscriminator_v6(nn.Module):
    """
    MLP discriminator over a reduced (first, last)-frame representation:
    only the selected root joint position (joint 0) and one selected bone
    vector (bone 6) are scored, with batch-norm between the linear layers.
    """
    def __init__(self, num_joints=16, d_ch_num=16): # d_ch_num=100 default
        super(Pos2dPairDiscriminator_v6, self).__init__()
        # indices of the joints / bone vectors fed to the discriminator;
        # note that num_joints is overwritten by the selected-joint count
        self.joint_idx_toD = [0]
        num_joints = len(self.joint_idx_toD)
        self.bv_idx_toD = [6]
        num_jbv = len(self.bv_idx_toD)
        # Pose path: (selected joints + selected bones) x 2 coords x 2 frames
        self.pose_layer_1 = nn.Linear((num_joints+num_jbv)*2*2, d_ch_num)
        self.bn_layer_1 = nn.BatchNorm1d(d_ch_num)
        self.pose_layer_2 = nn.Linear(d_ch_num, d_ch_num)
        self.bn_layer_2 = nn.BatchNorm1d(d_ch_num)
        self.layer_last = nn.Linear(d_ch_num, d_ch_num)
        self.bn_last = nn.BatchNorm1d(d_ch_num)
        self.layer_pred = nn.Linear(d_ch_num, 1)
        self.relu = nn.LeakyReLU()
    def forward(self, x_in):
        """
        input: b x T x 16 x 2; only the first and last frames are used.
        """
        sz = x_in.shape
        # per-frame bone vectors: b x T x 15 x 2
        x_bv = get_BoneVecbypose3d(x_in.reshape(-1, 16, 2)).reshape(sz[0], sz[1], 15, 2)
        # NOTE(review): the combined advanced indexing with [0, -1] and the
        # single-element index lists broadcasts to shape b x 2 x 2 -- confirm
        # before selecting more than one joint/bone index
        x1 = x_bv[:, [0, -1], self.bv_idx_toD] * 1.  # only use the end frames
        x2 = x_in[:, [0, -1], self.joint_idx_toD] * 1.  # only use the end frames
        x = torch.cat([x1, x2], dim=2)
        x = x.contiguous().view(x.size(0), -1)
        d = self.relu(self.bn_layer_1(self.pose_layer_1(x)))
        d = self.relu(self.bn_layer_2(self.pose_layer_2(d)))
        d_last = self.relu(self.bn_last(self.layer_last(d)))
        d_out = self.layer_pred(d_last)
        return d_out
if __name__ == '__main__':
    # shape-only smoke test of the discriminators on zero-filled inputs
    d = Dis_Conv1D(48, 3)
    input = torch.zeros(64, 32, 48) # B x T x J3
    out = d(input)
    print('out: ', out.shape)
    # 4-D input path: B x T x J x 2
    d = Pose2DVideoDiscriminator(3)
    input = torch.zeros(64, 75, 16, 2) # B x T x 16 x 2
    out = d(input)
    print('out: ', out.shape)
| 14,619 | 34.228916 | 128 | py |
PoseTriplet | PoseTriplet-main/estimator/poseaugtool/model_conv1d/conv1d.py | import torch.nn as nn
import torch
import torch.nn.functional as F
##############################################################
class DoubleConv(nn.Module):
    """(convolution => [BN] => ReLU) * 2 over a 1-D sequence."""
    def __init__(self, in_channels, out_channels, mid_channels=None, kernel_size=3, padding=1, stride=1):
        super().__init__()
        if not mid_channels:
            # default bottleneck width: half of the output channels
            mid_channels = out_channels // 2
        # the stride argument only applies to the second convolution
        self.double_conv = nn.Sequential(
            nn.Conv1d(in_channels, mid_channels, kernel_size=kernel_size,
                      padding=padding, stride=1, padding_mode='replicate'),
            nn.BatchNorm1d(mid_channels),
            nn.ReLU(inplace=True),
            nn.Conv1d(mid_channels, out_channels, kernel_size=kernel_size,
                      padding=padding, stride=stride, padding_mode='replicate'),
            nn.BatchNorm1d(out_channels),
            nn.ReLU(inplace=True)
        )
    def forward(self, x):
        return self.double_conv(x)
class Up(nn.Module):
    """Upscaling (x2 in time) followed by a DoubleConv."""
    def __init__(self, in_channels, out_channels, bilinear=True):
        super().__init__()
        self.bilinear = bilinear
        if bilinear:
            # parameter-free upsampling; the convs halve the channel count
            self.conv = DoubleConv(in_channels, out_channels, in_channels // 2)
        else:
            self.up = nn.ConvTranspose1d(in_channels, in_channels, kernel_size=2, stride=2)
            self.conv = DoubleConv(in_channels, out_channels)
    def forward(self, x):
        if self.bilinear:
            # treat the sequence as a (T, 1) image so bilinear interp doubles T only
            up = nn.functional.interpolate(input=x.unsqueeze(-1), scale_factor=[2, 1],
                                           mode='bilinear', align_corners=True).squeeze(-1)
        else:
            up = self.up(x)
        return self.conv(up)
##############################################################
#############
##############################################################
class OutConv(nn.Module):
    """1x1 convolution projecting to the output channel count."""
    def __init__(self, in_channels, out_channels):
        super(OutConv, self).__init__()
        self.conv = nn.Conv1d(in_channels, out_channels, kernel_size=1)
    def forward(self, x):
        out = self.conv(x)
        return out
class OutDoubleConv(nn.Module):
    """Conv1d => BN => ReLU followed by a 1x1 projection head.

    NOTE(review): the stride parameter is accepted but not applied to either
    convolution (the first conv hard-codes stride=1) -- confirm intent.
    """
    def __init__(self, in_channels, out_channels, mid_channels=None, stride=1, padding=1):
        super().__init__()
        if not mid_channels:
            # default bottleneck width: half of the output channels
            mid_channels = out_channels // 2
        self.double_conv = nn.Sequential(
            nn.Conv1d(in_channels, mid_channels, kernel_size=3, padding=padding,
                      stride=1, padding_mode='replicate'),
            nn.BatchNorm1d(mid_channels),
            nn.ReLU(inplace=True),
            nn.Conv1d(mid_channels, out_channels, kernel_size=1),
        )
    def forward(self, x):
        return self.double_conv(x)
##############################################################
#############
##############################################################
class Conv1dTBlock(nn.Module):
    '''
    Simplified conv stack: only the first layer uses the temporal kernel ks;
    all subsequent layers are pointwise (kernel_size=1).
    '''
    def __init__(self, input_dim, hidden_dims=[128], ks=2, activation='tanh'):
        super().__init__()
        named_acts = {'tanh': torch.tanh, 'relu': torch.relu, 'sigmoid': torch.sigmoid}
        if activation in named_acts:
            self.activation = named_acts[activation]
        elif activation == 'leak':
            self.activation = nn.LeakyReLU()
        self.out_dim = hidden_dims[-1]
        self.affine_layers = nn.ModuleList()
        width_in = input_dim
        kernel = ks
        for width_out in hidden_dims:
            self.affine_layers.append(nn.Conv1d(width_in, width_out, kernel_size=kernel))
            kernel = 1  # only the first layer sees the temporal kernel
            width_in = width_out
    def forward(self, x):
        out = x
        for affine in self.affine_layers:
            out = self.activation(affine(out))
        return out
class Conv1dBlock(nn.Module):
    """Stack of Conv1d pairs (temporal kernel ks + pointwise), each followed by the activation."""
    def __init__(self, input_dim, hidden_dims=[128], ks=2, activation='tanh'):
        super().__init__()
        named_acts = {'tanh': torch.tanh, 'relu': torch.relu, 'sigmoid': torch.sigmoid}
        if activation in named_acts:
            self.activation = named_acts[activation]
        elif activation == 'leak':
            self.activation = nn.LeakyReLU(0.2, inplace=True)
        self.out_dim = hidden_dims[-1]
        self.affine_layers = nn.ModuleList()
        width_in = input_dim
        for width_out in hidden_dims:
            # temporal conv followed by a 1x1 conv at the same width
            self.affine_layers.append(nn.Conv1d(width_in, width_out, kernel_size=ks))
            self.affine_layers.append(nn.Conv1d(width_out, width_out, kernel_size=1))
            width_in = width_out
    def forward(self, x):
        out = x
        for affine in self.affine_layers:
            out = self.activation(affine(out))
        return out
| 5,285 | 36.489362 | 122 | py |
PoseTriplet | PoseTriplet-main/hallucinator/code_rib/ribpose2bvh.py |
import random
import torch.optim as optim
import os
import datetime
import os.path as path
from torch.autograd import Variable
from progress.bar import Bar
from time import time
from bvh_skeleton import humanoid_1205_skeleton
from bvh_skeleton.camera import world2cam_sktpos, cam2world_sktpos
import torch
import numpy as np
from tqdm import tqdm
import pickle
import multiprocessing
import math
import glob
import argparse
def ribpose2bvh(take_list, expert_dict):
    """Export every take's world-space skeleton sequence to .bvh files.

    Thin wrapper around the multi-process exporter; expert_dict maps take
    names to dicts containing a 'skt_wpos' array.
    """
    write_standard_bvh_multi_process(take_list, expert_dict)
    return
def write_standard_bvh_multi_process(takes, result_all_dict):
    """Export each take's skeleton sequence to a .bvh file, num_threads takes at a time.

    Relies on the module-level globals `traj_save_path` (output directory) and
    `num_threads` (process batch size), both set in the __main__ block.
    NOTE(review): using a nested function as a Process target only works with
    the 'fork' start method -- it is not picklable under 'spawn'; confirm the
    intended platforms.
    """
    def wrap_write_standard_bvh(take):
        # T x 16 x 3 world-space joint positions for this take (copied so the
        # worker never mutates the shared dict entry)
        predicted_3d_wpos_withroot = np.copy(result_all_dict[take]['skt_wpos']).reshape(-1, 16, 3)
        bvhfileName = '{}/{}.bvh'.format(traj_save_path, take)
        write_standard_bvh(bvhfileName, predicted_3d_wpos_withroot)
    # launch the takes in batches of num_threads parallel worker processes
    task_lst = takes
    for ep in range(math.ceil(len(task_lst) / num_threads)):
        p_lst = []
        for i in range(num_threads):
            idx = ep * num_threads + i
            if idx >= len(task_lst):
                break
            p = multiprocessing.Process(target=wrap_write_standard_bvh, args=(task_lst[idx],))
            p_lst.append(p)
        for p in p_lst:
            p.start()
        # wait for the whole batch before starting the next one
        for p in p_lst:
            p.join()
        print('complete ep:', ep)
    # end.
def write_standard_bvh(bvhfileName, prediction3dpoint):
    '''
    Write a T x 16 x 3 world-space pose sequence to *bvhfileName*.
    :param bvhfileName: output .bvh path (parent directories are created)
    :param prediction3dpoint: T x 16 x 3 joint positions
    :return: None
    '''
    # negate and convert back to the world convention expected by the BVH rig
    prediction3dpoint = cam2world_sktpos(prediction3dpoint * -1)
    mkd(bvhfileName)
    # lift the 16-joint skeleton to the 21-joint BVH rig, then serialize
    converter = humanoid_1205_skeleton.SkeletonConverter()
    joints21 = converter.convert_to_21joint(prediction3dpoint)
    skeleton = humanoid_1205_skeleton.H36mSkeleton()
    skeleton.poses2bvh(joints21, output_file=bvhfileName)
# ..mk dir
def mkd(target_dir, get_parent=True):
    """Create a directory, or (by default) the parent directory of *target_dir*.

    With get_parent=True, target_dir is treated as a file path and its
    containing directory is created; existing directories are left untouched.
    """
    if get_parent:
        savedir = os.path.abspath(os.path.join(target_dir, os.pardir))
    else:
        savedir = target_dir
    # exist_ok makes this safe to call from concurrent workers
    os.makedirs(savedir, exist_ok=True)
if __name__ == '__main__':
    """
    convert rib motion to rl motion
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--traj_save_path', type=str, default='debug')
    args = parser.parse_args()
    # module-level globals consumed by the multi-process exporter
    traj_save_path = args.traj_save_path
    # load and concatenate all generated pose clips (N x T x 22 x 3 -- TODO confirm)
    npy_folder = '../traj_pose'
    npy_path_list = glob.glob(npy_folder+'/*.npy')
    array_list = []
    for npy_path in npy_path_list:
        tmp = np.load(npy_path)
        array_list.append(tmp)
    rib_pose_seq = np.concatenate(array_list, axis=0)
    # build take names; ids start at 600 -- presumably to avoid clashing with
    # existing h36m take ids, confirm against downstream consumers
    take_list = ['h36m_take_{:0>3d}'.format(i) for i in range(rib_pose_seq.shape[0] + 600)][600:]
    expert_dict = {}
    # convert each pose from the 22-joint rig to the 16-joint skeleton
    joint_keep = [0,1,2,3,5,6,7,11,12,13,15,16,17,19,20,21]
    for i, take in enumerate(take_list):
        expert_dict[take] = {}
        tmp_1 = rib_pose_seq[i] * 1.
        # drop the first frame and keep only the selected joints
        tmp_2 = tmp_1[1:, joint_keep, :]
        expert_dict[take]['skt_wpos'] = tmp_2
    ######################################################################
    num_threads = 12
    ribpose2bvh(take_list, expert_dict)
| 3,920 | 26.041379 | 98 | py |
PoseTriplet | PoseTriplet-main/hallucinator/code_rib/test.py | import torch
import sys, os
sys.path.insert(0, os.path.dirname(__file__))
from LaFan import LaFan1
from torch.utils.data import Dataset, DataLoader
from model import StateEncoder, \
OffsetEncoder, \
TargetEncoder, \
LSTM, \
Decoder, \
ShortMotionDiscriminator, \
LongMotionDiscriminator
from skeleton import Skeleton
import torch.optim as optim
from tensorboardX import SummaryWriter
import numpy as np
from tqdm import tqdm
from functions import gen_ztta, write_to_bvhfile
import yaml
import time
import shutil
import imageio
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import axes3d, Axes3D
from remove_fs import remove_fs, save_bvh_from_network_output
from foot_sliding.animation_data import y_rotation_from_positions
from PIL import Image
def plot_pose(pose, cur_frame, prefix):
    """Render three stacked skeletons (red/blue/green thirds of *pose*) to a PNG.

    pose: (3*J, 3) array of joint positions; saved as '<prefix>_<cur_frame>.png'.
    """
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    parents = [-1, 0, 1, 2, 3, 0, 5, 6, 7, 0, 9, 10, 11, 12, 11, 14, 15, 16, 11, 18, 19, 20]
    ax.cla()
    num_joint = pose.shape[0] // 3
    # draw every bone for each of the three stacked skeletons
    for child, parent in enumerate(parents):
        if child <= 0:
            continue
        for block_idx, color in enumerate(('r', 'b', 'g')):
            ci = child + num_joint * block_idx
            pi = parent + num_joint * block_idx
            # note the axis remap: plot (x, z, y) so y is the vertical axis
            ax.plot([pose[ci, 0], pose[pi, 0]],
                    [pose[ci, 2], pose[pi, 2]],
                    [pose[ci, 1], pose[pi, 1]], c=color)
    # cubic bounding box so all three skeletons share a common scale;
    # floor division on floats is kept from the original implementation
    xmin, xmax = np.min(pose[:, 0]), np.max(pose[:, 0])
    ymin, ymax = np.min(pose[:, 2]), np.max(pose[:, 2])
    zmin, zmax = np.min(pose[:, 1]), np.max(pose[:, 1])
    scale = np.max([xmax - xmin, ymax - ymin, zmax - zmin])
    xmid = (xmax + xmin) // 2
    ymid = (ymax + ymin) // 2
    zmid = (zmax + zmin) // 2
    ax.set_xlim(xmid - scale // 2, xmid + scale // 2)
    ax.set_ylim(ymid - scale // 2, ymid + scale // 2)
    ax.set_zlim(zmid - scale // 2, zmid + scale // 2)
    plt.draw()
    plt.savefig(prefix + '_' + str(cur_frame) + '.png', dpi=200, bbox_inches='tight')
    plt.close()
if __name__ == '__main__':
opt = yaml.load(open('./config/test-base.yaml', 'r').read())
model_dir =opt['test']['model_dir']
## initilize the skeleton ##
skeleton_mocap = Skeleton(offsets=opt['data']['offsets'], parents=opt['data']['parents'])
skeleton_mocap.cuda()
skeleton_mocap.remove_joints(opt['data']['joints_to_remove'])
## load train data ##
lafan_data_test = LaFan1(opt['data']['data_dir'], \
seq_len = opt['model']['seq_length'], \
offset = 40,\
train = False, debug=opt['test']['debug'])
lafan_data_test.cur_seq_length = opt['model']['seq_length']
x_mean = lafan_data_test.x_mean.cuda()
x_std = lafan_data_test.x_std.cuda().view(1, 1, opt['model']['num_joints'], 3)
lafan_loader_test = DataLoader(lafan_data_test, \
batch_size=opt['test']['batch_size'], \
shuffle=False, num_workers=opt['data']['num_workers'])
## initialize model and load parameters ##
state_encoder = StateEncoder(in_dim=opt['model']['state_input_dim'])
state_encoder = state_encoder.cuda()
state_encoder.load_state_dict(torch.load(os.path.join(opt['test']['model_dir'], 'state_encoder.pkl')))
offset_encoder = OffsetEncoder(in_dim=opt['model']['offset_input_dim'])
offset_encoder = offset_encoder.cuda()
offset_encoder.load_state_dict(torch.load(os.path.join(opt['test']['model_dir'], 'offset_encoder.pkl')))
target_encoder = TargetEncoder(in_dim=opt['model']['target_input_dim'])
target_encoder = target_encoder.cuda()
target_encoder.load_state_dict(torch.load(os.path.join(opt['test']['model_dir'], 'target_encoder.pkl')))
lstm = LSTM(in_dim=opt['model']['lstm_dim'], hidden_dim = opt['model']['lstm_dim'] * 2)
lstm = lstm.cuda()
lstm.load_state_dict(torch.load(os.path.join(opt['test']['model_dir'], 'lstm.pkl')))
decoder = Decoder(in_dim=opt['model']['lstm_dim'] * 2, out_dim=opt['model']['state_input_dim'])
decoder = decoder.cuda()
decoder.load_state_dict(torch.load(os.path.join(opt['test']['model_dir'], 'decoder.pkl')))
print('model loaded')
## get positional code ##
if opt['test']['use_ztta']:
ztta = gen_ztta().cuda()
# print('ztta:', ztta.size())
# assert 0
version = opt['test']['version']
# writer = SummaryWriter(log_dir)
loss_total_min = 10000000.0
for epoch in range(opt['test']['num_epoch']):
state_encoder.eval()
offset_encoder.eval()
target_encoder.eval()
lstm.eval()
decoder.eval()
loss_total_list = []
for i_batch, sampled_batch in enumerate(lafan_loader_test):
# if i_batch != 33:
# continue
pred_img_list = []
gt_img_list = []
img_list = []
# print(i_batch, sample_batched['local_q'].size())
loss_pos = 0
loss_quat = 0
loss_contact = 0
loss_root = 0
with torch.no_grad():
# if True:
# state input
local_q = sampled_batch['local_q'].cuda()
root_v = sampled_batch['root_v'].cuda()
contact = sampled_batch['contact'].cuda()
# offset input
root_p_offset = sampled_batch['root_p_offset'].cuda()
local_q_offset = sampled_batch['local_q_offset'].cuda()
local_q_offset = local_q_offset.view(local_q_offset.size(0), -1)
# target input
target = sampled_batch['target'].cuda()
target = target.view(target.size(0), -1)
# root pos
root_p = sampled_batch['root_p'].cuda()
# X
X = sampled_batch['X'].cuda()
bs = 6#np.random.choice(X.size(0), 1)[0]
if False:
print('local_q:', local_q.size(), \
'root_v:', root_v.size(), \
'contact:', contact.size(), \
'root_p_offset:', root_p_offset.size(), \
'local_q_offset:', local_q_offset.size(), \
'target:', target.size())
assert 0
lstm.init_hidden(local_q.size(0))
h_list = []
quat_list = []
quat_list.append(local_q[:,0,].view(local_q.size(0), -1, 4))
pred_list = []
pred_list.append(X[:,0])
bvh_list = []
bvh_list.append(torch.cat([X[:,0,0], local_q[:,0,].view(local_q.size(0), -1)], -1))
contact_list = []
contact_list.append(contact[:,0])
root_list = []
root_list.append(X[:,0,0])
# print(X.size())
for t in range(opt['model']['seq_length'] - 1):
# root pos
if t == 0:
root_p_t = root_p[:,t]
local_q_t = local_q[:,t]
local_q_t = local_q_t.view(local_q_t.size(0), -1)
contact_t = contact[:,t]
root_v_t = root_v[:,t]
else:
root_p_t = root_pred[0]
local_q_t = local_q_pred[0]
contact_t = contact_pred[0]
root_v_t = root_v_pred[0]
# state input
state_input = torch.cat([local_q_t, root_v_t, contact_t], -1)
# offset input
root_p_offset_t = root_p_offset - root_p_t
local_q_offset_t = local_q_offset - local_q_t
# print('root_p_offset_t:', root_p_offset_t.size(), 'local_q_offset_t:', local_q_offset_t.size())
offset_input = torch.cat([root_p_offset_t, local_q_offset_t], -1)
# target input
target_input = target
# print('state_input:',state_input.size())
h_state = state_encoder(state_input)
h_offset = offset_encoder(offset_input)
h_target = target_encoder(target_input)
if opt['test']['use_ztta']:
h_state += ztta[:, t]
h_offset += ztta[:, t]
h_target += ztta[:, t]
if opt['test']['use_adv']:
tta = opt['model']['seq_length'] - 2 - t
if tta < 5:
lambda_target = 0.0
elif tta >=5 and tta < 30:
lambda_target = (tta - 5) / 25.0
else:
lambda_target = 1.0
h_offset += 0.5 * lambda_target * torch.cuda.FloatTensor(h_offset.size()).normal_()
h_target += 0.5 * lambda_target * torch.cuda.FloatTensor(h_target.size()).normal_()
h_in = torch.cat([h_state, h_offset, h_target], -1).unsqueeze(0)
h_out = lstm(h_in)
# print('h_out:', h_out.size())
h_pred, contact_pred = decoder(h_out)
local_q_v_pred = h_pred[:,:,:opt['model']['target_input_dim']]
local_q_pred = local_q_v_pred + local_q_t
# print('q_pred:', q_pred.size())
local_q_pred_ = local_q_pred.view(local_q_pred.size(0), local_q_pred.size(1), -1, 4)
local_q_pred_ = local_q_pred_ / torch.norm(local_q_pred_, dim = -1, keepdim = True)
# print("local_q_pred_:", local_q_pred_.size())
quat_list.append(local_q_pred_[0])
root_v_pred = h_pred[:,:,opt['model']['target_input_dim']:]
root_pred = root_v_pred + root_p_t
root_list.append(root_pred[0])
# print(''contact:'', contact_pred.size())
# print('root_pred:', root_pred.size())
bvh_list.append(torch.cat([root_pred[0], local_q_pred_[0].view(local_q_pred_.size(1), -1)], -1))
pos_pred = skeleton_mocap.forward_kinematics(local_q_pred_, root_pred)
pos_next = X[:,t+1]
local_q_next = local_q[:,t+1]
local_q_next = local_q_next.view(local_q_next.size(0), -1)
root_p_next = root_p[:,t+1]
contact_next = contact[:,t+1]
# print(pos_pred.size(), x_std.size())
loss_pos += torch.mean(torch.abs(pos_pred[0] - pos_next) / x_std) / opt['model']['seq_length']
loss_quat += torch.mean(torch.abs(local_q_pred[0] - local_q_next)) / opt['model']['seq_length']
loss_root += torch.mean(torch.abs(root_pred[0] - root_p_next) / x_std[:,:,0]) / opt['model']['seq_length']
loss_contact += torch.mean(torch.abs(contact_pred[0] - contact_next)) / opt['model']['seq_length']
pred_list.append(pos_pred[0])
contact_list.append(contact_pred[0])
# if i_batch < 49:
# print("pos_pred:", pos_pred.size())
if opt['test']['save_img']:
plot_pose(np.concatenate([X[bs,0].view(22, 3).detach().cpu().numpy(),\
pos_pred[0, bs].view(22, 3).detach().cpu().numpy(),\
X[bs,-1].view(22, 3).detach().cpu().numpy()], 0),\
t, '../results'+version+'/pred')
plot_pose(np.concatenate([X[bs,0].view(22, 3).detach().cpu().numpy(),\
X[bs,t+1].view(22, 3).detach().cpu().numpy(),\
X[bs,-1].view(22, 3).detach().cpu().numpy()], 0),\
t, '../results'+version+'/gt')
pred_img = Image.open('../results'+version+'/pred_'+str(t)+'.png', 'r')
gt_img = Image.open('../results'+version+'/gt_'+str(t)+'.png', 'r')
pred_img_list.append(pred_img)
gt_img_list.append(gt_img)
img_list.append(np.concatenate([pred_img, gt_img.resize(pred_img.size)], 1))
# print('pivots:', pivots.shape)
# print('rot_data.size:', rot_data.shape)
if opt['test']['save_bvh']:
# print("bs:", bs)
bvh_data = torch.cat([x[bs].unsqueeze(0) for x in bvh_list], 0).detach().cpu().numpy()
# print('bvh_data:', bvh_data.shape)
# print('bvh_data:', bvh_data[0,3:7])
# assert 0
write_to_bvhfile(bvh_data, ('../bvh_seq/test_%03d.bvh' % i_batch), opt['data']['joints_to_remove'])
# assert 0
contact_data = torch.cat([x[bs].unsqueeze(0) for x in contact_list], 0).detach().cpu().numpy()
# rot_data = torch.cat([x[bs].unsqueeze(0) for x in quat_list], 0).detach().cpu().numpy()
# root_data = torch.cat([x[bs].unsqueeze(0) for x in root_list], 0).detach().cpu().numpy()
# pred_pose = torch.cat([x[bs].unsqueeze(0) for x in pred_list], 0).detach().cpu().numpy()
# quaters, pivots = y_rotation_from_positions(pred_pose, hips = (1,5), sdrs = (14,18))
# motion = np.concatenate([rot_data.reshape(rot_data.shape[0], -1),\
# root_data,\
# pivots], -1)
# motion = motion.transpose(1,0)
foot = contact_data.transpose(1,0)
foot[foot > 0.5] = 1.0
foot[foot <= 0.5] = 0.0
# print('foot[0]:',foot[0])
glb = remove_fs(('../bvh_seq/test_%03d.bvh' % i_batch), \
foot, \
fid_l=(3, 4), \
fid_r=(7, 8),\
output_path=("../bvh_seq_after"+version+"/test_%03d.bvh" % i_batch))
fix_img_list = []
for t in range(opt['model']['seq_length']):
plot_pose(np.concatenate([X[bs,0].view(22, 3).detach().cpu().numpy(),\
glb[t],\
X[bs,-1].view(22, 3).detach().cpu().numpy()], 0),\
t, '../results'+version+'/fixed')
plot_pose(np.concatenate([X[bs,0].view(22, 3).detach().cpu().numpy(),\
X[bs,t].view(22, 3).detach().cpu().numpy(),\
X[bs,-1].view(22, 3).detach().cpu().numpy()], 0),\
t, '../results'+version+'/gt')
fix_img = Image.open('../results'+version+'/fixed_'+str(t)+'.png', 'r')
gt_img = Image.open('../results'+version+'/gt_'+str(t)+'.png', 'r')
fix_img_list.append(np.concatenate([fix_img, gt_img.resize(fix_img.size)], 1))
imageio.mimsave(('../gif'+version+'/img_fix_%03d.gif' % i_batch), fix_img_list, duration=0.1)
# save_bvh_from_network_output(motion, output_path=("../bvh_seq_after/test_%03d.bvh" % i_batch))
# if i_batch < 49:
if opt['test']['save_img'] and opt['test']['save_gif']:
imageio.mimsave(('../gif'+version+'/img_%03d.gif' % i_batch), img_list, duration=0.1)
if opt['test']['save_pose']:
gt_pose = X[bs,:].view(opt['model']['seq_length'], 22, 3).detach().cpu().numpy()
pred_pose = torch.cat([x[bs].unsqueeze(0) for x in pred_list], 0).detach().cpu().numpy()
plt.clf()
joint_idx = 13
plt.plot(range(opt['model']['seq_length']), gt_pose[:,joint_idx,0])
plt.plot(range(opt['model']['seq_length']), pred_pose[:,joint_idx,0])
plt.legend(['gt', 'pred'])
plt.savefig('../results'+version+'/pose_%03d.png' % i_batch)
plt.close()
# if opt['test']['save_img'] and i_batch > 49:
# break
if opt['test']['save_pose'] and i_batch > 49:
break
# print("train epoch: %03d, cur total loss:%.3f, cur best loss:%.3f" % (epoch, loss_total_cur, loss_total_min))
| 17,898 | 51.336257 | 126 | py |
PoseTriplet | PoseTriplet-main/hallucinator/code_rib/py_utils.py | import os
import numpy as np
import torch
def merge_dict(dict_list):
    """Sum-merge a sequence of dicts into one.

    Values that share a key are accumulated with ``+`` starting from 0.0,
    so values are expected to support addition with a float (numbers,
    0-dim tensors, ...).

    :param dict_list: iterable of dicts to merge
    :return: a new dict mapping each key to the sum of its values
    """
    ret = {}
    # `d` instead of `dict`: the original shadowed the builtin name.
    for d in dict_list:
        for key, value in d.items():
            # dict.get with a default replaces the try/except-KeyError dance.
            ret[key] = ret.get(key, 0.0) + value
    return ret
def update_dict(old_dict, new_dict):
    """Copy every key/value pair of new_dict into old_dict, in place."""
    old_dict.update(new_dict)
def ensure_dir(path):
    """
    Create `path` (including parents) unless it already exists; report either way.
    :param path: directory path
    :return:
    """
    if os.path.exists(path):
        print(path, " already exists.")
        return
    print("Create folder ", path)
    os.makedirs(path)
def ensure_dirs(paths):
    """
    Create one or many directories, delegating each one to ensure_dir.
    :param paths: a single path, or a list of paths
    :return:
    """
    if not (isinstance(paths, list) and not isinstance(paths, str)):
        ensure_dir(paths)
        return
    for p in paths:
        ensure_dir(p)
def write_loss(iterations, trainer, train_writer):
    """Log every entry of trainer.loss_dict to the writer at step iterations + 1."""
    step = iterations + 1
    for tag, scalar in trainer.loss_dict.items():
        train_writer.add_scalar(tag, scalar, step)
def print_composite(data, beg=""):
    """Recursively pretty-print a nested dict/list structure to stdout.

    Arrays and tensors are summarised by shape only; any other leaf is
    printed verbatim. `beg` carries the indentation accumulated so far.
    """
    if isinstance(data, dict):
        print(f'{beg} dict, size = {len(data)}')
        for key, value in data.items():
            print(f'  {beg}{key}:')
            # Recurse with a deeper indent prefix.
            print_composite(value, beg + "  ")
    elif isinstance(data, list):
        print(f'{beg} list, len = {len(data)}')
        for i, item in enumerate(data):
            print(f'  {beg}item {i}')
            print_composite(item, beg + "  ")
    elif isinstance(data, np.ndarray) or isinstance(data, torch.Tensor):
        # Leaf arrays/tensors: only the shape is printed, not the contents.
        print(f'{beg} array of size {data.shape}')
    else:
        print(f'{beg} {data}')
def to_float(item):
    """Collapse a single-element tensor/array to a plain Python float.

    Tensors are moved to CPU and converted to numpy first; a numpy array
    holding exactly one value becomes a float. Anything else is returned
    unchanged.
    """
    if isinstance(item, torch.Tensor):
        item = item.to('cpu').numpy()
    if isinstance(item, np.ndarray) and item.reshape(-1).shape[0] == 1:
        item = float(item)
    return item
def mkd(target_dir, get_parent=False):
    """Make sure a directory exists on disk, creating parents as needed.

    :param target_dir: path of interest
    :param get_parent: when True, create target_dir's parent instead of target_dir
    """
    savedir = os.path.abspath(os.path.join(target_dir, os.pardir)) if get_parent else target_dir
    if not os.path.exists(savedir):
        os.makedirs(savedir, exist_ok=True)
def count_param(model, name):
    """Print the total number of elements across a model's parameters."""
    model_params = sum(p.numel() for p in model.parameters())
    print('INFO: Trainable parameter count for model {} is:{}'.format(name, model_params))
if __name__ == "__main__":
    # Quick smoke test of to_float on tensors of different sizes.
    bla = np.random.rand(1, 1, 1)
    bla = torch.tensor(bla)
    cla = np.random.rand(2, 3)
    cla = torch.tensor(cla)
    print(to_float(bla))  # single element -> plain float
    print(to_float(cla))  # six elements -> stays an array
    print(to_float("bla"))  # non-tensor -> returned unchanged
| 2,734 | 25.553398 | 90 | py |
PoseTriplet | PoseTriplet-main/hallucinator/code_rib/functions.py | import torch
import numpy as np
import torch.nn as nn
from quaternion import qeuler_np
from remove_fs import remove_fs
def PLU(x, alpha = 0.1, c = 1.0):
    """Piecewise Linear Unit.

    Identity on [-c, c], slope `alpha` outside; equivalently
    max(alpha*(x+c)-c, min(x, alpha*(x-c)+c)). Same math as the original
    ReLU formulation, written with elementwise min/max.
    """
    lower = alpha * (x + c) - c
    upper = alpha * (x - c) + c
    return torch.max(lower, torch.min(x, upper))
def gen_ztta(dim = 256, length = 50):
    """Sinusoidal time-to-arrival embedding (currently without T_max).

    Entry [0, t, d] encodes the remaining time (length - t) with
    transformer-style frequencies 10000**(d/dim): sin on even channels,
    cos on odd channels. Vectorised with numpy instead of the original
    O(length*dim) pair of Python loops.

    :param dim: embedding dimension
    :param length: sequence length
    :return: float32 tensor of shape (1, length, dim)
    """
    tta = (length - np.arange(length, dtype=np.float64))[:, None]     # (length, 1)
    freqs = 10000.0 ** (np.arange(dim, dtype=np.float64) / dim)       # (dim,)
    angles = tta / freqs                                              # (length, dim)
    even = np.arange(dim) % 2 == 0
    ztta = np.where(even, np.sin(angles), np.cos(angles))[None]       # (1, length, dim)
    return torch.from_numpy(ztta.astype(np.float32))
def gen_ztar(sigma = 1.0, length = 50):
### currently noise term in not inroduced ###
lambda_tar = []
for t in range(length):
if t < 5:
lambda_tar.append(0)
elif t < 30 and t >= 5:
lambda_tar.append((t - 5.0) / 25.0)
else:
lambda_tar.append(1)
lambda_tar = np.array(lambda_tar)
return torch.from_numpy(lambda_tar)
def write_to_bvhfile(data, filename, joints_to_remove):
    """Write a motion sequence to a BVH file.

    Copies the first 132 lines (the skeleton HIERARCHY section) from
    './example.bvh', then writes one MOTION line per frame. Frame rate is
    hard-coded to 30 fps (0.033333s). `joints_to_remove` is accepted but
    unused here.

    :param data: per-frame array; columns 0..2 are the root position,
                 the rest are per-joint quaternions (groups of 4) —
                 assumed (w, x, y, z) order; TODO confirm against qeuler_np.
    """
    fout = open(filename, 'w')
    line_cnt = 0
    # Copy the fixed-size hierarchy header from the template BVH.
    for line in open('./example.bvh', 'r'):
        fout.write(line)
        line_cnt += 1
        if line_cnt >= 132:
            break
    fout.write(('Frames: %d\n' % data.shape[0]))
    fout.write('Frame Time: 0.033333\n')
    # Quaternions -> Euler angles (zyx order), still in radians here.
    pose_data = qeuler_np(data[:,3:].reshape(data.shape[0], -1, 4), order='zyx', use_gpu=False)
    # pose_data = np.concatenate([pose_data[:,:5], np.zeros_like(pose_data[:,0:1]),\
    #                             pose_data[:,5:9], np.zeros_like(pose_data[:,0:1]),\
    #                             pose_data[:,9:14], np.zeros_like(pose_data[:,0:1]),\
    #                             pose_data[:,14:18], np.zeros_like(pose_data[:,0:1]),\
    #                             pose_data[:,18:22], np.zeros_like(pose_data[:,0:1])], 1)
    # Radians -> degrees, the unit BVH expects.
    pose_data = pose_data / np.pi * 180.0
    for t in range(data.shape[0]):
        line = '%f %f %f ' % (data[t, 0], data[t, 1], data[t, 2])
        # Channels are written z, y, x per joint to match the template's order.
        for d in range(pose_data.shape[1] - 1):
            line += '%f %f %f ' % (pose_data[t, d, 2], pose_data[t, d, 1], pose_data[t, d, 0])
        line += '%f %f %f\n' % (pose_data[t, -1, 2], pose_data[t, -1, 1], pose_data[t, -1, 0])
        fout.write(line)
    fout.close()
| 2,426 | 36.338462 | 95 | py |
PoseTriplet | PoseTriplet-main/hallucinator/code_rib/model.py | import torch
import numpy as np
import torch.nn as nn
from functions import PLU
class StateEncoder(nn.Module):
    """Two-layer MLP with PLU activations embedding the per-frame state.

    Attribute names (fc0, fc1) are kept for checkpoint compatibility.
    """

    def __init__(self, in_dim = 128, hidden_dim = 512, out_dim = 256):
        super(StateEncoder, self).__init__()
        self.in_dim = in_dim
        self.hidden_dim = hidden_dim
        self.out_dim = out_dim
        self.fc0 = nn.Linear(in_dim, hidden_dim, bias=True)
        self.fc1 = nn.Linear(hidden_dim, out_dim, bias=True)

    def forward(self, x):
        """Map (..., in_dim) -> (..., out_dim)."""
        hidden = PLU(self.fc0(x))
        return PLU(self.fc1(hidden))
class OffsetEncoder(nn.Module):
    """Two-layer PLU MLP embedding the offset-to-target input.

    Structurally identical to StateEncoder; attribute names kept for
    checkpoint compatibility.
    """

    def __init__(self, in_dim = 128, hidden_dim = 512, out_dim = 256):
        super(OffsetEncoder, self).__init__()
        self.in_dim = in_dim
        self.hidden_dim = hidden_dim
        self.out_dim = out_dim
        self.fc0 = nn.Linear(in_dim, hidden_dim, bias=True)
        self.fc1 = nn.Linear(hidden_dim, out_dim, bias=True)

    def forward(self, x):
        """Map (..., in_dim) -> (..., out_dim)."""
        hidden = PLU(self.fc0(x))
        return PLU(self.fc1(hidden))
class TargetEncoder(nn.Module):
    """Two-layer PLU MLP embedding the target-frame input.

    Structurally identical to StateEncoder; attribute names kept for
    checkpoint compatibility.
    """

    def __init__(self, in_dim = 128, hidden_dim = 512, out_dim = 256):
        super(TargetEncoder, self).__init__()
        self.in_dim = in_dim
        self.hidden_dim = hidden_dim
        self.out_dim = out_dim
        self.fc0 = nn.Linear(in_dim, hidden_dim, bias=True)
        self.fc1 = nn.Linear(hidden_dim, out_dim, bias=True)

    def forward(self, x):
        """Map (..., in_dim) -> (..., out_dim)."""
        hidden = PLU(self.fc0(x))
        return PLU(self.fc1(hidden))
class LSTM(nn.Module):
    """Thin wrapper around nn.LSTM that carries its recurrent state in
    self.h / self.c across forward calls. Call init_hidden() before the
    first forward of each sequence.
    """

    def __init__(self, in_dim = 128, hidden_dim = 768, num_layer = 1):
        super(LSTM, self).__init__()
        self.in_dim = in_dim
        self.hidden_dim = hidden_dim
        self.num_layer = num_layer
        self.rnn = nn.LSTM(self.in_dim, self.hidden_dim, self.num_layer)

    def init_hidden(self, batch_size):
        """Reset hidden/cell state to zeros on the GPU."""
        state_shape = (self.num_layer, batch_size, self.hidden_dim)
        self.h = torch.zeros(state_shape).cuda()
        self.c = torch.zeros(state_shape).cuda()

    def forward(self, x):
        """Advance the LSTM by one call, updating the stored state."""
        out, (self.h, self.c) = self.rnn(x, (self.h, self.c))
        return out
class Decoder(nn.Module):
    """Decode the LSTM feature into (state update, contact probabilities).

    fc2 emits the first out_dim - 4 values; fc_conct emits 4 contact
    probabilities squashed through a sigmoid. Attribute names kept for
    checkpoint compatibility.
    """

    def __init__(self, in_dim = 128, hidden_dim = 512, out_dim = 256):
        super(Decoder, self).__init__()
        self.in_dim = in_dim
        self.hidden_dim = hidden_dim
        self.out_dim = out_dim
        self.fc0 = nn.Linear(in_dim, hidden_dim, bias=True)
        self.fc1 = nn.Linear(hidden_dim, hidden_dim // 2, bias=True)
        self.fc2 = nn.Linear(hidden_dim // 2, out_dim - 4, bias=True)
        self.fc_conct = nn.Linear(hidden_dim // 2, 4, bias=True)
        self.ac_sig = nn.Sigmoid()

    def forward(self, x):
        """Return (raw state update, sigmoid contact probabilities)."""
        hidden = PLU(self.fc1(PLU(self.fc0(x))))
        return self.fc2(hidden), self.ac_sig(self.fc_conct(hidden))
class ShortMotionDiscriminator(nn.Module):
    """1-D convolutional discriminator over short (length-3) motion windows.

    Input is (batch, in_dim, time); the first conv has kernel size
    `length`, the rest are pointwise. Attribute names kept for checkpoint
    compatibility.
    """

    def __init__(self, length = 3, in_dim = 128, hidden_dim = 512, out_dim = 1):
        super(ShortMotionDiscriminator, self).__init__()
        self.in_dim = in_dim
        self.hidden_dim = hidden_dim
        self.out_dim = out_dim
        self.length = length
        self.fc0 = nn.Conv1d(in_dim, hidden_dim, kernel_size = self.length, bias=True)
        self.fc1 = nn.Conv1d(hidden_dim, hidden_dim // 2, kernel_size = 1, bias=True)
        self.fc2 = nn.Conv1d(hidden_dim // 2, out_dim, kernel_size = 1, bias=True)

    def forward(self, x):
        """Map (batch, in_dim, T) -> (batch, out_dim, T - length + 1)."""
        hidden = PLU(self.fc0(x))
        hidden = PLU(self.fc1(hidden))
        return self.fc2(hidden)
class LongMotionDiscriminator(nn.Module):
    """1-D convolutional discriminator over long (length-10) motion windows.

    Identical architecture to ShortMotionDiscriminator apart from the
    default kernel length. Attribute names kept for checkpoint
    compatibility.
    """

    def __init__(self, length = 10, in_dim = 128, hidden_dim = 512, out_dim = 1):
        super(LongMotionDiscriminator, self).__init__()
        self.in_dim = in_dim
        self.hidden_dim = hidden_dim
        self.out_dim = out_dim
        self.length = length
        self.fc0 = nn.Conv1d(in_dim, hidden_dim, kernel_size = self.length, bias=True)
        self.fc1 = nn.Conv1d(hidden_dim, hidden_dim // 2, kernel_size = 1, bias=True)
        self.fc2 = nn.Conv1d(hidden_dim // 2, out_dim, kernel_size = 1, bias=True)

    def forward(self, x):
        """Map (batch, in_dim, T) -> (batch, out_dim, T - length + 1)."""
        hidden = PLU(self.fc0(x))
        hidden = PLU(self.fc1(hidden))
        return self.fc2(hidden)
if __name__=="__main__":
    # Shape smoke test for each module with its default dimensions.
    state_encoder = StateEncoder()
    x = torch.zeros((32, 128))
    print(state_encoder(x).size())
    offset_encoder = OffsetEncoder()
    x = torch.zeros((32, 128))
    print(offset_encoder(x).size())
    target_encoder = TargetEncoder()
    x = torch.zeros((32, 128))
    print(target_encoder(x).size())
    lstm = LSTM(32)
    x = torch.zeros((10, 32, 128))
    # NOTE(review): lstm.init_hidden() is never called here, and the input
    # feature size (128) does not match LSTM(32)'s in_dim — verify this
    # smoke test actually runs.
    print(lstm(x).size())
    decoder = Decoder()
    x = torch.zeros((32, 128))
    print(decoder(x)[0].size())
    short_dis = ShortMotionDiscriminator()
    x = torch.zeros((32, 128, 50))
    print(short_dis(x).size())
    long_dis = LongMotionDiscriminator()
    x = torch.zeros((32, 128, 50))
    print(long_dis(x).size())
| 5,161 | 30.096386 | 86 | py |
PoseTriplet | PoseTriplet-main/hallucinator/code_rib/remove_fs.py | import os
import sys
import numpy as np
import torch
import argparse
from tqdm import tqdm
BASEPATH = os.path.dirname(__file__)
from os.path import join as pjoin
sys.path.insert(0, BASEPATH)
sys.path.insert(0, pjoin(BASEPATH, '..'))
import foot_sliding.BVH as BVH
from foot_sliding.InverseKinematics import JacobianInverseKinematics
from foot_sliding.animation_data import AnimationData
def softmax(x, **kw):
    """Smooth maximum: max(x) + log(softness + exp(min(x) - max(x))).

    Despite the name this is a soft max *aggregate*, not the probability
    softmax. Remaining keyword args (e.g. axis=) are forwarded to
    np.max / np.min.
    """
    softness = kw.pop("softness", 1.0)
    hi = np.max(x, **kw)
    lo = np.min(x, **kw)
    return hi + np.log(softness + np.exp(lo - hi))
def softmin(x, **kw):
    # Soft minimum via the identity min(x) = -max(-x); kwargs forwarded to softmax.
    return -softmax(-x, **kw)
def alpha(t):
    # Cubic ease (2t^3 - 3t^2 + 1): 1 at t=0, 0 at t=1, zero slope at both ends.
    return 2.0 * t * t * t - 3.0 * t * t + 1
def lerp(a, l, r):
    """Linear interpolation from l (a=0) to r (a=1)."""
    return a * r + (1 - a) * l
def parse_args():
    """Parse CLI options; --data names the input and the output folder prefix."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--data", type=str, default="bla_3d")
    return parser.parse_args()
def nrot2anim(filename):
    """Load a BVH file and rebuild it as an AnimationData with global positions.

    NOTE(review): despite the module's naming this takes a BVH *filename*,
    not a network-output rotation array — the from_network_output path is
    commented out below.

    :return: ((bvh, joint names, frame time), global joint positions)
    """
    anim = AnimationData.from_BVH(filename, downsample=1)
    # anim = AnimationData.from_network_output(nrot)
    bvh, names, ftime = anim.get_BVH()
    # Round-trip through rotations + root positions to normalise the data.
    anim = AnimationData.from_rotations_and_root_positions(np.array(bvh.rotations), bvh.positions[:, 0, :])
    glb = anim.get_global_positions(trim=False)
    # print('bvh.rotations:', np.array(bvh.rotations)[0,0])
    # assert 0
    return (bvh, names, ftime), glb
def save_bvh_from_network_output(nrot, output_path):
    """Decode a network-output rotation array into a BVH file at output_path.

    Creates the output directory first if it does not exist.
    """
    anim = AnimationData.from_network_output(nrot)
    bvh, names, ftime = anim.get_BVH()
    if not os.path.exists(os.path.dirname(output_path)):
        os.makedirs(os.path.dirname(output_path))
    BVH.save(output_path, bvh, names, ftime)
def remove_fs(filename, foot, output_path, fid_l=(4, 5), fid_r=(9, 10), interp_length=5, force_on_floor=False):
    """Remove foot sliding from a BVH animation.

    Pipeline: (1) load the animation and its global joint positions,
    (2) shift everything so the soft-minimum foot height becomes the floor,
    (3) pin each foot joint in place over every run of contact frames
    (replacing its position with the run average), (4) blend neighbouring
    non-contact frames toward the pinned positions with a cubic ease, and
    (5) run Jacobian IK so the joint angles match the edited positions,
    then save the result.

    :param filename: input BVH path
    :param foot: contact flags, one row per foot joint — assumes shape
                 [len(fid_l)+len(fid_r), T] aligned with fid order; TODO confirm
    :param output_path: where the fixed BVH is written (dirs created as needed)
    :param fid_l, fid_r: left/right foot joint indices
    :param interp_length: how many frames around a contact run are blended
    :param force_on_floor: clamp pinned feet to height 0
    :return: the edited global positions [T, J, 3]
    """
    (anim, names, ftime), glb = nrot2anim(filename)
    T = len(glb)

    fid = list(fid_l) + list(fid_r)
    fid_l, fid_r = np.array(fid_l), np.array(fid_r)
    # Lowest foot height per frame, used to estimate the floor level.
    foot_heights = np.minimum(glb[:, fid_l, 1],
                              glb[:, fid_r, 1]).min(axis=1)  # [T, 2] -> [T]
    # print(np.min(foot_heights))
    floor_height = softmin(foot_heights, softness=0.5, axis=0)
    # print(floor_height)
    # Re-ground the whole animation (global positions AND the root track).
    glb[:, :, 1] -= floor_height
    anim.positions[:, 0, 1] -= floor_height

    glb_cp = glb.copy()  # NOTE(review): unused afterwards — dead copy?

    for i, fidx in enumerate(fid):
        fixed = foot[i]  # [T]
        """
        for t in range(T):
            glb[t, fidx][1] = max(glb[t, fidx][1], 0.25)
        """
        # Pin the foot during each maximal run of contact frames: replace
        # every position in the run with the run's average.
        s = 0
        while s < T:
            while s < T and fixed[s] == 0:
                s += 1
            if s >= T:
                break
            t = s
            avg = glb[t, fidx].copy()
            while t + 1 < T and fixed[t + 1] == 1:
                t += 1
                avg += glb[t, fidx].copy()
            avg /= (t - s + 1)

            if force_on_floor:
                avg[1] = 0.0

            for j in range(s, t + 1):
                glb[j, fidx] = avg.copy()

            # print(fixed[s - 1:t + 2])

            s = t + 1

        # Blend free frames near a contact run toward the pinned position so
        # the transition is smooth (cubic ease via alpha/lerp).
        for s in range(T):
            if fixed[s] == 1:
                continue
            l, r = None, None
            consl, consr = False, False
            # Nearest contact frame within interp_length to the left...
            for k in range(interp_length):
                if s - k - 1 < 0:
                    break
                if fixed[s - k - 1]:
                    l = s - k - 1
                    consl = True
                    break
            # ...and to the right.
            for k in range(interp_length):
                if s + k + 1 >= T:
                    break
                if fixed[s + k + 1]:
                    r = s + k + 1
                    consr = True
                    break

            if not consl and not consr:
                continue
            if consl and consr:
                # Contacts on both sides: blend each side, then mix by distance.
                litp = lerp(alpha(1.0 * (s - l + 1) / (interp_length + 1)),
                            glb[s, fidx], glb[l, fidx])
                ritp = lerp(alpha(1.0 * (r - s + 1) / (interp_length + 1)),
                            glb[s, fidx], glb[r, fidx])
                itp = lerp(alpha(1.0 * (s - l + 1) / (r - l + 1)),
                           ritp, litp)
                glb[s, fidx] = itp.copy()
                continue
            if consl:
                litp = lerp(alpha(1.0 * (s - l + 1) / (interp_length + 1)),
                            glb[s, fidx], glb[l, fidx])
                glb[s, fidx] = litp.copy()
                continue
            if consr:
                ritp = lerp(alpha(1.0 * (r - s + 1) / (interp_length + 1)),
                            glb[s, fidx], glb[r, fidx])
                glb[s, fidx] = ritp.copy()

    # IK targets: every joint's (edited) global trajectory.
    targetmap = {}
    for j in range(glb.shape[1]):
        targetmap[j] = glb[:, j]

    ik = JacobianInverseKinematics(anim, targetmap, iterations=10, damping=4.0,
                                   silent=False)
    ik()

    if not os.path.exists(os.path.dirname(output_path)):
        os.makedirs(os.path.dirname(output_path))
    BVH.save(output_path, anim, names, ftime)
    return glb
def process_data(filename, style_and_content=True, output_dir=None, selected=None):
    """Debug driver: writes a dummy motion as BVH and runs remove_fs on it.

    Most of the original torch.load-based batch processing is commented
    out; only a single synthetic (92 x 100) motion is exercised.
    `style_and_content` and `selected` are currently unused.
    """
    # data = torch.load(filename, map_location="cpu")
    # feet = data["foot_contact"]
    # motions = data["trans"]
    # if selected is None:
    #     selected = range(len(motions))
    # for num in tqdm(selected):
    for num in range(1):
        # feet = feet[num].detach().numpy()
        # if style_and_content:
        #     style = styles[num].detach().numpy()
        #     content = contents[num].detach().numpy()
        #     save_bvh_from_network_output(style.copy(), output_path=pjoin(output_dir, "style_%02d.bvh" % num))
        #     save_bvh_from_network_output(content.copy(), output_path=pjoin(output_dir, "content_%02d.bvh" % num))
        motion = np.ones((92, 100))
        foot = np.zeros((4, 100))
        # motion = motions[num].detach().numpy()
        save_bvh_from_network_output(motion, output_path=pjoin(output_dir, "raw_%02d.bvh" % num))
        # NOTE(review): remove_fs expects a BVH *filename* as its first
        # argument, but receives the motion array here — verify this path.
        remove_fs(motion, foot, output_path=pjoin(output_dir, "after_%02d.bvh" % num))
def main(args):
    """Create the '<data>_bvh' output folder (if missing) and process the data."""
    output_dir = args.data + "_bvh"
    try:
        os.mkdir(output_dir)
    except FileExistsError:
        # Re-running into an existing folder is fine.
        pass
    process_data(args.data, output_dir=output_dir)


if __name__ == '__main__':
    args = parse_args()
    main(args)
| 6,250 | 29.642157 | 115 | py |
PoseTriplet | PoseTriplet-main/hallucinator/code_rib/quaternion.py |
import torch
import numpy as np
# PyTorch-backed implementations
def qmul(q, r):
    """
    Hamilton product q*r of batched quaternions in (w, x, y, z) order.
    Both inputs are (*, 4) tensors with matching shapes; the result has
    the same shape.
    """
    assert q.shape[-1] == 4
    assert r.shape[-1] == 4

    out_shape = q.shape

    # All pairwise products r_i * q_j via one batched outer product.
    prod = torch.bmm(r.view(-1, 4, 1), q.view(-1, 1, 4))

    w = prod[:, 0, 0] - prod[:, 1, 1] - prod[:, 2, 2] - prod[:, 3, 3]
    x = prod[:, 0, 1] + prod[:, 1, 0] - prod[:, 2, 3] + prod[:, 3, 2]
    y = prod[:, 0, 2] + prod[:, 1, 3] + prod[:, 2, 0] - prod[:, 3, 1]
    z = prod[:, 0, 3] - prod[:, 1, 2] + prod[:, 2, 1] + prod[:, 3, 0]

    return torch.stack((w, x, y, z), dim=1).view(out_shape)
def qrot(q, v):
    """
    Rotate 3D vector(s) v by quaternion(s) q in (w, x, y, z) order.
    q is (*, 4), v is (*, 3) with matching batch dims; returns (*, 3).
    Uses the identity v' = v + 2*(w*(u x v) + u x (u x v)) with u = q.xyz.
    """
    assert q.shape[-1] == 4
    assert v.shape[-1] == 3
    assert q.shape[:-1] == v.shape[:-1]

    out_shape = list(v.shape)
    q_flat = q.view(-1, 4)
    v_flat = v.view(-1, 3)

    u = q_flat[:, 1:]
    uv = torch.cross(u, v_flat, dim=1)
    uuv = torch.cross(u, uv, dim=1)
    return (v_flat + 2 * (q_flat[:, :1] * uv + uuv)).view(out_shape)
def qeuler(q, order, epsilon=0):
    """
    Convert quaternion(s) q to Euler angles.
    Expects a tensor of shape (*, 4), where * denotes any number of dimensions.
    Returns a tensor of shape (*, 3).

    :param q: quaternions in (w, x, y, z) order
    :param order: rotation order, one of 'xyz','yzx','zxy','xzy','yxz','zyx'
    :param epsilon: clamp margin keeping asin away from the +/-1 poles
    :raises ValueError: if `order` is not a recognised rotation order
    """
    assert q.shape[-1] == 4

    original_shape = list(q.shape)
    original_shape[-1] = 3
    q = q.view(-1, 4)

    q0 = q[:, 0]
    q1 = q[:, 1]
    q2 = q[:, 2]
    q3 = q[:, 3]

    if order == 'xyz':
        x = torch.atan2(2 * (q0 * q1 - q2 * q3), 1 - 2*(q1 * q1 + q2 * q2))
        y = torch.asin(torch.clamp(2 * (q1 * q3 + q0 * q2), -1+epsilon, 1-epsilon))
        z = torch.atan2(2 * (q0 * q3 - q1 * q2), 1 - 2*(q2 * q2 + q3 * q3))
    elif order == 'yzx':
        x = torch.atan2(2 * (q0 * q1 - q2 * q3), 1 - 2*(q1 * q1 + q3 * q3))
        y = torch.atan2(2 * (q0 * q2 - q1 * q3), 1 - 2*(q2 * q2 + q3 * q3))
        z = torch.asin(torch.clamp(2 * (q1 * q2 + q0 * q3), -1+epsilon, 1-epsilon))
    elif order == 'zxy':
        x = torch.asin(torch.clamp(2 * (q0 * q1 + q2 * q3), -1+epsilon, 1-epsilon))
        y = torch.atan2(2 * (q0 * q2 - q1 * q3), 1 - 2*(q1 * q1 + q2 * q2))
        z = torch.atan2(2 * (q0 * q3 - q1 * q2), 1 - 2*(q1 * q1 + q3 * q3))
    elif order == 'xzy':
        x = torch.atan2(2 * (q0 * q1 + q2 * q3), 1 - 2*(q1 * q1 + q3 * q3))
        y = torch.atan2(2 * (q0 * q2 + q1 * q3), 1 - 2*(q2 * q2 + q3 * q3))
        z = torch.asin(torch.clamp(2 * (q0 * q3 - q1 * q2), -1+epsilon, 1-epsilon))
    elif order == 'yxz':
        x = torch.asin(torch.clamp(2 * (q0 * q1 - q2 * q3), -1+epsilon, 1-epsilon))
        y = torch.atan2(2 * (q1 * q3 + q0 * q2), 1 - 2*(q1 * q1 + q2 * q2))
        z = torch.atan2(2 * (q1 * q2 + q0 * q3), 1 - 2*(q1 * q1 + q3 * q3))
    elif order == 'zyx':
        x = torch.atan2(2 * (q0 * q1 + q2 * q3), 1 - 2*(q1 * q1 + q2 * q2))
        y = torch.asin(torch.clamp(2 * (q0 * q2 - q1 * q3), -1+epsilon, 1-epsilon))
        z = torch.atan2(2 * (q0 * q3 + q1 * q2), 1 - 2*(q2 * q2 + q3 * q3))
    else:
        # Was a bare `raise`, which itself fails with
        # "RuntimeError: No active exception to re-raise".
        raise ValueError('Unknown rotation order: %s' % order)

    return torch.stack((x, y, z), dim=1).view(original_shape)
# Numpy-backed implementations
def qmul_np(q, r):
    """Numpy front-end for qmul: convert to torch, multiply, convert back."""
    q_t = torch.from_numpy(q).contiguous()
    r_t = torch.from_numpy(r).contiguous()
    return qmul(q_t, r_t).numpy()
def qrot_np(q, v):
    """Numpy front-end for qrot: convert to torch, rotate, convert back."""
    q_t = torch.from_numpy(q).contiguous()
    v_t = torch.from_numpy(v).contiguous()
    return qrot(q_t, v_t).numpy()
def qeuler_np(q, order, epsilon=0, use_gpu=False):
    """Numpy front-end for qeuler; optionally runs the conversion on the GPU."""
    if use_gpu:
        return qeuler(torch.from_numpy(q).cuda(), order, epsilon).cpu().numpy()
    return qeuler(torch.from_numpy(q).contiguous(), order, epsilon).numpy()
def qfix(q):
    """
    Enforce quaternion continuity along the time axis by flipping the sign
    wherever consecutive frames have negative dot product (q and -q encode
    the same rotation, so the flip changes nothing geometrically).

    Expects an array of shape (L, J, 4); returns a new array of the same shape.
    """
    assert len(q.shape) == 3
    assert q.shape[-1] == 4

    fixed = q.copy()
    # Negative dot product between frame t and t-1 => antipodal flip needed;
    # cumsum mod 2 propagates the flip state forward through time.
    dots = np.sum(q[1:] * q[:-1], axis=2)
    flip = (np.cumsum(dots < 0, axis=0) % 2).astype(bool)
    fixed[1:][flip] *= -1
    return fixed
def expmap_to_quaternion(e):
    """
    Convert axis-angle rotations (exponential maps) to quaternions using the
    numerically stable half-angle sinc formulation from
    "Practical Parameterization of Rotations Using the Exponential Map".
    Input (*, 3) -> output (*, 4) in (w, x, y, z) order.
    """
    assert e.shape[-1] == 3

    out_shape = list(e.shape)
    out_shape[-1] = 4
    flat = e.reshape(-1, 3)

    theta = np.linalg.norm(flat, axis=1).reshape(-1, 1)
    w = np.cos(0.5 * theta).reshape(-1, 1)
    # np.sinc is the normalized sinc, hence the division by pi.
    xyz = 0.5 * np.sinc(0.5 * theta / np.pi) * flat
    return np.concatenate((w, xyz), axis=1).reshape(out_shape)
def euler_to_quaternion(e, order):
    """
    Convert Euler angles to quaternions.

    :param e: angles of shape (*, 3), columns are the x/y/z angles in radians
    :param order: rotation order string, e.g. 'zyx' (each char in 'xyz')
    :return: quaternions of shape (*, 4) in (w, x, y, z) order
    :raises ValueError: if `order` contains an unknown axis character
    """
    assert e.shape[-1] == 3

    original_shape = list(e.shape)
    original_shape[-1] = 4

    e = e.reshape(-1, 3)

    x = e[:, 0]
    y = e[:, 1]
    z = e[:, 2]

    # Per-axis half-angle quaternions.
    rx = np.stack((np.cos(x/2), np.sin(x/2), np.zeros_like(x), np.zeros_like(x)), axis=1)
    ry = np.stack((np.cos(y/2), np.zeros_like(y), np.sin(y/2), np.zeros_like(y)), axis=1)
    rz = np.stack((np.cos(z/2), np.zeros_like(z), np.zeros_like(z), np.sin(z/2)), axis=1)

    result = None
    for coord in order:
        if coord == 'x':
            r = rx
        elif coord == 'y':
            r = ry
        elif coord == 'z':
            r = rz
        else:
            # Was a bare `raise`, which itself fails with
            # "RuntimeError: No active exception to re-raise".
            raise ValueError('Unknown coordinate axis: %s' % coord)
        if result is None:
            result = r
        else:
            result = qmul_np(result, r)

    # Reverse antipodal representation to have a non-negative "w"
    if order in ['xyz', 'yzx', 'zxy']:
        result *= -1

    return result.reshape(original_shape)
| 6,413 | 32.936508 | 102 | py |
PoseTriplet | PoseTriplet-main/hallucinator/code_rib/test-randomfuture-v1.py | import torch
import sys, os
sys.path.insert(0, os.path.dirname(__file__))
from LaFan import LaFan1
from torch.utils.data import Dataset, DataLoader
from model import StateEncoder, \
OffsetEncoder, \
TargetEncoder, \
LSTM, \
Decoder, \
ShortMotionDiscriminator, \
LongMotionDiscriminator
from skeleton import Skeleton
import torch.optim as optim
from tensorboardX import SummaryWriter
import numpy as np
from tqdm import tqdm
from functions import gen_ztta, write_to_bvhfile
import yaml
import time
import shutil
import imageio
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import axes3d, Axes3D
from remove_fs import remove_fs, save_bvh_from_network_output
from foot_sliding.animation_data import y_rotation_from_positions
from PIL import Image
from py_utils import mkd, count_param
import argparse
"""
"""
def plot_pose(pose, cur_frame, prefix):
    """Render three stacked 22-joint skeletons and save a png.

    `pose` holds three skeletons concatenated along axis 0 (start pose,
    current/predicted pose, target pose), each num_joint x 3. The figure is
    written to '<prefix>_<cur_frame>.png'. Note the y/z axes are swapped for
    display (data y is plotted as matplotlib z and vice versa).
    """
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    parents = [-1, 0, 1, 2, 3, 0, 5, 6, 7, 0, 9, 10, 11, 12, 11, 14, 15, 16, 11, 18, 19, 20]
    ax.cla()
    num_joint = pose.shape[0] // 3
    # Draw every bone once per skeleton: red = first, blue = second, green = third.
    for joint, parent in enumerate(parents):
        if joint <= 0:
            continue
        for block, colour in enumerate(('r', 'b', 'g')):
            a = joint + num_joint * block
            b = parent + num_joint * block
            ax.plot([pose[a, 0], pose[b, 0]],
                    [pose[a, 2], pose[b, 2]],
                    [pose[a, 1], pose[b, 1]], c=colour)
    # Cubic axis box centred on the data; floor division (`//`) is kept from
    # the original so limits land on the same values.
    xmin = np.min(pose[:, 0])
    ymin = np.min(pose[:, 2])
    zmin = np.min(pose[:, 1])
    xmax = np.max(pose[:, 0])
    ymax = np.max(pose[:, 2])
    zmax = np.max(pose[:, 1])
    scale = np.max([xmax - xmin, ymax - ymin, zmax - zmin])
    xmid = (xmax + xmin) // 2
    ymid = (ymax + ymin) // 2
    zmid = (zmax + zmin) // 2
    ax.set_xlim(xmid - scale // 2, xmid + scale // 2)
    ax.set_ylim(ymid - scale // 2, ymid + scale // 2)
    ax.set_zlim(zmid - scale // 2, zmid + scale // 2)
    plt.draw()
    plt.savefig(prefix + '_' + str(cur_frame) + '.png', dpi=200, bbox_inches='tight')
    plt.close()
if __name__ == '__main__':
    # Inference/visualisation script: autoregressively rolls out the motion
    # in-betweening model, re-sampling a random future target pose at the
    # start of each of 10 passes of (seq_length - 1) frames, then optionally
    # renders pngs/gifs, fixes foot sliding, writes BVH files and dumps the
    # predicted pose trajectories as .npy.
    parser = argparse.ArgumentParser()
    parser.add_argument('--cfg', type=str, default='test-base.yaml')
    args = parser.parse_args()
    # NOTE(review): yaml.load without an explicit Loader is deprecated and
    # unsafe on untrusted files; the opened file handle is also never closed.
    opt = yaml.load(open('./config/' + args.cfg, 'r').read())
    model_dir =opt['test']['model_dir']
    ## initilize the skeleton ##
    skeleton_mocap = Skeleton(offsets=opt['data']['offsets'], parents=opt['data']['parents'])
    skeleton_mocap.cuda()
    skeleton_mocap.remove_joints(opt['data']['joints_to_remove'])
    ## load train data ##
    # NOTE(review): `train` is driven by the save_pose flag — presumably
    # intentional (use the training split when dumping poses); confirm.
    lafan_data_test = LaFan1(opt['data']['data_dir'], \
                        seq_len = opt['model']['seq_length'], \
                        offset = opt['data']['offset'],\
                        train = opt['test']['save_pose'], debug=opt['test']['debug'])
    lafan_data_test.cur_seq_length = opt['model']['seq_length']
    x_mean = lafan_data_test.x_mean.cuda()
    x_std = lafan_data_test.x_std.cuda().view(1, 1, opt['model']['num_joints'], 3)
    lafan_loader_test = DataLoader(lafan_data_test, \
                batch_size=opt['test']['batch_size'], \
                shuffle=False, num_workers=opt['data']['num_workers'])
    ## initialize model and load parameters ##
    state_encoder = StateEncoder(in_dim=opt['model']['state_input_dim'])
    state_encoder = state_encoder.cuda()
    state_encoder.load_state_dict(torch.load(os.path.join(opt['test']['model_dir'], 'state_encoder.pkl')))
    offset_encoder = OffsetEncoder(in_dim=opt['model']['offset_input_dim'])
    offset_encoder = offset_encoder.cuda()
    offset_encoder.load_state_dict(torch.load(os.path.join(opt['test']['model_dir'], 'offset_encoder.pkl')))
    target_encoder = TargetEncoder(in_dim=opt['model']['target_input_dim'])
    target_encoder = target_encoder.cuda()
    target_encoder.load_state_dict(torch.load(os.path.join(opt['test']['model_dir'], 'target_encoder.pkl')))
    lstm = LSTM(in_dim=opt['model']['lstm_dim'], hidden_dim = opt['model']['lstm_dim'] * 2)
    lstm = lstm.cuda()
    lstm.load_state_dict(torch.load(os.path.join(opt['test']['model_dir'], 'lstm.pkl')))
    decoder = Decoder(in_dim=opt['model']['lstm_dim'] * 2, out_dim=opt['model']['state_input_dim'])
    decoder = decoder.cuda()
    decoder.load_state_dict(torch.load(os.path.join(opt['test']['model_dir'], 'decoder.pkl')))
    print('model loaded')
    count_param(state_encoder, 'state_encoder')
    count_param(offset_encoder, 'offset_encoder')
    count_param(target_encoder, 'target_encoder')
    count_param(lstm, 'lstm')
    count_param(decoder, 'decoder')
    ## get positional code ##
    # ztta is an additive time-to-arrival embedding added to the encoder outputs.
    if opt['test']['use_ztta']:
        ztta = gen_ztta().cuda()
    # print('ztta:', ztta.size())
    # assert 0
    version = opt['test']['version']
    # writer = SummaryWriter(log_dir)
    loss_total_min = 10000000.0
    for epoch in range(opt['test']['num_epoch']):
        state_encoder.eval()
        offset_encoder.eval()
        target_encoder.eval()
        lstm.eval()
        decoder.eval()
        loss_total_list = []
        for i_batch, sampled_batch in enumerate(lafan_loader_test):
            # if i_batch != 33:
            #     continue
            pred_img_list = []
            gt_img_list = []
            img_list = []
            # print(i_batch, sample_batched['local_q'].size())
            # loss_pos = 0
            # loss_quat = 0
            # loss_contact = 0
            # loss_root = 0
            with torch.no_grad():
                # if True:
                # state input
                local_q = sampled_batch['local_q'].cuda() # 32 x 50 x 22 x 4
                root_v = sampled_batch['root_v'].cuda()
                contact = sampled_batch['contact'].cuda()
                # offset input
                root_p_offset = sampled_batch['root_p_offset'].cuda() # 32 x 3
                local_q_offset = sampled_batch['local_q_offset'].cuda() # 32 x 88
                local_q_offset = local_q_offset.view(local_q_offset.size(0), -1)
                # target input
                target = sampled_batch['target'].cuda()
                target = target.view(target.size(0), -1)
                # root pos
                root_p = sampled_batch['root_p'].cuda() # 32 x 50 x 3
                # X
                X = sampled_batch['X'].cuda()
                # Fixed batch element used for visualisation below.
                bs = 6#np.random.choice(X.size(0), 1)[0]
                # if False:
                #     print('local_q:', local_q.size(), ...)
                # assert 0
                lstm.init_hidden(local_q.size(0))
                h_list = []
                quat_list = []
                quat_list.append(local_q[:,0,].view(local_q.size(0), -1, 4))
                pred_list = []
                pred_list.append(X[:,0])
                bvh_list = []
                bvh_list.append(torch.cat([X[:,0,0], local_q[:,0,].view(local_q.size(0), -1)], -1))
                contact_list = []
                contact_list.append(contact[:,0])
                root_list = []
                root_list.append(X[:,0,0])
                # print(X.size())
                # for t in range(opt['model']['seq_length'] - 1):
                # 10 chained passes; the first follows the data, later passes
                # continue from the previous prediction with a random target.
                for txn in range((opt['model']['seq_length'] - 1) * 10):
                    if txn < (opt['model']['seq_length'] - 1):
                        t = txn
                        # root pos
                        if t == 0:
                            root_p_t = root_p[:,t]
                            local_q_t = local_q[:,t]
                            local_q_t = local_q_t.view(local_q_t.size(0), -1)
                            contact_t = contact[:,t]
                            root_v_t = root_v[:,t]
                        else:
                            # Feed back the previous step's predictions
                            # (root_pred etc. were set at the end of the loop body).
                            root_p_t = root_pred[0]
                            local_q_t = local_q_pred[0]
                            contact_t = contact_pred[0]
                            root_v_t = root_v_pred[0]
                    else:
                        # if True:
                        # reset t
                        t = txn % (opt['model']['seq_length'] - 1)
                        if t == 0:
                            # reset target pose: pick a random (sequence, frame)
                            # per batch element and jitter the root position.
                            sz = local_q.shape # 32 x 50 x 22 x 4
                            random_idx = np.random.randint(low=[0, 1], high=[sz[0], sz[1]], size=(sz[0], 2))
                            # offset input
                            # root_p_offset = sampled_batch['root_p_offset'].cuda()
                            tmp_root_pred = root_pred * 1. # 1x32x3
                            tmp_root_p_offset = root_p[random_idx[:, 0], random_idx[:, 1]] * 1.
                            random_mv = (torch.rand_like(root_p_offset) - 0.5) * 2.
                            random_mv[:, 0] = random_mv[:, 0] * 100. + tmp_root_pred[0, :, 0] * 1.
                            # NOTE(review): next line multiplies column 0 (not 1)
                            # by zero — harmless since the product is 0, but
                            # looks like a typo for random_mv[:, 1].
                            random_mv[:, 1] = random_mv[:, 0] * 0. + tmp_root_p_offset[:, 1] * 1. # assume height no change
                            random_mv[:, 2] = random_mv[:, 2] * 100. + tmp_root_pred[0, :, 2] * 1.
                            root_p_offset = random_mv
                            # local_q_offset = sampled_batch['local_q_offset'].cuda()
                            local_q_offset = local_q[random_idx[:, 0], random_idx[:, 1]] * 1.
                            local_q_offset = local_q_offset.view(local_q_offset.size(0), -1)
                            # target input
                            # target = sampled_batch['target'].cuda()
                            # target = target.view(target.size(0), -1)
                            target = local_q_offset * 1.
                            # reset lstm
                            lstm.init_hidden(local_q.size(0))
                            # target pose modify: overwrite the start/end frames
                            # used by the visualisation with pred/new target.
                            pos_target = skeleton_mocap.forward_kinematics(local_q_offset.view(1, -1, 22, 4), root_p_offset.view(1, -1, 3))
                            X[:, 0] = pos_pred[0, :] * 1.
                            X[:,-1] = pos_target[0, :] * 1.
                        # continue on pose seq
                        root_p_t = root_pred[0]
                        local_q_t = local_q_pred[0]
                        contact_t = contact_pred[0]
                        root_v_t = root_v_pred[0]
                    # state input
                    state_input = torch.cat([local_q_t, root_v_t, contact_t], -1)
                    # offset input
                    root_p_offset_t = root_p_offset - root_p_t
                    local_q_offset_t = local_q_offset - local_q_t
                    # print('root_p_offset_t:', root_p_offset_t.size(), 'local_q_offset_t:', local_q_offset_t.size())
                    offset_input = torch.cat([root_p_offset_t, local_q_offset_t], -1)
                    # target input
                    target_input = target
                    # print('state_input:',state_input.size())
                    h_state = state_encoder(state_input)
                    h_offset = offset_encoder(offset_input)
                    h_target = target_encoder(target_input)
                    if opt['test']['use_ztta']:
                        h_state += ztta[:, t]
                        h_offset += ztta[:, t]
                        h_target += ztta[:, t]
                    # Scheduled target-embedding noise, ramped by time-to-arrival.
                    if opt['test']['use_adv']:
                        tta = opt['model']['seq_length'] - 2 - t
                        if tta < 5:
                            lambda_target = 0.0
                        elif tta >=5 and tta < 30:
                            lambda_target = (tta - 5) / 25.0
                        else:
                            lambda_target = 1.0
                        h_offset += 0.5 * lambda_target * torch.cuda.FloatTensor(h_offset.size()).normal_()
                        h_target += 0.5 * lambda_target * torch.cuda.FloatTensor(h_target.size()).normal_()
                    h_in = torch.cat([h_state, h_offset, h_target], -1).unsqueeze(0)
                    h_out = lstm(h_in)
                    # print('h_out:', h_out.size())
                    # Decoder predicts residuals: quaternion velocity + root velocity.
                    h_pred, contact_pred = decoder(h_out)
                    local_q_v_pred = h_pred[:,:,:opt['model']['target_input_dim']]
                    local_q_pred = local_q_v_pred + local_q_t
                    # print('q_pred:', q_pred.size())
                    local_q_pred_ = local_q_pred.view(local_q_pred.size(0), local_q_pred.size(1), -1, 4)
                    local_q_pred_ = local_q_pred_ / torch.norm(local_q_pred_, dim = -1, keepdim = True)
                    # print("local_q_pred_:", local_q_pred_.size())
                    quat_list.append(local_q_pred_[0])
                    root_v_pred = h_pred[:,:,opt['model']['target_input_dim']:] # 1x32x3
                    root_pred = root_v_pred + root_p_t # 1x32x3
                    root_list.append(root_pred[0])
                    # print(''contact:'', contact_pred.size())
                    # print('root_pred:', root_pred.size())
                    bvh_list.append(torch.cat([root_pred[0], local_q_pred_[0].view(local_q_pred_.size(1), -1)], -1))
                    pos_pred = skeleton_mocap.forward_kinematics(local_q_pred_, root_pred)
                    # pos_next = X[:,t+1]
                    # local_q_next = local_q[:,t+1]
                    # local_q_next = local_q_next.view(local_q_next.size(0), -1)
                    # root_p_next = root_p[:,t+1]
                    # contact_next = contact[:,t+1]
                    # # print(pos_pred.size(), x_std.size())
                    # loss_pos += torch.mean(torch.abs(pos_pred[0] - pos_next) / x_std) / opt['model']['seq_length']
                    # loss_quat += torch.mean(torch.abs(local_q_pred[0] - local_q_next)) / opt['model']['seq_length']
                    # loss_root += torch.mean(torch.abs(root_pred[0] - root_p_next) / x_std[:,:,0]) / opt['model']['seq_length']
                    # loss_contact += torch.mean(torch.abs(contact_pred[0] - contact_next)) / opt['model']['seq_length']
                    pred_list.append(pos_pred[0])
                    contact_list.append(contact_pred[0])
                    # if i_batch < 49:
                    # print("pos_pred:", pos_pred.size())
                    # Per-frame rendering of start / predicted / target skeletons.
                    if opt['test']['save_img']:
                        mkd('../results' + version)
                        plot_pose(np.concatenate([X[bs,0].view(22, 3).detach().cpu().numpy(),\
                                pos_pred[0, bs].view(22, 3).detach().cpu().numpy(),\
                                X[bs,-1].view(22, 3).detach().cpu().numpy()], 0),\
                                t, '../results'+version+'/pred')
                        # plot_pose(np.concatenate([X[bs,0].view(22, 3).detach().cpu().numpy(),\
                        #         X[bs,t+1].view(22, 3).detach().cpu().numpy(),\
                        #         X[bs,-1].view(22, 3).detach().cpu().numpy()], 0),\
                        #         t, '../results'+version+'/gt')
                        pred_img = Image.open('../results'+version+'/pred_'+str(t)+'.png', 'r')
                        # gt_img = Image.open('../results'+version+'/gt_'+str(t)+'.png', 'r')
                        pred_img_list.append(pred_img)
                        # gt_img_list.append(gt_img)
                        # img_list.append(np.concatenate([pred_img, gt_img.resize(pred_img.size)], 1))
                        # img_list.append(np.concatenate([pred_img, pred_img], 1))
                        img_list.append(np.array(pred_img))
                        # img_list.append(np.array(pred_img))
            # print('pivots:', pivots.shape)
            # print('rot_data.size:', rot_data.shape)
            if opt['test']['save_bvh']:
                # NOTE(review): dead code — the assert below aborts before any
                # of the BVH export / foot-sliding fix runs.
                assert False, 'not work currently, very slow'
                # print("bs:", bs)
                bvh_data = torch.cat([x[bs].unsqueeze(0) for x in bvh_list], 0).detach().cpu().numpy()
                # print('bvh_data:', bvh_data.shape)
                # print('bvh_data:', bvh_data[0,3:7])
                # assert 0
                mkd('../bvh_seq')
                write_to_bvhfile(bvh_data, ('../bvh_seq/test_%03d.bvh' % i_batch), opt['data']['joints_to_remove'])
                # assert 0
                contact_data = torch.cat([x[bs].unsqueeze(0) for x in contact_list], 0).detach().cpu().numpy()
                # rot_data = torch.cat([x[bs].unsqueeze(0) for x in quat_list], 0).detach().cpu().numpy()
                # root_data = torch.cat([x[bs].unsqueeze(0) for x in root_list], 0).detach().cpu().numpy()
                # pred_pose = torch.cat([x[bs].unsqueeze(0) for x in pred_list], 0).detach().cpu().numpy()
                # quaters, pivots = y_rotation_from_positions(pred_pose, hips = (1,5), sdrs = (14,18))
                # motion = np.concatenate([rot_data.reshape(rot_data.shape[0], -1),\
                #                          root_data,\
                #                          pivots], -1)
                # motion = motion.transpose(1,0)
                # Binarise contact probabilities for the foot-sliding fix.
                foot = contact_data.transpose(1,0)
                foot[foot > 0.5] = 1.0
                foot[foot <= 0.5] = 0.0
                # print('foot[0]:',foot[0])
                mkd("../bvh_seq_after"+version)
                glb = remove_fs(('../bvh_seq/test_%03d.bvh' % i_batch), \
                                foot, \
                                fid_l=(3, 4), \
                                fid_r=(7, 8),\
                                output_path=("../bvh_seq_after"+version+"/test_%03d.bvh" % i_batch))
                fix_img_list = []
                # for t in range(opt['model']['seq_length']):
                for t in range((opt['model']['seq_length']-1)*10):
                    plot_pose(np.concatenate([X[bs,0].view(22, 3).detach().cpu().numpy(),\
                            glb[t],\
                            X[bs,-1].view(22, 3).detach().cpu().numpy()], 0),\
                            t, '../results'+version+'/fixed')
                    # plot_pose(np.concatenate([X[bs,0].view(22, 3).detach().cpu().numpy(),\
                    #         X[bs,t].view(22, 3).detach().cpu().numpy(),\
                    #         X[bs,-1].view(22, 3).detach().cpu().numpy()], 0),\
                    #         t, '../results'+version+'/gt')
                    fix_img = Image.open('../results'+version+'/fixed_'+str(t)+'.png', 'r')
                    # gt_img = Image.open('../results'+version+'/gt_'+str(t)+'.png', 'r')
                    # fix_img_list.append(np.concatenate([fix_img, gt_img.resize(fix_img.size)], 1))
                    fix_img_list.append(np.array(fix_img))
                mkd('../gif_fixed'+version)
                imageio.mimsave(('../gif_fixed'+version+'/img_fix_%03d.gif' % i_batch), fix_img_list, duration=0.1)
                # save_bvh_from_network_output(motion, output_path=("../bvh_seq_after/test_%03d.bvh" % i_batch))
            # if i_batch < 49:
            if opt['test']['save_img'] and opt['test']['save_gif']:
                mkd('../gif'+version)
                imageio.mimsave(('../gif'+version+'/img_%03d.gif' % i_batch), img_list, duration=0.1)
            if opt['test']['save_pose']:
                # gt_pose = X[bs,:].view(opt['model']['seq_length'], 22, 3).detach().cpu().numpy()
                # pred_pose = torch.cat([x[bs].unsqueeze(0) for x in pred_list], 0).detach().cpu().numpy()
                # plt.clf()
                # joint_idx = 13
                # plt.plot(range(opt['model']['seq_length']), gt_pose[:,joint_idx,0])
                # plt.plot(range(opt['model']['seq_length']), pred_pose[:,joint_idx,0])
                # plt.legend(['gt', 'pred'])
                # plt.savefig('../results'+version+'/pose_%03d.png' % i_batch)
                # plt.close()
                pred_pose = torch.stack(pred_list, 1).detach().cpu().numpy()
                npy_name = '../traj_pose'+version+'/batch_%03d.npy' % i_batch
                mkd('../traj_pose'+version)
                np.save(npy_name, pred_pose)
            # if opt['test']['save_img'] and i_batch > 49:
            #     break
            if opt['test']['save_pose'] and i_batch > 49:
                break
        # print("train epoch: %03d, cur total loss:%.3f, cur best loss:%.3f" % (epoch, loss_total_cur, loss_total_min))
| 21,956 | 51.154394 | 139 | py |
PoseTriplet | PoseTriplet-main/hallucinator/code_rib/skeleton.py | import torch
import os
import numpy as np
import sys
sys.path.insert(0, os.path.dirname(__file__))
from quaternion import qmul_np, qmul, qrot
from torch.utils.data import Dataset, DataLoader
from LaFan import LaFan1
class Skeleton:
    """Kinematic tree of joint offsets and parent indices, with forward
    kinematics from local quaternions to world positions (torch tensors)."""
    def __init__(self, offsets, parents, joints_left=None, joints_right=None):
        """
        Args:
            offsets: per-joint bone offsets, one 3-vector per joint.
            parents: parent joint index per joint (-1 marks the root).
            joints_left/joints_right: optional index lists for the two body sides.
        """
        assert len(offsets) == len(parents)

        self._offsets = torch.FloatTensor(offsets)
        self._parents = np.array(parents)
        self._joints_left = joints_left
        self._joints_right = joints_right
        self._compute_metadata()

    def cuda(self):
        # Move only the offsets; parents stay a numpy array. Returns self
        # so calls can be chained.
        self._offsets = self._offsets.cuda()
        return self

    def num_joints(self):
        return self._offsets.shape[0]

    def offsets(self):
        return self._offsets

    def parents(self):
        return self._parents

    def has_children(self):
        # Boolean array: True where the joint has at least one child.
        return self._has_children

    def children(self):
        # List of child-index lists, parallel to the joints.
        return self._children

    def remove_joints(self, joints_to_remove):
        """
        Remove the joints specified in 'joints_to_remove', both from the
        skeleton definition and from the dataset (which is modified in place).
        The rotations of removed joints are propagated along the kinematic chain.
        """
        valid_joints = []
        for joint in range(len(self._parents)):
            if joint not in joints_to_remove:
                valid_joints.append(joint)

        # Re-index parents: each removal shifts later indices down by one.
        index_offsets = np.zeros(len(self._parents), dtype=int)
        new_parents = []
        for i, parent in enumerate(self._parents):
            if i not in joints_to_remove:
                new_parents.append(parent - index_offsets[parent])
            else:
                index_offsets[i:] += 1
        self._parents = np.array(new_parents)

        self._offsets = self._offsets[valid_joints]
        self._compute_metadata()

    def forward_kinematics(self, rotations, root_positions):
        """
        Perform forward kinematics using the given trajectory and local rotations.
        Arguments (where N = batch size, L = sequence length, J = number of joints):
         -- rotations: (N, L, J, 4) tensor of unit quaternions describing the local rotations of each joint.
         -- root_positions: (N, L, 3) tensor describing the root joint positions.
        Returns an (N, L, J, 3) tensor of world-space joint positions.
        """
        assert len(rotations.shape) == 4
        assert rotations.shape[-1] == 4

        positions_world = []
        rotations_world = []

        expanded_offsets = self._offsets.expand(rotations.shape[0], rotations.shape[1],
                                                self._offsets.shape[0], self._offsets.shape[1])

        # Parallelize along the batch and time dimensions
        for i in range(self._offsets.shape[0]):
            if self._parents[i] == -1:
                positions_world.append(root_positions)
                rotations_world.append(rotations[:, :, 0])
            else:
                # Child position = parent rotation applied to the bone offset,
                # translated by the parent's world position.
                positions_world.append(qrot(rotations_world[self._parents[i]], expanded_offsets[:, :, i]) \
                                       + positions_world[self._parents[i]])
                if self._has_children[i]:
                    rotations_world.append(qmul(rotations_world[self._parents[i]], rotations[:, :, i]))
                else:
                    # This joint is a terminal node -> it would be useless to compute the transformation
                    rotations_world.append(None)

        return torch.stack(positions_world, dim=3).permute(0, 1, 3, 2)

    def joints_left(self):
        return self._joints_left

    def joints_right(self):
        return self._joints_right

    def _compute_metadata(self):
        # Rebuild the has-children flags and child lists from self._parents.
        self._has_children = np.zeros(len(self._parents)).astype(bool)
        for i, parent in enumerate(self._parents):
            if parent != -1:
                self._has_children[parent] = True

        self._children = []
        for i, parent in enumerate(self._parents):
            self._children.append([])
        for i, parent in enumerate(self._parents):
            if parent != -1:
                self._children[parent].append(i)
if __name__=="__main__":
    # Smoke test: build the LaFan skeleton (offsets in cm), strip end-effector
    # stubs, and run forward kinematics over a few batches of the dataset.
    skeleton_mocap = Skeleton(offsets=[
        [-42.198200,91.614723,-40.067841],
        [ 0.103456,1.857829,10.548506],
        [43.499992,-0.000038,-0.000002],
        [42.372192,0.000015,-0.000007],
        [ 17.299999,-0.000002,0.000003],
        [0.000000,0.000000,0.000000],
        [0.103457,1.857829,-10.548503],
        [43.500042,-0.000027,0.000008],
        [42.372257,-0.000008,0.000014],
        [17.299992,-0.000005,0.000004],
        [0.000000,0.000000,0.000000],
        [6.901968,-2.603733,-0.000001],
        [12.588099,0.000002,0.000000],
        [12.343206,0.000000,-0.000001],
        [25.832886,-0.000004,0.000003],
        [11.766620,0.000005,-0.000001],
        [0.000000,0.000000,0.000000],
        [19.745899,-1.480370,6.000108],
        [11.284125,-0.000009,-0.000018],
        [33.000050,0.000004,0.000032],
        [25.200008,0.000015,0.000008],
        [0.000000,0.000000,0.000000],
        [19.746099,-1.480375,-6.000073],
        [11.284138,-0.000015,-0.000012],
        [33.000092,0.000017,0.000013],
        [25.199780,0.000135,0.000422],
        [0.000000,0.000000,0.000000]
    ],
    parents=[-1, 0, 1, 2, 3, 4,\
            0, 6, 7, 8, 9,\
            0, 11, 12, 13, 14, 15,\
            13, 17, 18, 19, 20,
            13, 22, 23, 24, 25])
    # Drop the zero-length terminal joints (indices 5, 10, 16, 21, 26).
    skeleton_mocap.remove_joints([5,10,16,21,26])
    # lafan_data = LaFan1('D:\\ubisoft-laforge-animation-dataset\\lafan1\\lafan1', train = False, debug=False)
    lafan_data = LaFan1('./lafan1/lafan1', train = False, debug=True)
    lafan_loader = DataLoader(lafan_data, batch_size=32, shuffle=False, num_workers=4)
    for i_batch, sample_batched in enumerate(lafan_loader):
        pos_batch = skeleton_mocap.forward_kinematics(sample_batched['local_q'], sample_batched['root_p'])
        # print(pos_batch[0,:,0].cpu().numpy())
        # break
| 6,005 | 35.846626 | 110 | py |
PoseTriplet | PoseTriplet-main/hallucinator/code_rib/LaFan.py | import torch
from torch.utils.data import Dataset, DataLoader
import sys, os
sys.path.insert(0, os.path.dirname(__file__))
sys.path.append("..")
import numpy as np
from lafan1 import extract, utils, benchmarks
class LaFan1(Dataset):
    """Windowed motion dataset over BVH takes (LaFan1-style layout).

    Each item bundles local joint quaternions, root velocity, foot-contact
    labels, offset/target descriptors and global joint positions for one
    fixed-length window.
    """
    def __init__(self, bvh_path, train = False, seq_len = 50, offset = 10, debug = False):
        """
        Args:
            bvh_path (string): Path to the bvh files.
            train (bool): use the first 550 of 600 takes; otherwise the last 50.
            seq_len (int): The max len of the sequence for interpolation.
            offset (int): stride (in frames) between consecutive windows.
            debug (bool): restrict to the first 2 takes and always return item 0.
        """
        if train:
            self.actors = ['h36m_take_{:0>3d}'.format(i) for i in range(600)][:550]
        else:
            # self.actors = ['subject5']
            self.actors = ['h36m_take_{:0>3d}'.format(i) for i in range(600)][550:]
        self.train = train
        self.seq_len = seq_len
        self.debug = debug
        if self.debug:
            self.actors = ['h36m_take_{:0>3d}'.format(i) for i in range(600)][:2]
        self.offset = offset
        self.data = self.load_data(bvh_path)
        self.cur_seq_length = 5

    def load_data(self, bvh_path):
        """Extract windows from the BVH files and build the per-window
        feature dict; also records global-position mean/std on self."""
        # Get test-set for windows of 65 frames, offset by 40 frames
        print('Building the data set...')
        X, Q, parents, contacts_l, contacts_r = extract.get_lafan1_set(\
            bvh_path, self.actors, window=self.seq_len, offset=self.offset, debug = self.debug)
        # Global representation:
        q_glbl, x_glbl = utils.quat_fk(Q, X, parents)

        # if self.train:
        # Global positions stats:
        x_mean = np.mean(x_glbl.reshape([x_glbl.shape[0], x_glbl.shape[1], -1]).transpose([0, 2, 1]), axis=(0, 2), keepdims=True)
        x_std = np.std(x_glbl.reshape([x_glbl.shape[0], x_glbl.shape[1], -1]).transpose([0, 2, 1]), axis=(0, 2), keepdims=True)
        self.x_mean = torch.from_numpy(x_mean)
        self.x_std = torch.from_numpy(x_std)

        input_ = {}
        # The following features are inputs:
        # 1. local quaternion vector (J * 4d)
        input_['local_q'] = Q
        # 2. global root velocity vector (3d)
        input_['root_v'] = x_glbl[:,1:,0,:] - x_glbl[:,:-1,0,:]
        # 3. contact information vector (4d)
        input_['contact'] = np.concatenate([contacts_l, contacts_r], -1)
        # 4. global root position offset (?d)
        input_['root_p_offset'] = x_glbl[:,-1,0,:]
        # 5. local quaternion offset (?d)
        input_['local_q_offset'] = Q[:,-1,:,:]
        # 6. target
        input_['target'] = Q[:,-1,:,:]
        # 7. root pos
        input_['root_p'] = x_glbl[:,:,0,:]
        # 8. X
        input_['X'] = x_glbl[:,:,:,:]

        print('Nb of sequences : {}\n'.format(X.shape[0]))

        return input_

    def __len__(self):
        # Number of extracted windows.
        return len(self.data['local_q'])

    def __getitem__(self, idx):
        """Return one window as a dict of float32 numpy arrays. In debug
        mode every index maps to window 0."""
        idx_ = None
        if self.debug:
            idx_ = 0
        else:
            idx_ = idx
        sample = {}
        sample['local_q'] = self.data['local_q'][idx_].astype(np.float32)
        sample['root_v'] = self.data['root_v'][idx_].astype(np.float32)
        sample['contact'] = self.data['contact'][idx_].astype(np.float32)
        sample['root_p_offset'] = self.data['root_p_offset'][idx_].astype(np.float32)
        sample['local_q_offset'] = self.data['local_q_offset'][idx_].astype(np.float32)
        sample['target'] = self.data['target'][idx_].astype(np.float32)
        sample['root_p'] = self.data['root_p'][idx_].astype(np.float32)
        sample['X'] = self.data['X'][idx_].astype(np.float32)
        # sample['local_q_aug'] = self.data['local_q'][idx_].astype(np.float32)
        # sample['root_v_aug'] = self.data['root_v'][idx_].astype(np.float32)
        # sample['contact_aug'] = self.data['contact'][idx_].astype(np.float32)
        return sample
if __name__=="__main__":
    # Smoke test for the dataset loader.
    lafan_data = LaFan1('D:\\ubisoft-laforge-animation-dataset\\lafan1\\lafan1')
    # Bug fix: LaFan1 never sets `data_X`/`data_Q` attributes — the loaded
    # arrays live in the `data` dict — so the old print raised AttributeError.
    print(lafan_data.data['X'].shape, lafan_data.data['local_q'].shape)
| 4,399 | 38.285714 | 131 | py |
PoseTriplet | PoseTriplet-main/hallucinator/code_rib/rlpose2bvh.py |
import random
import argparse
import torch.optim as optim
import os
import datetime
import os.path as path
from torch.autograd import Variable
from progress.bar import Bar
from time import time
from bvh_skeleton import humanoid_rib_skeleton
from bvh_skeleton.camera import world2cam_sktpos
import torch
import numpy as np
from tqdm import tqdm
import pickle
import multiprocessing
import math
def rlpose2bvh(take_list, expert_dict):
    """Export RL pose results as BVH files, one per take.

    Thin wrapper that hands the take names and their result dictionary to
    the multi-process BVH writer.

    Args:
        take_list: names of the takes to export.
        expert_dict: mapping from take name to its result record.
    """
    write_standard_bvh_multi_process(take_list, expert_dict)
def write_standard_bvh_multi_process(takes, result_all_dict):
    """Write one BVH per take, `num_threads` processes at a time.

    Relies on the module-level globals `traj_save_path` and `num_threads`
    (set in the __main__ block).
    NOTE(review): the worker is a local closure passed to
    multiprocessing.Process — this works with the fork start method but is
    not picklable under spawn (e.g. Windows/macOS defaults); confirm target
    platform.
    """
    def wrap_write_standard_bvh(take):
        # Worker: pull this take's 16-joint world positions and write its BVH.
        predicted_3d_wpos_withroot = np.copy(result_all_dict[take]['skt_wpos']).reshape(-1, 16, 3)
        # ground_z = np.min(predicted_3d_wpos_withroot[:, :, -1:])
        # ground_z = np.min(predicted_3d_wpos_withroot[:, :, -1:], axis=(1,2), keepdims=True)
        # predicted_3d_wpos_withroot[:, :, -1:] = predicted_3d_wpos_withroot[:, :, -1:] - ground_z
        bvhfileName = '{}/{}.bvh'.format(traj_save_path, take)
        write_standard_bvh(bvhfileName, predicted_3d_wpos_withroot)
    # start
    task_lst = takes
    # num_threads = args.num_threads
    # Launch workers in batches of num_threads and wait for each batch.
    for ep in range(math.ceil(len(task_lst) / num_threads)):
        p_lst = []
        for i in range(num_threads):
            idx = ep * num_threads + i
            if idx >= len(task_lst):
                break
            p = multiprocessing.Process(target=wrap_write_standard_bvh, args=(task_lst[idx],))
            p_lst.append(p)
        for p in p_lst:
            p.start()
        for p in p_lst:
            p.join()
        print('complete ep:', ep)
    # end.
def write_standard_bvh(bvhfileName, prediction3dpoint):
    '''
    Convert a 16-joint world-position sequence to a BVH file.

    :param bvhfileName: output .bvh path (parent directory is created).
    :param prediction3dpoint: array-like of frames, each 16 x 3, in metres.
    :return: None (writes the file as a side effect).
    '''
    # Camera-frame conversion with a sign flip, then metres -> centimetres
    # (BVH convention used by the skeleton writer).
    prediction3dpoint = world2cam_sktpos(prediction3dpoint) * -1
    for frame in prediction3dpoint:
        for point3d in frame:
            point3d[0] *= 100
            point3d[1] *= 100
            point3d[2] *= 100
    mkd(bvhfileName)
    # Expand the 16-joint skeleton to the writer's 22-joint layout.
    Converter = humanoid_rib_skeleton.SkeletonConverter()
    prediction3dpoint = Converter.convert_to_22joint(prediction3dpoint)
    # Write the BVH via the H36M skeleton definition.
    human36m_skeleton = humanoid_rib_skeleton.H36mSkeleton()
    human36m_skeleton.poses2bvh(prediction3dpoint, output_file=bvhfileName)
# ..mk dir
def mkd(target_dir, get_parent=True):
    """Ensure a directory exists, creating intermediate directories as needed.

    Args:
        target_dir: a path. When `get_parent` is True this is typically a
            file path whose parent directory should exist.
        get_parent: if True (default), create the parent directory of
            `target_dir`; otherwise create `target_dir` itself.
    """
    if get_parent:
        savedir = os.path.abspath(os.path.join(target_dir, os.pardir))
    else:
        savedir = target_dir
    # exist_ok=True already tolerates a pre-existing directory, so the former
    # `os.path.exists` pre-check was redundant (and racy under concurrency).
    os.makedirs(savedir, exist_ok=True)
if __name__ == '__main__':
    """
    convert RL motion to rib motion
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--pkl_path', type=str, default='debug')
    args = parser.parse_args()
    pkl_path = args.pkl_path
    ######################################################################
    # NOTE(review): pickle.load on an arbitrary --pkl_path executes code on
    # untrusted input; the file handle is also never closed.
    expert_dict = pickle.load(open(pkl_path, 'rb'))
    take_list = ['h36m_take_{:0>3d}'.format(i) for i in range(600)]
    # These two globals are consumed by write_standard_bvh_multi_process.
    num_threads = 32
    traj_save_path = './lafan1/lafan1'
    rlpose2bvh(take_list, expert_dict)
| 3,416 | 25.905512 | 98 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.